diff --git a/Documentation/ABI/testing/sysfs-class-net b/Documentation/ABI/testing/sysfs-class-net index e1b2e785bba80ea468e6da0a3b055f39a0fce9d8..beb8ec4dabbc648dd629e16a076662c9b78ff529 100644 --- a/Documentation/ABI/testing/sysfs-class-net +++ b/Documentation/ABI/testing/sysfs-class-net @@ -216,3 +216,11 @@ Contact: netdev@vger.kernel.org Description: Indicates the interface protocol type as a decimal value. See include/uapi/linux/if_arp.h for all possible values. + +What: /sys/class/net/<iface>/phys_switch_id +Date: November 2014 +KernelVersion: 3.19 +Contact: netdev@vger.kernel.org +Description: + Indicates the unique physical switch identifier of a switch this + port belongs to, as a string. diff --git a/Documentation/Changes b/Documentation/Changes index 1de131bb49fbfc9a48c25d118d78d52d0eab9842..74bdda9272a438949132f02ebc3d6a365681dd43 100644 --- a/Documentation/Changes +++ b/Documentation/Changes @@ -383,7 +383,7 @@ o Ip-route2 --------- -o +o OProfile -------- diff --git a/Documentation/devicetree/bindings/btmrvl.txt b/Documentation/devicetree/bindings/btmrvl.txt new file mode 100644 index 0000000000000000000000000000000000000000..58f964bb0a527fcdfb976d7c9d678e988004ad21 --- /dev/null +++ b/Documentation/devicetree/bindings/btmrvl.txt @@ -0,0 +1,29 @@ +btmrvl +------ + +Required properties: + + - compatible : must be "btmrvl,cfgdata" + +Optional properties: + + - btmrvl,cal-data : Calibration data downloaded to the device during + initialization. This is an array of 28 values (u8). + + - btmrvl,gpio-gap : gpio and gap (in msecs) combination to be + configured. + +Example: + +GPIO pin 13 is configured as a wakeup source and GAP is set to 100 msecs +in the example below. + +btmrvl { + compatible = "btmrvl,cfgdata"; + + btmrvl,cal-data = /bits/ 8 < + 0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02 + 0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00 + 0x00 0x00 0xf0 0x00>; + btmrvl,gpio-gap = <0x0d64>; }; diff --git a/Documentation/devicetree/bindings/bus/bcma.txt b/Documentation/devicetree/bindings/bus/bcma.txt index 62a48348ac1561935416f7d0bd4e4c5357b60429..edd44d8021392f27b2f3fa1655068f4dbd3b2d8c 100644 --- a/Documentation/devicetree/bindings/bus/bcma.txt +++ b/Documentation/devicetree/bindings/bus/bcma.txt @@ -8,6 +8,11 @@ Required properties: The cores on the AXI bus are automatically detected by bcma with the memory ranges they are using and they get registered afterwards. +Automatic detection of the IRQ number does not work on +BCM47xx/BCM53xx ARM SoCs. To assign IRQ numbers to the cores, provide +them manually through the device tree. Use an interrupt-map to specify the +IRQ used by the devices on the bus. The first address is just an index, +because we do not have any special register. The top-level axi bus may contain children representing attached cores (devices).
This is needed since some hardware details can't be auto-detected. @@ -22,6 +27,22 @@ Example: ranges = <0x00000000 0x18000000 0x00100000>; #address-cells = <1>; #size-cells = <1>; + #interrupt-cells = <1>; + interrupt-map-mask = <0x000fffff 0xffff>; + interrupt-map = + /* Ethernet Controller 0 */ + <0x00024000 0 &gic GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>, + + /* Ethernet Controller 1 */ + <0x00025000 0 &gic GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>, + + /* PCIe Controller 0 */ + <0x00012000 0 &gic GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>, + <0x00012000 1 &gic GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>, + <0x00012000 2 &gic GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>, + <0x00012000 3 &gic GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>, + <0x00012000 4 &gic GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>, + <0x00012000 5 &gic GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>; chipcommon { reg = <0x00000000 0x1000>; diff --git a/Documentation/devicetree/bindings/net/amd-xgbe.txt b/Documentation/devicetree/bindings/net/amd-xgbe.txt index 41354f730beba333d1921d428972459aacbd65cd..26efd526d16c29cc45de438aef24adb8f34898bd 100644 --- a/Documentation/devicetree/bindings/net/amd-xgbe.txt +++ b/Documentation/devicetree/bindings/net/amd-xgbe.txt @@ -7,7 +7,10 @@ Required properties: - PCS registers - interrupt-parent: Should be the phandle for the interrupt controller that services interrupts for this device -- interrupts: Should contain the amd-xgbe interrupt +- interrupts: Should contain the amd-xgbe interrupt(s). The first interrupt + listed is required and is the general device interrupt. If the optional + amd,per-channel-interrupt property is specified, then one additional + interrupt for each DMA channel supported by the device should be specified - clocks: - DMA clock for the amd-xgbe device (used for calculating the correct Rx interrupt watchdog timer value on a DMA channel @@ -23,6 +26,9 @@ Optional properties: - mac-address: mac address to be assigned to the device. Can be overridden by UEFI. - dma-coherent: Present if dma operations are coherent +- amd,per-channel-interrupt: Indicates that Rx and Tx complete will generate + a unique interrupt for each DMA channel - this requires an additional + interrupt to be configured for each DMA channel Example: xgbe@e0700000 { @@ -30,7 +36,9 @@ Example: reg = <0 0xe0700000 0 0x80000>, <0 0xe0780000 0 0x80000>; interrupt-parent = <&gic>; - interrupts = <0 325 4>; + interrupts = <0 325 4>, + <0 326 1>, <0 327 1>, <0 328 1>, <0 329 1>; + amd,per-channel-interrupt; clocks = <&xgbe_dma_clk>, <&xgbe_ptp_clk>; clock-names = "dma_clk", "ptp_clk"; phy-handle = <&phy>; diff --git a/Documentation/devicetree/bindings/net/can/c_can.txt b/Documentation/devicetree/bindings/net/can/c_can.txt index 8f1ae81228e3a9bc5a330398eb1c146a84948d61..5a1d8b0c39e97af6d3ef56589f0196541d912a51 100644 --- a/Documentation/devicetree/bindings/net/can/c_can.txt +++ b/Documentation/devicetree/bindings/net/can/c_can.txt @@ -4,6 +4,8 @@ Bosch C_CAN/D_CAN controller Device Tree Bindings Required properties: - compatible : Should be "bosch,c_can" for C_CAN controllers and "bosch,d_can" for D_CAN controllers. + Can be "ti,dra7-d_can", "ti,am3352-d_can" or + "ti,am4372-d_can".
- reg : physical base address and size of the C_CAN/D_CAN registers map - interrupts : property with a value describing the interrupt @@ -12,6 +14,9 @@ Required properties: Optional properties: - ti,hwmods : Must be "d_can<n>" or "c_can<n>", n being the instance number +- syscon-raminit : Handle to system control region that contains the + RAMINIT register, register offset to the RAMINIT + register and the CAN instance number (0 offset). Note: "ti,hwmods" field is used to fetch the base address and irq resources from TI, omap hwmod data base during device registration. diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt index a62c889aafcaf070774bc3e9bc93c96ba9ea1992..e124847443f87c9465ec82dc03ce792d2d1cf5c4 100644 --- a/Documentation/devicetree/bindings/net/dsa/dsa.txt +++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt @@ -10,7 +10,7 @@ Required properties: - dsa,ethernet : Should be a phandle to a valid Ethernet device node - dsa,mii-bus : Should be a phandle to a valid MDIO bus device node -Optionnal properties: +Optional properties: - interrupts : property with a value describing the switch interrupt number (not supported by the driver) @@ -23,6 +23,13 @@ Each of these switch child nodes should have the following required properties: - #address-cells : Must be 1 - #size-cells : Must be 0 +A switch child node has the following optional property: + +- eeprom-length : Set to the length of an EEPROM connected to the + switch. Must be set if the switch cannot detect + the presence and/or size of a connected EEPROM, + otherwise optional. + A switch may have multiple "port" children nodes Each port children node must have the following mandatory properties: diff --git a/Documentation/devicetree/bindings/net/micrel.txt b/Documentation/devicetree/bindings/net/micrel.txt index e1d99b95c4ec984e2adbf80b808a4bb0f0f90f05..87496a8c64ab5d8d3f84497454653f0cfb395c16 100644 --- a/Documentation/devicetree/bindings/net/micrel.txt +++ b/Documentation/devicetree/bindings/net/micrel.txt @@ -6,19 +6,32 @@ Optional properties: - micrel,led-mode : LED mode value to set for PHYs with configurable LEDs. - Configure the LED mode with single value. The list of PHYs and - the bits that are currently supported: + Configure the LED mode with single value. The list of PHYs and the + bits that are currently supported: - KSZ8001: register 0x1e, bits 15..14 - KSZ8041: register 0x1e, bits 15..14 - KSZ8021: register 0x1f, bits 5..4 - KSZ8031: register 0x1f, bits 5..4 - KSZ8051: register 0x1f, bits 5..4 + KSZ8001: register 0x1e, bits 15..14 + KSZ8041: register 0x1e, bits 15..14 + KSZ8021: register 0x1f, bits 5..4 + KSZ8031: register 0x1f, bits 5..4 + KSZ8051: register 0x1f, bits 5..4 + KSZ8081: register 0x1f, bits 5..4 + KSZ8091: register 0x1f, bits 5..4 - See the respective PHY datasheet for the mode values. + See the respective PHY datasheet for the mode values. + + - micrel,rmii-reference-clock-select-25-mhz: RMII Reference Clock Select + bit selects 25 MHz mode + + Setting the RMII Reference Clock Select bit enables 25 MHz rather + than 50 MHz clock mode. + + Note that this option is only needed for certain PHY revisions with a + non-standard, inverted function of this configuration bit. + Specifically, a clock reference ("rmii-ref" below) is always needed to + actually select a mode. - clocks, clock-names: contains clocks according to the common clock bindings. - supported clocks: - - KSZ8021, KSZ8031: "rmii-ref": The RMII refence input clock.
Used - to determine the XI input clock. + supported clocks: + - KSZ8021, KSZ8031, KSZ8081, KSZ8091: "rmii-ref": The RMII reference + input clock. Used to determine the XI input clock. diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt index 5b8c5890307773cf52b62eded7c1869a9b7aa2a3..40831fbaff72102d6ff608ae06c1ddf4bedcdbae 100644 --- a/Documentation/devicetree/bindings/net/phy.txt +++ b/Documentation/devicetree/bindings/net/phy.txt @@ -19,7 +19,6 @@ Optional Properties: specifications. If neither of these are specified, the default is to assume clause 22. The compatible list may also contain other elements. -- max-speed: Maximum PHY supported speed (10, 100, 1000...) If the phy's identifier is known then the list may contain an entry of the form: "ethernet-phy-idAAAA.BBBB" where @@ -29,6 +28,8 @@ Optional Properties: 4 hex digits. This is the chip vendor OUI bits 19:24, followed by 10 bits of a vendor specific ID. +- max-speed: Maximum PHY supported speed (10, 100, 1000...) + Example: ethernet-phy@0 { diff --git a/Documentation/devicetree/bindings/net/sh_eth.txt b/Documentation/devicetree/bindings/net/sh_eth.txt index 34d4db1a4e257a6efcf86aac42813de920214329..2f6ec85fda8e1bced5605f998565d68624656703 100644 --- a/Documentation/devicetree/bindings/net/sh_eth.txt +++ b/Documentation/devicetree/bindings/net/sh_eth.txt @@ -9,6 +9,7 @@ Required properties: "renesas,ether-r8a7779" if the device is a part of R8A7779 SoC. "renesas,ether-r8a7790" if the device is a part of R8A7790 SoC. "renesas,ether-r8a7791" if the device is a part of R8A7791 SoC. + "renesas,ether-r8a7793" if the device is a part of R8A7793 SoC. "renesas,ether-r8a7794" if the device is a part of R8A7794 SoC. "renesas,ether-r7s72100" if the device is a part of R7S72100 SoC. - reg: offset and length of (1) the E-DMAC/feLic register block (required), diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt index eeb5b2e97bedac5ce910a06cc03bf42035d544d4..83bf4986baeabbcc75e03abd5e3d50edd3e97962 100644 --- a/Documentation/networking/bonding.txt +++ b/Documentation/networking/bonding.txt @@ -2230,11 +2230,8 @@ balance-rr: This mode is the only mode that will permit a single It is possible to adjust TCP/IP's congestion limits by altering the net.ipv4.tcp_reordering sysctl parameter. The - usual default value is 3, and the maximum useful value is 127. - For a four interface balance-rr bond, expect that a single - TCP/IP stream will utilize no more than approximately 2.3 - interface's worth of throughput, even after adjusting - tcp_reordering. + usual default value is 3. But keep in mind the TCP stack is able + to automatically increase this when it detects reordering. Note that the fraction of packets that will be delivered out of order is highly variable, and is unlikely to be zero. The level diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index a476b08a43e0dac5642a0bdfba76b8523be1033e..9bffdfc648dc66149401296d73eb6fc04564ebd1 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -383,9 +383,17 @@ tcp_orphan_retries - INTEGER may consume significant resources. Cf. tcp_max_orphans. tcp_reordering - INTEGER - Maximal reordering of packets in a TCP stream. + Initial reordering level of packets in a TCP stream.
+ The TCP stack can then dynamically adjust the flow reordering level + between this initial value and tcp_max_reordering. Default: 3 +tcp_max_reordering - INTEGER + Maximal reordering level of packets in a TCP stream. + 300 is a fairly conservative value, but you might increase it + if paths are using per-packet load balancing (like bonding rr mode). + Default: 300 + tcp_retrans_collapse - BOOLEAN Bug-to-bug compatibility with some broken printers. On retransmit try to send bigger packets to work around bugs in @@ -1466,6 +1474,19 @@ suppress_frag_ndisc - INTEGER 1 - (default) discard fragmented neighbor discovery packets 0 - allow fragmented neighbor discovery packets +optimistic_dad - BOOLEAN + Whether to perform Optimistic Duplicate Address Detection (RFC 4429). + 0: disabled (default) + 1: enabled + +use_optimistic - BOOLEAN + If enabled, do not classify optimistic addresses as deprecated during + source address selection. Preferred addresses will still be chosen + before optimistic addresses, subject to other ranking in the source + address selection algorithm. + 0: disabled (default) + 1: enabled + icmp/*: ratelimit - INTEGER Limit the maximal rates for sending ICMPv6 packets. diff --git a/Documentation/networking/ipvlan.txt b/Documentation/networking/ipvlan.txt new file mode 100644 index 0000000000000000000000000000000000000000..cf996394e466b708d5ca02757143205d519ceb5b --- /dev/null +++ b/Documentation/networking/ipvlan.txt @@ -0,0 +1,107 @@ + + IPVLAN Driver HOWTO + +Initial Release: + Mahesh Bandewar + +1. Introduction: + This is conceptually very similar to the macvlan driver, with one major +exception: it uses L3 for muxing/demuxing among slaves. This property makes +the master device share the L2 with its slave devices. I have developed this +driver in conjunction with network namespaces and am not sure if there is a +use case outside of it. + + +2. Building and Installation: + In order to build the driver, please select the config item CONFIG_IPVLAN. +The driver can be built into the kernel (CONFIG_IPVLAN=y) or as a module +(CONFIG_IPVLAN=m). + + +3. Configuration: + There are no module parameters for this driver and it can be configured +using the iproute2/ip utility. + + ip link add link <master-dev> <slave-dev> type ipvlan mode { l2 | l3 } + + e.g. ip link add link eth0 ipvl0 type ipvlan mode l2 + + +4. Operating modes: + IPvlan has two modes of operation - L2 and L3. For a given master device, +you can select one of these two modes and all slaves on that master will +operate in the same (selected) mode. The RX mode is almost identical except +that in L3 mode the slaves won't receive any multicast / broadcast traffic. +L3 mode is more restrictive since routing is controlled from the other (mostly) +default namespace. + +4.1 L2 mode: + In this mode TX processing happens on the stack instance attached to the +slave device and packets are switched and queued to the master device to send +out. In this mode the slaves will RX/TX multicast and broadcast (if applicable) +as well. + +4.2 L3 mode: + In this mode TX processing up to L3 happens on the stack instance attached +to the slave device and packets are switched to the stack instance of the +master device for the L2 processing, and routing from that instance will be +used before packets are queued on the outbound device. In this mode the slaves +will not receive nor send multicast / broadcast traffic. + + +5. What to choose (macvlan vs. ipvlan)?
+ These two devices are very similar in many regards and the specific use +case could very well define which device to choose. If one of the following +situations defines your use case, then you can choose to use ipvlan: + (a) The Linux host that is connected to the external switch / router has +a policy configured that allows only one MAC per port. + (b) The number of virtual devices created on a master exceeds the MAC +capacity, putting the NIC in promiscuous mode, and degraded performance is +a concern. + (c) The slave device is to be put into a hostile / untrusted network +namespace where L2 on the slave could be changed / misused. + + +6. Example configuration: + + +=============================================================+ + | Host: host1 | + | | + | +----------------------+ +----------------------+ | + | | NS:ns0 | | NS:ns1 | | + | | | | | | + | | | | | | + | | ipvl0 | | ipvl1 | | + | +----------#-----------+ +-----------#----------+ | + | # # | + | ################################ | + | # eth0 | + +==============================#==============================+ + + + (a) Create two network namespaces - ns0, ns1 + ip netns add ns0 + ip netns add ns1 + + (b) Create two ipvlan slaves on eth0 (master device) + ip link add link eth0 ipvl0 type ipvlan mode l2 + ip link add link eth0 ipvl1 type ipvlan mode l2 + + (c) Assign slaves to the respective network namespaces + ip link set dev ipvl0 netns ns0 + ip link set dev ipvl1 netns ns1 + + (d) Now switch to the namespace (ns0 or ns1) to configure the slave devices + - For ns0 + (1) ip netns exec ns0 bash + (2) ip link set dev ipvl0 up + (3) ip link set dev lo up + (4) ip -4 addr add 127.0.0.1 dev lo + (5) ip -4 addr add $IPADDR dev ipvl0 + (6) ip -4 route add default via $ROUTER dev ipvl0 + - For ns1 + (1) ip netns exec ns1 bash + (2) ip link set dev ipvl1 up + (3) ip link set dev lo up + (4) ip -4 addr add 127.0.0.1 dev lo + (5) ip -4 addr add $IPADDR dev ipvl1 + (6) ip -4 route add default via $ROUTER dev ipvl1 diff --git a/Documentation/networking/ixgbe.txt b/Documentation/networking/ixgbe.txt index 96cccebb839b3f52fcfd318f37731489e2be45f0..0ace6e776ac86d93c9f96f4ca1814eee71454624 100644 --- a/Documentation/networking/ixgbe.txt +++ b/Documentation/networking/ixgbe.txt @@ -138,7 +138,7 @@ Other ethtool Commands: To enable Flow Director ethtool -K ethX ntuple on To add a filter - Use -U switch. e.g., ethtool -U ethX flow-type tcp4 src-ip 0x178000a + Use -U switch. e.g., ethtool -U ethX flow-type tcp4 src-ip 10.0.128.23 action 1 To see the list of filters currently present: ethtool -u ethX diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt index 2090895b08d42636a6bc9a4c4bfffacc04b6c94d..e655e2453c9842e8bc9d28cce3b61e79a54c1fbf 100644 --- a/Documentation/networking/stmmac.txt +++ b/Documentation/networking/stmmac.txt @@ -1,12 +1,12 @@ STMicroelectronics 10/100/1000 Synopsys Ethernet driver -Copyright (C) 2007-2013 STMicroelectronics Ltd +Copyright (C) 2007-2014 STMicroelectronics Ltd Author: Giuseppe Cavallaro This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers (Synopsys IP blocks). -Currently this network device driver is for all STM embedded MAC/GMAC +Currently this network device driver is for all STi embedded MAC/GMAC (i.e. 7xxx/5xxx SoCs), SPEAr (arm), Loongson1B (mips) and XLINX XC2V3000 FF1152AMT0221 D1215994A VIRTEX FPGA board.
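As a programmatic complement to the ip commands in section 6 of the ipvlan HOWTO above, here is a minimal, hedged C sketch that creates an ipvlan slave over rtnetlink, roughly equivalent to "ip link add link eth0 ipvl0 type ipvlan mode l2". The names eth0/ipvl0 are taken from that example; the IFLA_IPVLAN_MODE attribute is the one this patch set introduces in include/uapi/linux/if_link.h, and ACK processing plus most error handling are deliberately omitted:

	/* Hedged sketch: create an ipvlan slave via rtnetlink (no ACK handling). */
	#include <string.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <net/if.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>
	#include <linux/rtnetlink.h>
	#include <linux/if_link.h>

	struct req { struct nlmsghdr nh; struct ifinfomsg ifi; char buf[512]; };

	static struct rtattr *tail(struct nlmsghdr *n)
	{
		return (struct rtattr *)((char *)n + NLMSG_ALIGN(n->nlmsg_len));
	}

	static void addattr(struct nlmsghdr *n, unsigned short type,
			    const void *data, int len)
	{
		struct rtattr *rta = tail(n);

		rta->rta_type = type;
		rta->rta_len = RTA_LENGTH(len);
		if (len)
			memcpy(RTA_DATA(rta), data, len);
		n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(rta->rta_len);
	}

	int main(void)
	{
		struct req req;
		struct rtattr *linkinfo, *data;
		unsigned short mode = IPVLAN_MODE_L2;
		unsigned int master = if_nametoindex("eth0");	/* assumed master */
		int fd;

		memset(&req, 0, sizeof(req));
		req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
		req.nh.nlmsg_type = RTM_NEWLINK;
		req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
		req.ifi.ifi_family = AF_UNSPEC;

		addattr(&req.nh, IFLA_LINK, &master, sizeof(master));
		addattr(&req.nh, IFLA_IFNAME, "ipvl0", 6);

		linkinfo = tail(&req.nh);	/* open the IFLA_LINKINFO nest */
		addattr(&req.nh, IFLA_LINKINFO, NULL, 0);
		addattr(&req.nh, IFLA_INFO_KIND, "ipvlan", 6);
		data = tail(&req.nh);		/* open the IFLA_INFO_DATA nest */
		addattr(&req.nh, IFLA_INFO_DATA, NULL, 0);
		addattr(&req.nh, IFLA_IPVLAN_MODE, &mode, sizeof(mode));
		data->rta_len = (char *)tail(&req.nh) - (char *)data;
		linkinfo->rta_len = (char *)tail(&req.nh) - (char *)linkinfo;

		fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
		if (fd < 0 || send(fd, &req, req.nh.nlmsg_len, 0) < 0) {
			perror("rtnetlink");
			return 1;
		}
		close(fd);
		return 0;
	}

This mirrors what iproute2 does internally for the same command; the nested-attribute pattern (capture the tail, emit a zero-length header, backfill rta_len) is the standard rtnetlink idiom.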
@@ -22,6 +22,9 @@ The kernel configuration option is STMMAC_ETH: Device Drivers ---> Network device support ---> Ethernet (1000 Mbit) ---> STMicroelectronics 10/100/1000 Ethernet driver (STMMAC_ETH) +CONFIG_STMMAC_PLATFORM: enables the platform driver. +CONFIG_STMMAC_PCI: enables the PCI driver. + 2) Driver parameters list: debug: message level (0: no output, 16: all); phyaddr: to manually provide the physical address to the PHY device; @@ -45,10 +48,11 @@ Driver parameters can be also passed in command line by using: The xmit method is invoked when the kernel needs to transmit a packet; it sets the descriptors in the ring and informs the DMA engine that there is a packet ready to be transmitted. -Once the controller has finished transmitting the packet, an interrupt is -triggered; So the driver will be able to release the socket buffers. By default, the driver sets the NETIF_F_SG bit in the features field of the -net_device structure enabling the scatter/gather feature. +net_device structure enabling the scatter-gather feature. This is true on +chips and configurations where the checksum can be done in hardware. +Once the controller has finished transmitting the packet, NAPI will be +scheduled to release the transmit resources. 4.2) Receive process When one or more packets are received, an interrupt happens. The interrupts @@ -58,20 +62,12 @@ This is based on NAPI so the interrupt handler signals only if there is work to be done, and it exits. Then the poll method will be scheduled at some future point. The incoming packets are stored, by the DMA, in a list of pre-allocated socket -buffers in order to avoid the memcpy (Zero-copy). +buffers in order to avoid the memcpy (zero-copy). 4.3) Interrupt Mitigation The driver is able to mitigate the number of its DMA interrupts using NAPI for the reception on chips older than the 3.50. New chips have an HW RX-Watchdog used for this mitigation. - -On Tx-side, the mitigation schema is based on a SW timer that calls the -tx function (stmmac_tx) to reclaim the resource after transmitting the -frames. -Also there is another parameter (like a threshold) used to program -the descriptors avoiding to set the interrupt on completion bit in -when the frame is sent (xmit). - Mitigation parameters can be tuned by ethtool. 4.4) WOL @@ -79,7 +75,7 @@ Wake up on Lan feature through Magic and Unicast frames are supported for the GMAC core. 4.5) DMA descriptors -Driver handles both normal and enhanced descriptors. The latter has been only +Driver handles both normal and alternate descriptors. The latter has been only tested on DWC Ether MAC 10/100/1000 Universal version 3.41a and later. STMMAC supports DMA descriptor to operate both in dual buffer (RING) @@ -91,9 +87,20 @@ In CHAINED mode each descriptor will have pointer to next descriptor in the list, hence creating the explicit chaining in the descriptor itself, whereas such explicit chaining is not possible in RING mode. +4.5.1) Extended descriptors + The extended descriptors give us information about the Ethernet payload + when it is carrying PTP packets or TCP/UDP/ICMP over IP. + These are not available on GMAC Synopsys chips older than the 3.50. + At probe time the driver will decide if these can actually be used. + This support is also mandatory for PTPv2 because the extra descriptors + are used for saving the hardware timestamps and Extended Status. + 4.6) Ethtool support -Ethtool is supported. Driver statistics and internal errors can be taken using: -ethtool -S ethX command.
It is possible to dump registers etc. +Ethtool is supported. + +For example, driver statistics (including RMON) and internal errors can be +obtained using: + # ethtool -S ethX 4.7) Jumbo and Segmentation Offloading Jumbo frames are supported and tested for the GMAC. @@ -101,12 +108,11 @@ The GSO has been also added but it's performed in software. LRO is not supported. 4.8) Physical -The driver is compatible with PAL to work with PHY and GPHY devices. +The driver is compatible with the Physical Abstraction Layer and can be +connected with PHY and GPHY devices. 4.9) Platform information -Several driver's information can be passed through the platform -These are included in the include/linux/stmmac.h header file -and detailed below as well: +Several pieces of information can be passed through the platform data and the +device-tree. struct plat_stmmacenet_data { char *phy_bus_name; @@ -125,15 +131,18 @@ struct plat_stmmacenet_data { int force_sf_dma_mode; int force_thresh_dma_mode; int riwt_off; + int max_speed; + int maxmtu; void (*fix_mac_speed)(void *priv, unsigned int speed); void (*bus_setup)(void __iomem *ioaddr); void *(*setup)(struct platform_device *pdev); + void (*free)(struct platform_device *pdev, void *priv); int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); void *custom_cfg; void *custom_data; void *bsp_priv; - }; +}; Where: o phy_bus_name: phy bus name to attach to the stmmac. @@ -258,32 +267,43 @@ and the second one, with a real PHY device attached to the bus, by using the stmmac_mdio_bus_data structure (to provide the id, the reset procedure etc). -4.10) List of source files: - o Kconfig - o Makefile - o stmmac_main.c: main network device driver; - o stmmac_mdio.c: mdio functions; - o stmmac_pci: PCI driver; - o stmmac_platform.c: platform driver - o stmmac_ethtool.c: ethtool support; - o stmmac_timer.[ch]: timer code used for mitigating the driver dma interrupts - (only tested on ST40 platforms based); +Note that, starting from new chips where the HW capability register is +available, many configurations are discovered at run-time, for example to +understand if EEE, HW csum, PTP, enhanced descriptors etc. are actually +available. As a strategy adopted in this driver, the information from the HW +capability register can replace what has been passed from the platform. + +4.10) Device-tree support. + +Please see the following document: + Documentation/devicetree/bindings/net/stmmac.txt + +and the stmmac_of_data structure inside the include/linux/stmmac.h header file.
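To illustrate the platform information of section 4.9, here is a hedged sketch of a board file supplying this platform data. The myboard_* names are hypothetical, the values are illustrative only, and the field set follows the struct listing above (see include/linux/stmmac.h for the authoritative definition, which includes further fields not shown in the hunk):

	/* Hypothetical board-support sketch for struct plat_stmmacenet_data. */
	#include <linux/phy.h>
	#include <linux/stmmac.h>

	static void myboard_fix_mac_speed(void *bsp_priv, unsigned int speed)
	{
		/* e.g. retune a board-level clock mux when the link speed changes */
	}

	static struct plat_stmmacenet_data myboard_stmmac_pdata = {
		.phy_bus_name	= "stmmac",		/* use the stmmac MDIO bus */
		.interface	= PHY_INTERFACE_MODE_RGMII,
		.max_speed	= 1000,			/* new field, see above */
		.maxmtu		= 1500,			/* new field, see above */
		.riwt_off	= 1,			/* skip the Rx watchdog setup */
		.fix_mac_speed	= myboard_fix_mac_speed,
	};

On device-tree platforms the same knobs are filled in from the bindings document referenced above instead of from board code.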
+ +4.11) This is a summary of the content of some relevant files: + o stmmac_main.c: to implement the main network device driver; + o stmmac_mdio.c: to provide mdio functions; + o stmmac_pci: this is the PCI driver; + o stmmac_platform.c: this is the platform driver (OF supported); + o stmmac_ethtool.c: to implement the ethtool support; o stmmac.h: private driver structure; o common.h: common definitions and VFTs; o descs.h: descriptor structure definitions; - o dwmac1000_core.c: GMAC core functions; - o dwmac1000_dma.c: dma functions for the GMAC chip; - o dwmac1000.h: specific header file for the GMAC; - o dwmac100_core: MAC 100 core and dma code; - o dwmac100_dma.c: dma functions for the MAC chip; + o dwmac1000_core.c: dwmac GiGa core functions; + o dwmac1000_dma.c: dma functions for the GMAC chip; + o dwmac1000.h: specific header file for the dwmac GiGa; + o dwmac100_core: dwmac 100 core code; + o dwmac100_dma.c: dma functions for the dwmac 100 chip; o dwmac1000.h: specific header file for the MAC; - o dwmac_lib.c: generic DMA functions shared among chips; + o dwmac_lib.c: generic DMA functions; o enh_desc.c: functions for handling enhanced descriptors; o norm_desc.c: functions for handling normal descriptors; o chain_mode.c/ring_mode.c:: functions to manage RING/CHAINED modes; o mmc_core.c/mmc.h: Management MAC Counters; - o stmmac_hwtstamp.c: HW timestamp support for PTP - o stmmac_ptp.c: PTP 1588 clock + o stmmac_hwtstamp.c: HW timestamp support for PTP; + o stmmac_ptp.c: PTP 1588 clock; + o dwmac-<soc>.c: these are the platform glue-logic files; e.g. dwmac-sti.c + for STMicroelectronics SoCs. 5) Debug Information @@ -298,23 +318,14 @@ to get statistics: e.g. using: ethtool -S ethX (that shows the Management counters (MMC) if supported) or sees the MAC/DMA registers: e.g. using: ethtool -d ethX -Compiling the Kernel with CONFIG_DEBUG_FS and enabling the -STMMAC_DEBUG_FS option the driver will export the following +Compiling the kernel with CONFIG_DEBUG_FS, the driver will export the following debugfs entries: /sys/kernel/debug/stmmaceth/descriptors_status To show the DMA TX/RX descriptor rings -Developer can also use the "debug" module parameter to get -further debug information. - -In the end, there are other macros (that cannot be enabled -via menuconfig) to turn-on the RX/TX DMA debugging, -specific MAC core debug printk etc. Others to enable the -debug in the TX and RX processes. -All these are only useful during the developing stage -and should never enabled inside the code for general usage. -In fact, these can generate an huge amount of debug messages. +Developers can also use the "debug" module parameter to get further debug +information (please see: NETIF Msg Level). 6) Energy Efficient Ethernet @@ -337,15 +348,7 @@ To enter in Tx LPI mode the driver needs to have a software timer that enable and disable the LPI mode when there is nothing to be transmitted. -7) Extended descriptors -The extended descriptors give us information about the receive Ethernet payload -when it is carrying PTP packets or TCP/UDP/ICMP over IP. -These are not available on GMAC Synopsys chips older than the 3.50. -At probe time the driver will decide if these can be actually used. -This support also is mandatory for PTPv2 because the extra descriptors 6 and 7 -are used for saving the hardware timestamps.
- -8) Precision Time Protocol (PTP) +7) Precision Time Protocol (PTP) The driver supports the IEEE 1588-2002, Precision Time Protocol (PTP), which enables precise synchronization of clocks in measurement and control systems implemented with technologies such as network @@ -355,7 +358,7 @@ In addition to the basic timestamp features mentioned in IEEE 1588-2002 Timestamps, new GMAC cores support the advanced timestamp features. IEEE 1588-2008 that can be enabled when configure the Kernel. -9) SGMII/RGMII supports +8) SGMII/RGMII support New GMAC devices provide own way to manage RGMII/SGMII. This information is available at run-time by looking at the HW capability register. This means that the stmmac can manage @@ -364,8 +367,3 @@ In fact, the HW provides a subset of extended registers to restart the ANE, verify Full/Half duplex mode and Speed. Also thanks to these registers it is possible to look at the Auto-negotiated Link Parter Ability. - -10) TODO: - o XGMAC is not supported. - o Complete the TBI & RTBI support. - o extend VLAN support for 3.70a SYNP GMAC. diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt new file mode 100644 index 0000000000000000000000000000000000000000..f981a9295a39a14245f884b1fbc810817f74999c --- /dev/null +++ b/Documentation/networking/switchdev.txt @@ -0,0 +1,59 @@ +Switch (and switch-ish) device drivers HOWTO +============================================ + +Please note that the word "switch" is used here in a very generic meaning. +This includes devices supporting L2/L3, but also various flow-offloading chips, +including switches embedded into SR-IOV NICs. + +Let's describe the topology a bit. Imagine the following example: + + +----------------------------+ +---------------+ + | SOME switch chip | | CPU | + +----------------------------+ +---------------+ + port1 port2 port3 port4 MNGMNT | PCI-E | + | | | | | +---------------+ + PHY PHY | | | | NIC0 NIC1 + | | | | | | + | | +- PCI-E -+ | | + | +------- MII -------+ | + +------------- MII ------------+ + +In this example, there are two independent lines between the switch silicon +and the CPU. The NIC0 and NIC1 drivers are not aware of the switch's presence. +They are separate from the switch driver. The SOME switch chip is managed by a +driver via the PCI-E device MNGMNT. Note that the MNGMNT device, NIC0 and NIC1 +may be connected to some other type of bus. + +Now, for the previous example, this is the representation in the kernel: + + +----------------------------+ +---------------+ + | SOME switch chip | | CPU | + +----------------------------+ +---------------+ + sw0p0 sw0p1 sw0p2 sw0p3 MNGMNT | PCI-E | + | | | | | +---------------+ + PHY PHY | | | | eth0 eth1 + | | | | | | + | | +- PCI-E -+ | | + | +------- MII -------+ | + +------------- MII ------------+ + +Let's call the example switch driver for the SOME switch chip "SOMEswitch". +This driver takes care of the PCI-E device MNGMNT. A netdevice instance sw0pX +is created for each port of the switch. These netdevices are instances +of the "SOMEswitch" driver. The sw0pX netdevices serve as a "representation" +of the switch chip. eth0 and eth1 are instances of some other existing driver. + +The only difference between a switch-port netdevice and an ordinary netdevice +is that it implements a couple more NDOs: + + ndo_switch_parent_id_get - This returns the same ID for two port netdevices + of the same physical switch chip. This is + mandatory to implement for all switch drivers + and lets the caller recognize a port + netdevice.
+ ndo_switch_parent_* - Functions that serve for manipulating the switch + chip itself (it can be thought of as a "parent" of the + port, therefore the name). They are not port-specific. + The caller might use an arbitrary port netdevice of the + same switch; it will make no difference. + ndo_switch_port_* - Functions that serve for port-specific manipulation. diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt index 1d6d02d6ba52b531642436e8a6d8c0640af5467d..a5c784c893122788c8fd1d4db270576330b02518 100644 --- a/Documentation/networking/timestamping.txt +++ b/Documentation/networking/timestamping.txt @@ -122,7 +122,7 @@ SOF_TIMESTAMPING_RAW_HARDWARE: 1.3.3 Timestamp Options -The interface supports one option +The interface supports the options SOF_TIMESTAMPING_OPT_ID: @@ -130,19 +130,36 @@ SOF_TIMESTAMPING_OPT_ID: have multiple concurrent timestamping requests outstanding. Packets can be reordered in the transmit path, for instance in the packet scheduler. In that case timestamps will be queued onto the error - queue out of order from the original send() calls. This option - embeds a counter that is incremented at send() time, to order - timestamps within a flow. + queue out of order from the original send() calls. It is then not + always possible to uniquely match timestamps to the original send() + calls based on timestamp order or payload inspection alone. + + This option associates each packet at send() with a unique + identifier and returns that along with the timestamp. The identifier + is derived from a per-socket u32 counter (that wraps). For datagram + sockets, the counter increments with each sent packet. For stream + sockets, it increments with every byte. + + The counter starts at zero. It is initialized the first time that + the socket option is enabled. It is reset each time the option is + enabled after having been disabled. Resetting the counter does not + change the identifiers of existing packets in the system. This option is implemented only for transmit timestamps. There, the timestamp is always looped along with a struct sock_extended_err. The option modifies field ee_data to pass an id that is unique among all possibly concurrently outstanding timestamp requests for - that socket. In practice, it is a monotonically increasing u32 - (that wraps). + that socket. + + +SOF_TIMESTAMPING_OPT_CMSG: - In datagram sockets, the counter increments on each send call. In - stream sockets, it increments with every byte. + Support recv() cmsg for all timestamped packets. Control messages + are already supported unconditionally on all packets with receive + timestamps and on IPv6 packets with transmit timestamp. This option + extends them to IPv4 packets with transmit timestamp. One use case + is to correlate packets with their egress device, by enabling socket + option IP_PKTINFO simultaneously.
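A minimal C sketch of the two options just described, assuming a uapi that already carries this patch set (SOF_TIMESTAMPING_OPT_CMSG is new here); the helper name is hypothetical and error handling is trimmed:

	/* Hedged sketch: request software TX timestamps with OPT_ID + OPT_CMSG. */
	#include <netinet/in.h>
	#include <sys/socket.h>
	#include <linux/net_tstamp.h>
	#include <linux/errqueue.h>

	static int enable_tx_timestamps(int fd)
	{
		int one = 1;
		unsigned int val = SOF_TIMESTAMPING_TX_SOFTWARE |
				   SOF_TIMESTAMPING_SOFTWARE |
				   SOF_TIMESTAMPING_OPT_ID |	/* ee_data carries the send counter */
				   SOF_TIMESTAMPING_OPT_CMSG;	/* tx timestamps also get IP cmsgs */

		/* pair OPT_CMSG with IP_PKTINFO to learn the egress device */
		if (setsockopt(fd, SOL_IP, IP_PKTINFO, &one, sizeof(one)))
			return -1;
		return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
	}

	/* Timestamps are then read back with recvmsg(fd, &msg, MSG_ERRQUEUE);
	 * the struct sock_extended_err in the cmsg has ee_errno == ENOMSG,
	 * ee_origin == SO_EE_ORIGIN_TIMESTAMPING, and ee_data == the OPT_ID
	 * counter described above.
	 */

The txtimestamp.c changes in the hunks below exercise exactly this combination, including the IP_PKTINFO/IPV6_PKTINFO control messages.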
1.4 Bytestream Timestamps diff --git a/Documentation/networking/timestamping/txtimestamp.c b/Documentation/networking/timestamping/txtimestamp.c index b32fc2a07734ca28ac60a30913dab738b8111f57..876f71c5625aba4686fe7aede6769ccf6f700110 100644 --- a/Documentation/networking/timestamping/txtimestamp.c +++ b/Documentation/networking/timestamping/txtimestamp.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include @@ -58,6 +59,14 @@ #include #include +/* ugly hack to work around netinet/in.h and linux/ipv6.h conflicts */ +#ifndef in6_pktinfo +struct in6_pktinfo { + struct in6_addr ipi6_addr; + int ipi6_ifindex; +}; +#endif + /* command line parameters */ static int cfg_proto = SOCK_STREAM; static int cfg_ipproto = IPPROTO_TCP; @@ -65,6 +74,8 @@ static int cfg_num_pkts = 4; static int do_ipv4 = 1; static int do_ipv6 = 1; static int cfg_payload_len = 10; +static bool cfg_show_payload; +static bool cfg_do_pktinfo; static uint16_t dest_port = 9000; static struct sockaddr_in daddr; @@ -131,6 +142,30 @@ static void print_timestamp(struct scm_timestamping *tss, int tstype, __print_timestamp(tsname, &tss->ts[0], tskey, payload_len); } +/* TODO: convert to check_and_print payload once API is stable */ +static void print_payload(char *data, int len) +{ + int i; + + if (len > 70) + len = 70; + + fprintf(stderr, "payload: "); + for (i = 0; i < len; i++) + fprintf(stderr, "%02hhx ", data[i]); + fprintf(stderr, "\n"); +} + +static void print_pktinfo(int family, int ifindex, void *saddr, void *daddr) +{ + char sa[INET6_ADDRSTRLEN], da[INET6_ADDRSTRLEN]; + + fprintf(stderr, " pktinfo: ifindex=%u src=%s dst=%s\n", + ifindex, + saddr ? inet_ntop(family, saddr, sa, sizeof(sa)) : "unknown", + daddr ? inet_ntop(family, daddr, da, sizeof(da)) : "unknown"); +} + static void __poll(int fd) { struct pollfd pollfd; @@ -156,10 +191,9 @@ static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len) cm->cmsg_type == SCM_TIMESTAMPING) { tss = (void *) CMSG_DATA(cm); } else if ((cm->cmsg_level == SOL_IP && - cm->cmsg_type == IP_RECVERR) || - (cm->cmsg_level == SOL_IPV6 && - cm->cmsg_type == IPV6_RECVERR)) { - + cm->cmsg_type == IP_RECVERR) || + (cm->cmsg_level == SOL_IPV6 && + cm->cmsg_type == IPV6_RECVERR)) { serr = (void *) CMSG_DATA(cm); if (serr->ee_errno != ENOMSG || serr->ee_origin != SO_EE_ORIGIN_TIMESTAMPING) { @@ -168,6 +202,16 @@ static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len) serr->ee_origin); serr = NULL; } + } else if (cm->cmsg_level == SOL_IP && + cm->cmsg_type == IP_PKTINFO) { + struct in_pktinfo *info = (void *) CMSG_DATA(cm); + print_pktinfo(AF_INET, info->ipi_ifindex, + &info->ipi_spec_dst, &info->ipi_addr); + } else if (cm->cmsg_level == SOL_IPV6 && + cm->cmsg_type == IPV6_PKTINFO) { + struct in6_pktinfo *info6 = (void *) CMSG_DATA(cm); + print_pktinfo(AF_INET6, info6->ipi6_ifindex, + NULL, &info6->ipi6_addr); } else fprintf(stderr, "unknown cmsg %d,%d\n", cm->cmsg_level, cm->cmsg_type); @@ -206,7 +250,11 @@ static int recv_errmsg(int fd) if (ret == -1 && errno != EAGAIN) error(1, errno, "recvmsg"); - __recv_errmsg_cmsg(&msg, ret); + if (ret > 0) { + __recv_errmsg_cmsg(&msg, ret); + if (cfg_show_payload) + print_payload(data, cfg_payload_len); + } free(data); return ret == -1; @@ -215,9 +263,9 @@ static int recv_errmsg(int fd) static void do_test(int family, unsigned int opt) { char *buf; - int fd, i, val, total_len; + int fd, i, val = 1, total_len; - if (family == IPPROTO_IPV6 && cfg_proto != SOCK_STREAM) { + if (family == AF_INET6 && cfg_proto != 
SOCK_STREAM) { /* due to lack of checksum generation code */ fprintf(stderr, "test: skipping datagram over IPv6\n"); return; } @@ -239,7 +287,6 @@ static void do_test(int family, unsigned int opt) error(1, errno, "socket"); if (cfg_proto == SOCK_STREAM) { - val = 1; if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char*) &val, sizeof(val))) error(1, 0, "setsockopt no nagle"); @@ -253,7 +300,20 @@ static void do_test(int family, unsigned int opt) } } + if (cfg_do_pktinfo) { + if (family == AF_INET6) { + if (setsockopt(fd, SOL_IPV6, IPV6_RECVPKTINFO, + &val, sizeof(val))) + error(1, errno, "setsockopt pktinfo ipv6"); + } else { + if (setsockopt(fd, SOL_IP, IP_PKTINFO, + &val, sizeof(val))) + error(1, errno, "setsockopt pktinfo ipv4"); + } + } + opt |= SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_OPT_CMSG | SOF_TIMESTAMPING_OPT_ID; if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, (char *) &opt, sizeof(opt))) @@ -262,8 +322,6 @@ static void do_test(int family, unsigned int opt) for (i = 0; i < cfg_num_pkts; i++) { memset(&ts_prev, 0, sizeof(ts_prev)); memset(buf, 'a' + i, total_len); - buf[total_len - 2] = '\n'; - buf[total_len - 1] = '\0'; if (cfg_proto == SOCK_RAW) { struct udphdr *udph; @@ -324,11 +382,13 @@ static void __attribute__((noreturn)) usage(const char *filepath) " -4: only IPv4\n" " -6: only IPv6\n" " -h: show this message\n" + " -I: request PKTINFO\n" " -l N: send N bytes at a time\n" " -r: use raw\n" " -R: use raw (IP_HDRINCL)\n" " -p N: connect to port N\n" - " -u: use udp\n", + " -u: use udp\n" + " -x: show payload (up to 70 bytes)\n", filepath); exit(1); } @@ -338,7 +398,7 @@ static void parse_opt(int argc, char **argv) int proto_count = 0; char c; - while ((c = getopt(argc, argv, "46hl:p:rRu")) != -1) { + while ((c = getopt(argc, argv, "46hIl:p:rRux")) != -1) { switch (c) { case '4': do_ipv6 = 0; @@ -346,6 +406,9 @@ static void parse_opt(int argc, char **argv) case '6': do_ipv4 = 0; break; + case 'I': + cfg_do_pktinfo = true; + break; + case 'r': proto_count++; cfg_proto = SOCK_RAW; @@ -367,6 +430,9 @@ static void parse_opt(int argc, char **argv) case 'p': dest_port = strtoul(optarg, NULL, 10); break; + case 'x': + cfg_show_payload = true; + break; case 'h': default: usage(argv[0]); diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index 04892b821157774b81f4adecae870e06ef6f9bd4..666594b43cfff9f7a50678e3d74bd29c116d3648 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -120,10 +120,14 @@ seconds. warnings -------- -This controls console messages from the networking stack that can occur because -of problems on the network like duplicate address or bad checksums. Normally, -this should be enabled, but if the problem persists the messages can be -disabled. +This sysctl is now unused. + +This was used to control console messages from the networking stack that +occur because of problems on the network like duplicate addresses or bad +checksums. + +These messages are now emitted at KERN_DEBUG and can generally be enabled +and controlled by the dynamic_debug facility. netdev_budget ------------- @@ -138,6 +142,28 @@ netdev_max_backlog Maximum number of packets, queued on the INPUT side, when the interface receives packets faster than kernel can process them. +netdev_rss_key +-------------- + +RSS (Receive Side Scaling) enabled drivers use a 40-byte host key that is +randomly generated. +Some user-space programs might need to gather its content even if drivers do +not provide ethtool -x support yet.
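A small C sketch that parses this file, mirroring the cat transcript shown below; the colon-separated hex format is taken from the example output, and the sketch has no fallback for older kernels where the file is absent:

	/* Hedged sketch: read and print /proc/sys/net/core/netdev_rss_key. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int byte, i = 0;
		FILE *f = fopen("/proc/sys/net/core/netdev_rss_key", "r");

		if (!f) {
			perror("netdev_rss_key");	/* pre-3.19 kernels lack this file */
			return 1;
		}
		while (fscanf(f, "%2x", &byte) == 1) {
			printf("key[%2u] = 0x%02x\n", i++, byte);
			if (fgetc(f) == EOF)		/* consume the ':' separator */
				break;
		}
		fclose(f);
		return 0;
	}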
+ +myhost:~# cat /proc/sys/net/core/netdev_rss_key +84:50:f4:00:a8:15:d1:a7:e9:7f:1d:60:35:c7:47:25:42:97:74:ca:56:bb:b6:a1:d8: ... (52 bytes total) + +The file contains nul bytes if no driver has ever called the +netdev_rss_key_fill() function. +Note: +/proc/sys/net/core/netdev_rss_key contains 52 bytes of key, +but most drivers only use 40 bytes of it. + +myhost:~# ethtool -x eth0 +RX flow hash indirection table for eth0 with 8 RX ring(s): + 0: 0 1 2 3 4 5 6 7 +RSS hash key: +84:50:f4:00:a8:15:d1:a7:e9:7f:1d:60:35:c7:47:25:42:97:74:ca:56:bb:b6:a1:d8:43:e3:c9:0c:fd:17:55:c2:3a:4d:69:ed:f1:42:89 + netdev_tstamp_prequeue ---------------------- diff --git a/MAINTAINERS b/MAINTAINERS index 132958a4e27c6d86e2c9dcf39871e537ffba2f11..34b4b841da402b0a787bd809bc064e1c6f62f12e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2332,6 +2332,14 @@ F: security/capability.c F: security/commoncap.c F: kernel/capability.c +CC2520 IEEE-802.15.4 RADIO DRIVER +M: Varka Bhadram +L: linux-wpan@vger.kernel.org +S: Maintained +F: drivers/net/ieee802154/cc2520.c +F: include/linux/spi/cc2520.h +F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt + CELL BROADBAND ENGINE ARCHITECTURE M: Arnd Bergmann L: linuxppc-dev@lists.ozlabs.org @@ -4752,6 +4760,13 @@ S: Maintained F: net/ieee802154/ F: net/mac802154/ F: drivers/net/ieee802154/ +F: include/linux/nl802154.h +F: include/linux/ieee802154.h +F: include/net/nl802154.h +F: include/net/mac802154.h +F: include/net/af_ieee802154.h +F: include/net/cfg802154.h +F: include/net/ieee802154_netdev.h F: Documentation/networking/ieee802154.txt IGORPLUG-USB IR RECEIVER @@ -5931,6 +5946,11 @@ M: Russell King S: Maintained F: drivers/gpu/drm/armada/ +MARVELL 88E6352 DSA support +M: Guenter Roeck +S: Maintained +F: drivers/net/dsa/mv88e6352.c + MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2) M: Mirko Lindner M: Stephen Hemminger @@ -7956,6 +7976,13 @@ F: drivers/hid/hid-roccat* F: include/linux/hid-roccat* F: Documentation/ABI/*/sysfs-driver-hid-roccat* +ROCKER DRIVER +M: Jiri Pirko +M: Scott Feldman +L: netdev@vger.kernel.org +S: Supported +F: drivers/net/ethernet/rocker/ + ROCKETPORT DRIVER P: Comtrol Corp. W: http://www.comtrol.com @@ -8003,11 +8030,10 @@ S: Maintained F: drivers/media/dvb-frontends/rtl2832_sdr* RTL8180 WIRELESS DRIVER -M: "John W.
Linville" L: linux-wireless@vger.kernel.org W: http://wireless.kernel.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git -S: Maintained +S: Orphan F: drivers/net/wireless/rtl818x/rtl8180/ RTL8187 WIRELESS DRIVER @@ -9160,6 +9186,13 @@ F: lib/swiotlb.c F: arch/*/kernel/pci-swiotlb.c F: include/linux/swiotlb.h +SWITCHDEV +M: Jiri Pirko +L: netdev@vger.kernel.org +S: Supported +F: net/switchdev/ +F: include/net/switchdev.h + SYNOPSYS ARC ARCHITECTURE M: Vineet Gupta S: Supported diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild index 25b49725df07d7ecad8e222060486e9d746925b9..76aeb8fa551a83fa3b84c15b46fd87056e297a21 100644 --- a/arch/alpha/include/asm/Kbuild +++ b/arch/alpha/include/asm/Kbuild @@ -3,7 +3,6 @@ generic-y += clkdev.h generic-y += cputime.h generic-y += exec.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index 3de1394bcab821984674e89a3ee022cc6dd5f0f2..9a20821b111c65aecc5b60a7956fb00dde178aae 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -87,4 +87,9 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index b8fffc1a2ac237b8feca4a42cc0fd036a9bc8ba2..be0c39e76f7cdf8855aacf31d24e128fcccd207b 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild @@ -12,7 +12,6 @@ generic-y += fb.h generic-y += fcntl.h generic-y += ftrace.h generic-y += hardirq.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig index f95f72d62db73de315e4688953ae8ebb285412c4..759f9b0053e294a0cee0f9ce26f63974daa84850 100644 --- a/arch/arm/configs/davinci_all_defconfig +++ b/arch/arm/configs/davinci_all_defconfig @@ -97,7 +97,6 @@ CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m CONFIG_PPP_DEFLATE=m CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y # CONFIG_INPUT_MOUSEDEV is not set CONFIG_INPUT_EVDEV=m CONFIG_INPUT_EVBUG=m diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 70cd84eb7fda0c294c004d83841462bb4df799a5..fe74c0d1e485f01588ec21a927a852a0b0ca156e 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -7,7 +7,6 @@ generic-y += current.h generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h -generic-y += hash.h generic-y += ioctl.h generic-y += ipcbuf.h generic-y += irq_regs.h diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index e90a3148f38540c98c9f7a34ccce3f9ab7de7581..b83f3b7737fb9dce2a18c66a086aac2430e98cf6 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -400,7 +400,7 @@ asmlinkage long sys_oabi_sendto(int fd, void __user *buff, return sys_sendto(fd, buff, len, flags, addr, addrlen); } -asmlinkage long sys_oabi_sendmsg(int fd, struct msghdr __user *msg, unsigned flags) +asmlinkage long sys_oabi_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags) { struct sockaddr __user *addr; int msg_namelen; @@ -446,7 +446,7 @@ asmlinkage long sys_oabi_socketcall(int call, unsigned long __user *args) break; case SYS_SENDMSG: if (copy_from_user(a, args, 3 * sizeof(long)) == 0) - r = sys_oabi_sendmsg(a[0], (struct 
msghdr __user *)a[1], a[2]); + r = sys_oabi_sendmsg(a[0], (struct user_msghdr __user *)a[1], a[2]); break; default: r = sys_socketcall(call, args); diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c index d81b2475e67e547cb5009a666b6caf779b300cb1..22762a1f9f726143a7ebc1fab2504dc82b570955 100644 --- a/arch/arm/mach-mmp/gplugd.c +++ b/arch/arm/mach-mmp/gplugd.c @@ -158,6 +158,7 @@ struct pxa168_eth_platform_data gplugd_eth_platform_data = { .port_number = 0, .phy_addr = 0, .speed = 0, /* Autonagotiation */ + .intf = PHY_INTERFACE_MODE_RMII, .init = gplugd_eth_init, }; diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index dc770bd4f5a53c0e9a32d97817c55d6698ec87c1..6b61091c7f4cefdd31f92722a0f81c0e41393762 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -14,7 +14,6 @@ generic-y += early_ioremap.h generic-y += emergency-restart.h generic-y += errno.h generic-y += ftrace.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild index 2a71b1cb984821418f1f0f5b84fb19c9bbd6e72f..528d70d47a540d2e0a38c49af916bd442a240661 100644 --- a/arch/avr32/include/asm/Kbuild +++ b/arch/avr32/include/asm/Kbuild @@ -7,7 +7,6 @@ generic-y += div64.h generic-y += emergency-restart.h generic-y += exec.h generic-y += futex.h -generic-y += hash.h generic-y += irq_regs.h generic-y += irq_work.h generic-y += local.h diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h index 6e6cd159924b1855aa5f1811ad4e4c60b403c431..2b65ed6b277cada8728d70f26f3df76c40af078b 100644 --- a/arch/avr32/include/uapi/asm/socket.h +++ b/arch/avr32/include/uapi/asm/socket.h @@ -80,4 +80,9 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _UAPI__ASM_AVR32_SOCKET_H */ diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild index 46ed6bb9c6798a6ff3ff749f38b0f255e0b64d6e..4bd3c3cfc9ab3a9d4a2dfe8190818f09ffa80db9 100644 --- a/arch/blackfin/include/asm/Kbuild +++ b/arch/blackfin/include/asm/Kbuild @@ -10,7 +10,6 @@ generic-y += emergency-restart.h generic-y += errno.h generic-y += fb.h generic-y += futex.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ipcbuf.h diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild index e77e0c1dbe75ee81a189f4b74702a7110c1328fa..2de73391b81eeee4e1eefc95e9e5d5d666f13527 100644 --- a/arch/c6x/include/asm/Kbuild +++ b/arch/c6x/include/asm/Kbuild @@ -15,7 +15,6 @@ generic-y += exec.h generic-y += fb.h generic-y += fcntl.h generic-y += futex.h -generic-y += hash.h generic-y += hw_irq.h generic-y += io.h generic-y += ioctl.h diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild index 2ca489eaadd3092efa8bd6a9bba6a808719d193f..d5f124832fd1e0f337259d5467eb0cd286fb91dd 100644 --- a/arch/cris/include/asm/Kbuild +++ b/arch/cris/include/asm/Kbuild @@ -7,7 +7,6 @@ generic-y += barrier.h generic-y += clkdev.h generic-y += cputime.h generic-y += exec.h -generic-y += hash.h generic-y += irq_work.h generic-y += kvm_para.h generic-y += linkage.h diff --git a/arch/cris/include/uapi/asm/socket.h b/arch/cris/include/uapi/asm/socket.h index ed94e5ed0a238c2750e677ccb806a6bc0a94041a..e2503d9f1869b4e7fead3a5467581169025a80a1 100644 --- a/arch/cris/include/uapi/asm/socket.h +++ b/arch/cris/include/uapi/asm/socket.h @@ -82,6 +82,11 @@ #define SO_BPF_EXTENSIONS 
48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _ASM_SOCKET_H */ diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild index 3caf05cabfc520a06506e6318203421ad7014f66..e3f81b53578e3b1b6f4f24f2a4a3acf88a076e16 100644 --- a/arch/frv/include/asm/Kbuild +++ b/arch/frv/include/asm/Kbuild @@ -2,7 +2,6 @@ generic-y += clkdev.h generic-y += cputime.h generic-y += exec.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h index ca2c6e6f31c6817780d31a246652adcc9847e373..4823ad125578246f1b66b3e59003cd542b790cf5 100644 --- a/arch/frv/include/uapi/asm/socket.h +++ b/arch/frv/include/uapi/asm/socket.h @@ -80,5 +80,10 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _ASM_SOCKET_H */ diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index 5f234a5a23201319e26e9972abd1a31ab44f2d9c..c7a99f860b40f991273273e2a986bf294119416b 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild @@ -16,7 +16,6 @@ generic-y += fb.h generic-y += fcntl.h generic-y += ftrace.h generic-y += hardirq.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild index 747320be9d0e1c4a94bc72cad21ed3b44ab2c3f8..9b41b4bcc073b99737cdda269a4f9688de5ab11a 100644 --- a/arch/ia64/include/asm/Kbuild +++ b/arch/ia64/include/asm/Kbuild @@ -1,7 +1,6 @@ generic-y += clkdev.h generic-y += exec.h -generic-y += hash.h generic-y += irq_work.h generic-y += kvm_para.h generic-y += mcs_spinlock.h diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h index a1b49bac7951929127ed08db549218c2c16ccf89..59be3d87f86d660d5477b1b07c17d61697e642e8 100644 --- a/arch/ia64/include/uapi/asm/socket.h +++ b/arch/ia64/include/uapi/asm/socket.h @@ -89,4 +89,9 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _ASM_IA64_SOCKET_H */ diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild index 3796801d6e0cbe4eb862365b7ce149ebb49ae543..2edc793372fc139cfcdef46761b12abd4c96f68d 100644 --- a/arch/m32r/include/asm/Kbuild +++ b/arch/m32r/include/asm/Kbuild @@ -2,7 +2,6 @@ generic-y += clkdev.h generic-y += cputime.h generic-y += exec.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += module.h diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h index 6c9a24b3aefa3a4f3048c17a7fa06d97b585ec14..7bc4cb27385658014b0d258bc534b24d6e93822f 100644 --- a/arch/m32r/include/uapi/asm/socket.h +++ b/arch/m32r/include/uapi/asm/socket.h @@ -80,4 +80,9 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _ASM_M32R_SOCKET_H */ diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index dbaf9f3065e8da6802e4beb32706a936264d597c..9b6c691874bd5d4f309df4a8e5e90b3bc0087bf2 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild @@ -6,7 +6,6 @@ generic-y += device.h generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ipcbuf.h diff 
--git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild index 7b8111c8f937ba5d5f0820ad64e56cf1e5e4ef6b..0bf5d525b945d3627c05bd1328b86bfb05007dd0 100644 --- a/arch/metag/include/asm/Kbuild +++ b/arch/metag/include/asm/Kbuild @@ -13,7 +13,6 @@ generic-y += fb.h generic-y += fcntl.h generic-y += futex.h generic-y += hardirq.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 448143b8cabd1a8f5e5460d4c79461b0ef6b6bc6..ab564a6db5c32a4dd7ea73361ec0481abacb294f 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild @@ -4,7 +4,6 @@ generic-y += clkdev.h generic-y += cputime.h generic-y += device.h generic-y += exec.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 72e1cf1cab0057f6f5f9fae5dba18308afbc140e..200efeac41813c2a17e237156c6410253f488d8b 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -3,7 +3,6 @@ generic-y += cputime.h generic-y += current.h generic-y += dma-contiguous.h generic-y += emergency-restart.h -generic-y += hash.h generic-y += irq_work.h generic-y += local64.h generic-y += mcs_spinlock.h diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index a14baa218c76f14de988ef106bdac5dadc48aceb..dec3c850f36be661ba80f57ef651dd16ff44466a 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -98,4 +98,9 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 9b55143d19dbad39ad8b7cd7f1b9ced90e8ab2cb..9fd6834a2172ac3cd77115d604a2f07ac8370bc2 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -426,7 +426,7 @@ static inline void emit_mod(unsigned int dst, unsigned int src, u32 *p = &ctx->target[ctx->idx]; uasm_i_divu(&p, dst, src); p = &ctx->target[ctx->idx + 1]; - uasm_i_mflo(&p, dst); + uasm_i_mfhi(&p, dst); } ctx->idx += 2; /* 2 insts */ } @@ -971,7 +971,7 @@ load_ind: break; case BPF_ALU | BPF_MOD | BPF_K: /* A %= k */ - if (k == 1 || optimize_div(&k)) { + if (k == 1) { ctx->flags |= SEEN_A; emit_jit_reg_move(r_A, r_zero, ctx); } else { diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild index 54a062cb9f2cb9236f4bdc9cef3e028777b11f31..f892d9de47d92656646f0a4bd04f3a5f7268a2a7 100644 --- a/arch/mn10300/include/asm/Kbuild +++ b/arch/mn10300/include/asm/Kbuild @@ -3,7 +3,6 @@ generic-y += barrier.h generic-y += clkdev.h generic-y += cputime.h generic-y += exec.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h index 6aa3ce1854aa9523d46bc28851eddabd59edeb37..cab7d6d50051b1af1533630567d9114ab9b7ddb8 100644 --- a/arch/mn10300/include/uapi/asm/socket.h +++ b/arch/mn10300/include/uapi/asm/socket.h @@ -80,4 +80,9 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _ASM_SOCKET_H */ diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild index bb160be0dc28cc4f422aac7950d3e23eb6c4134b..01c75f36e8b3b7506512367aa114d0469d1a7df1 100644 --- 
a/arch/nios2/include/asm/Kbuild +++ b/arch/nios2/include/asm/Kbuild @@ -19,7 +19,6 @@ generic-y += fcntl.h generic-y += ftrace.h generic-y += futex.h generic-y += hardirq.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild index 89b61d7dc790ee7b0127072b1a66d181eae11522..91f1f360a7c4da69fe283ede1fde899bad67c01f 100644 --- a/arch/openrisc/include/asm/Kbuild +++ b/arch/openrisc/include/asm/Kbuild @@ -25,7 +25,6 @@ generic-y += fcntl.h generic-y += ftrace.h generic-y += futex.h generic-y += hardirq.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index ffb024b8423fa55a282fbfa05fbf4fb13464d4c4..8686237a3c3c55251070264c35190c391fb594eb 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild @@ -7,7 +7,6 @@ generic-y += device.h generic-y += div64.h generic-y += emergency-restart.h generic-y += exec.h -generic-y += hash.h generic-y += hw_irq.h generic-y += irq_regs.h generic-y += irq_work.h diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index fe35ceacf0e72cad69a43d9b1ce7b8f5ec3da98a..a5cd40cd8ee1cd53109c07d9b94543d9d7045c97 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -79,4 +79,9 @@ #define SO_BPF_EXTENSIONS 0x4029 +#define SO_INCOMING_CPU 0x402A + +#define SO_ATTACH_BPF 0x402B +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig index dc939de9b5b0d8fa8022c01f14acc630d7afece8..b4c4b469e320e56e20482d782fd6b929b90eb8cf 100644 --- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig +++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig @@ -100,7 +100,6 @@ CONFIG_NETDEVICES=y CONFIG_BONDING=m CONFIG_DUMMY=m CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y CONFIG_TUN=m # CONFIG_NET_VENDOR_3COM is not set CONFIG_FS_ENET=y diff --git a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig index e5a648115ada0c6396f65c12a1de19622ee95099..7cb9719abf3dd0c264c9621443c7a2a10fd981e4 100644 --- a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig +++ b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig @@ -113,7 +113,6 @@ CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y CONFIG_SLIP_MODE_SLIP6=y CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set diff --git a/arch/powerpc/configs/86xx/gef_sbc310_defconfig b/arch/powerpc/configs/86xx/gef_sbc310_defconfig index 8317b6010ba6b288503ae2b2ccf6bea0833b31d4..ecabf625d2497d0d37d2ff77ee5e27c018bf8b22 100644 --- a/arch/powerpc/configs/86xx/gef_sbc310_defconfig +++ b/arch/powerpc/configs/86xx/gef_sbc310_defconfig @@ -114,7 +114,6 @@ CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y CONFIG_SLIP_MODE_SLIP6=y CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig index 124d66f0282c8ef9cd1a222e49911beebaaf8cac..4a4a86fb0d3d240d7a07097a0fbeabd00d3101ab 100644 --- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig +++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig @@ -165,7 +165,6 @@ CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y 
CONFIG_SLIP_MODE_SLIP6=y CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y CONFIG_INPUT_FF_MEMLESS=m # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set diff --git a/arch/powerpc/configs/86xx/sbc8641d_defconfig b/arch/powerpc/configs/86xx/sbc8641d_defconfig index 1e151594c691585bb2948598587abac4d0118b30..99ea8746bbafcb9d3f2af17a858a9d6228afc598 100644 --- a/arch/powerpc/configs/86xx/sbc8641d_defconfig +++ b/arch/powerpc/configs/86xx/sbc8641d_defconfig @@ -167,7 +167,6 @@ CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y CONFIG_SLIP_MODE_SLIP6=y CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig index 59734916986a9e53cd17ec2b1c9a27ec445d3b85..8a08d6dcb0b4734939af242356509d094cffe88c 100644 --- a/arch/powerpc/configs/c2k_defconfig +++ b/arch/powerpc/configs/c2k_defconfig @@ -211,7 +211,6 @@ CONFIG_MV643XX_ETH=y # CONFIG_NETDEV_10000 is not set # CONFIG_ATM_DRIVERS is not set CONFIG_NETCONSOLE=m -CONFIG_NETPOLL_TRAP=y # CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_EVDEV=y # CONFIG_INPUT_KEYBOARD is not set diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 20bc5e2d368d05601fd267ac81c2b375c9f95e6d..5830d735c5c3a56b948f2eab77fb4c88c2e50ff8 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -154,7 +154,6 @@ CONFIG_WINDFARM_PM121=y CONFIG_BONDING=m CONFIG_DUMMY=m CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y CONFIG_TUN=m CONFIG_VIRTIO_NET=m CONFIG_VHOST_NET=m diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig index c3a3269b08657d5c29208030dd55a978da3a6308..67885b2d70aae67317f920384783d8d3c3d112e3 100644 --- a/arch/powerpc/configs/ppc64e_defconfig +++ b/arch/powerpc/configs/ppc64e_defconfig @@ -103,7 +103,6 @@ CONFIG_NETDEVICES=y CONFIG_BONDING=m CONFIG_DUMMY=m CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y CONFIG_TUN=m CONFIG_VORTEX=y CONFIG_ACENIC=y diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index fec5870f18180140ff7feef928073b461f2e5440..ad6d6b5af7d7b26a6cf7d3637330d0fd0cfa4377 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -629,7 +629,6 @@ CONFIG_SLIP_SMART=y CONFIG_NET_FC=y CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y -CONFIG_NETPOLL_TRAP=y CONFIG_VIRTIO_NET=m # CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_JOYDEV=m diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index dd2a9cab4b5012b69c00694c13347f8a52824f9c..1f97364017c747ea474aafef26fba1cc437bb915 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -133,7 +133,6 @@ CONFIG_DM_UEVENT=y CONFIG_BONDING=m CONFIG_DUMMY=m CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y CONFIG_TUN=m CONFIG_VIRTIO_NET=m CONFIG_VHOST_NET=m diff --git a/arch/powerpc/configs/pseries_le_defconfig b/arch/powerpc/configs/pseries_le_defconfig index d2008887eb8c7fd8f3271b73438f8d5c578cb2b4..ac7ca585282795b6b0c9395083c049d90b37677c 100644 --- a/arch/powerpc/configs/pseries_le_defconfig +++ b/arch/powerpc/configs/pseries_le_defconfig @@ -134,7 +134,6 @@ CONFIG_DM_UEVENT=y CONFIG_BONDING=m CONFIG_DUMMY=m CONFIG_NETCONSOLE=y -CONFIG_NETPOLL_TRAP=y CONFIG_TUN=m CONFIG_VIRTIO_NET=m CONFIG_VHOST_NET=m diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index 
31e8f59aff38894c0b16629b2ce7cc18fecbfb2c..382b28e364dcdd5130c70737a5afa4c609785bd4 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -1,6 +1,5 @@ generic-y += clkdev.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 6f8536208049f4c8f46708e45b4153927c36658a..1a5287759fc86cdd1918abfcf7b2645031ac77c6 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -204,6 +204,7 @@ #define PPC_INST_ERATSX_DOT 0x7c000127 /* Misc instructions for BPF compiler */ +#define PPC_INST_LBZ 0x88000000 #define PPC_INST_LD 0xe8000000 #define PPC_INST_LHZ 0xa0000000 #define PPC_INST_LHBRX 0x7c00062c diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h index a9c3e2e18c054a1e952fe33599401de57c6a6544..c046666038f85b7c09f1f766affe7f561be1590b 100644 --- a/arch/powerpc/include/uapi/asm/socket.h +++ b/arch/powerpc/include/uapi/asm/socket.h @@ -87,4 +87,9 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _ASM_POWERPC_SOCKET_H */ diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index 9aee27c582dcbcbd34a7f9acd2f0aabe6ffb3445..c406aa95b2bc5970b1aa7d3033cc8518faa93089 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -87,6 +87,9 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); #define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \ ___PPC_RA(base) | ((i) & 0xfffc)) + +#define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \ + ___PPC_RA(base) | IMM_L(i)) #define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \ ___PPC_RA(base) | IMM_L(i)) #define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \ @@ -96,6 +99,10 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); #define PPC_LHBRX(r, base, b) EMIT(PPC_INST_LHBRX | ___PPC_RT(r) | \ ___PPC_RA(base) | ___PPC_RB(b)) /* Convenience helpers for the above with 'far' offsets: */ +#define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LBZ(r, base, i); \ + else { PPC_ADDIS(r, base, IMM_HA(i)); \ + PPC_LBZ(r, r, IMM_L(i)); } } while(0) + #define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \ else { PPC_ADDIS(r, base, IMM_HA(i)); \ PPC_LD(r, r, IMM_L(i)); } } while(0) diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index cbae2dfd053cafc22540572652528d4dbedc91fe..1ca125b9c226070eefca744857b203648055b131 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -181,6 +181,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, } break; case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */ + case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */ ctx->seen |= SEEN_XREG; PPC_CMPWI(r_X, 0); if (ctx->pc_ret0 != -1) { @@ -190,9 +191,13 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, PPC_LI(r_ret, 0); PPC_JMP(exit_addr); } - PPC_DIVWU(r_scratch1, r_A, r_X); - PPC_MUL(r_scratch1, r_X, r_scratch1); - PPC_SUB(r_A, r_A, r_scratch1); + if (code == (BPF_ALU | BPF_MOD | BPF_X)) { + PPC_DIVWU(r_scratch1, r_A, r_X); + PPC_MUL(r_scratch1, r_X, r_scratch1); + PPC_SUB(r_A, r_A, r_scratch1); + } else { + PPC_DIVWU(r_A, r_A, r_X); + } break; case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */ PPC_LI32(r_scratch2, K); @@ -200,22 +205,6 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, PPC_MUL(r_scratch1, 
r_scratch2, r_scratch1); PPC_SUB(r_A, r_A, r_scratch1); break; - case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */ - ctx->seen |= SEEN_XREG; - PPC_CMPWI(r_X, 0); - if (ctx->pc_ret0 != -1) { - PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]); - } else { - /* - * Exit, returning 0; first pass hits here - * (longer worst-case code size). - */ - PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12); - PPC_LI(r_ret, 0); - PPC_JMP(exit_addr); - } - PPC_DIVWU(r_A, r_A, r_X); - break; case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */ if (K == 1) break; @@ -361,6 +350,11 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, protocol)); break; case BPF_ANC | SKF_AD_IFINDEX: + case BPF_ANC | SKF_AD_HATYPE: + BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, + ifindex) != 4); + BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, + type) != 2); PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, dev)); PPC_CMPDI(r_scratch1, 0); @@ -368,14 +362,18 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]); } else { /* Exit, returning 0; first pass hits here. */ - PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12); + PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12); PPC_LI(r_ret, 0); PPC_JMP(exit_addr); } - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, - ifindex) != 4); - PPC_LWZ_OFFS(r_A, r_scratch1, + if (code == (BPF_ANC | SKF_AD_IFINDEX)) { + PPC_LWZ_OFFS(r_A, r_scratch1, offsetof(struct net_device, ifindex)); + } else { + PPC_LHZ_OFFS(r_A, r_scratch1, + offsetof(struct net_device, type)); + } + break; case BPF_ANC | SKF_AD_MARK: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); @@ -407,6 +405,11 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, queue_mapping)); break; + case BPF_ANC | SKF_AD_PKTTYPE: + PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET()); + PPC_ANDI(r_A, r_A, PKT_TYPE_MAX); + PPC_SRWI(r_A, r_A, 5); + break; case BPF_ANC | SKF_AD_CPU: #ifdef CONFIG_SMP /* diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild index 773f866765882fc3de528298c3e97ff9095f5f72..c631f98fd5242b1837fcc0525286fdcd9ec1771f 100644 --- a/arch/s390/include/asm/Kbuild +++ b/arch/s390/include/asm/Kbuild @@ -1,7 +1,6 @@ generic-y += clkdev.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h index e031332096d7c7b23b5953680289e8f3bcc3b378..296942d56e6a077b2411cb3738b68e66870d3152 100644 --- a/arch/s390/include/uapi/asm/socket.h +++ b/arch/s390/include/uapi/asm/socket.h @@ -86,4 +86,9 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _ASM_SOCKET_H */ diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild index 46461c19f284fbeef42e2f4bdbd3b1381613cac8..83ed116d414c1a9e66405d3f079eb708f8390759 100644 --- a/arch/score/include/asm/Kbuild +++ b/arch/score/include/asm/Kbuild @@ -5,7 +5,6 @@ header-y += generic-y += barrier.h generic-y += clkdev.h generic-y += cputime.h -generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild index 5a6c9acff0d21eea4fcb802624eecbe854da9400..654ebb6bd5d8daddc6832b81a1d69d1de4bb404a 100644 --- a/arch/sh/include/asm/Kbuild +++ b/arch/sh/include/asm/Kbuild @@ -8,7 +8,6 @@ generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h generic-y += fcntl.h -generic-y += 
hash.h generic-y += ioctl.h generic-y += ipcbuf.h generic-y += irq_regs.h diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index f5f94ce1692c42ddab2088c1c0a8d480a6294bfa..94f36e7086a7699ae4ce5f9c6cafcb2391759243 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -6,7 +6,6 @@ generic-y += cputime.h generic-y += div64.h generic-y += emergency-restart.h generic-y += exec.h -generic-y += hash.h generic-y += irq_regs.h generic-y += irq_work.h generic-y += linkage.h diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h index d758c8d8f47d90b9cefc18fc5ba973692b122a41..fb124feb363b25cddb1196004250cfa11f62b5c5 100644 --- a/arch/sparc/include/asm/vio.h +++ b/arch/sparc/include/asm/vio.h @@ -247,6 +247,25 @@ struct vio_net_desc { struct ldc_trans_cookie cookies[0]; }; +struct vio_net_dext { + u8 flags; +#define VNET_PKT_HASH 0x01 +#define VNET_PKT_HCK_IPV4_HDRCKSUM 0x02 +#define VNET_PKT_HCK_FULLCKSUM 0x04 +#define VNET_PKT_IPV4_LSO 0x08 +#define VNET_PKT_HCK_IPV4_HDRCKSUM_OK 0x10 +#define VNET_PKT_HCK_FULLCKSUM_OK 0x20 + + u8 vnet_hashval; + u16 ipv4_lso_mss; + u32 resv3; +}; + +static inline struct vio_net_dext *vio_net_ext(struct vio_net_desc *desc) +{ + return (struct vio_net_dext *)&desc->cookies[2]; +} + #define VIO_MAX_RING_COOKIES 24 struct vio_dring_state { diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index 54d9608681b6947ae25dab008f808841d96125c0..e6a16c40be5f0d548ca6c0a5127b0c1a85dca890 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -76,6 +76,11 @@ #define SO_BPF_EXTENSIONS 0x0032 +#define SO_INCOMING_CPU 0x0033 + +#define SO_ATTACH_BPF 0x0034 +#define SO_DETACH_BPF SO_DETACH_FILTER + /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x5001 #define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002 diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig index 91de7dd7427ff60601cdf7c056337ee5aaf4425d..37dc9364c4a12485b513429af8288e65a725db37 100644 --- a/arch/tile/configs/tilegx_defconfig +++ b/arch/tile/configs/tilegx_defconfig @@ -218,7 +218,6 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y -CONFIG_NETPOLL_TRAP=y CONFIG_TUN=y CONFIG_VETH=m CONFIG_NET_DSA_MV88E6060=y diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig index c7702b7ab7a55eec58e24a54a3e2146df17ea9fb..76a2781dec2c879bf1dde6d861bd22155094ca0b 100644 --- a/arch/tile/configs/tilepro_defconfig +++ b/arch/tile/configs/tilepro_defconfig @@ -337,7 +337,6 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y -CONFIG_NETPOLL_TRAP=y CONFIG_TUN=y CONFIG_VETH=m CONFIG_NET_DSA_MV88E6060=y diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild index e6462b8a62842534f8e8ffe9eb628e586aaf8ec3..b4c488b657456f3e5a4fa4b74c40a3a427b5cc28 100644 --- a/arch/tile/include/asm/Kbuild +++ b/arch/tile/include/asm/Kbuild @@ -11,7 +11,6 @@ generic-y += errno.h generic-y += exec.h generic-y += fb.h generic-y += fcntl.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 244b12c8cb391b9e7fec48c2846a818a9e4db93e..9176fa11d49b917e8d417b1fcdc7daaa271d9cfd 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild @@ -10,7 +10,6 @@ generic-y += exec.h generic-y += ftrace.h generic-y += 
futex.h generic-y += hardirq.h -generic-y += hash.h generic-y += hw_irq.h generic-y += io.h generic-y += irq_regs.h diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index 5a2bb53faa42137607feafad23e3896e9249d078..3e0c19d0f4c5d9e5f4bef9d8cbff9283f9259738 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild @@ -16,7 +16,6 @@ generic-y += fcntl.h generic-y += ftrace.h generic-y += futex.h generic-y += hardirq.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/x86/include/asm/hash.h b/arch/x86/include/asm/hash.h deleted file mode 100644 index e8c58f88b1d41c664e13da824d1f548f1ceb9920..0000000000000000000000000000000000000000 --- a/arch/x86/include/asm/hash.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _ASM_X86_HASH_H -#define _ASM_X86_HASH_H - -struct fast_hash_ops; -extern void setup_arch_fast_hash(struct fast_hash_ops *ops); - -#endif /* _ASM_X86_HASH_H */ diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index db92793b7e23edaf7a252fe3d72daf4f893d5c31..1530afb07c85443aac9c6e1949efd4d58e8fb51a 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -23,7 +23,7 @@ lib-y += memcpy_$(BITS).o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o -obj-y += msr.o msr-reg.o msr-reg-export.o hash.o +obj-y += msr.o msr-reg.o msr-reg-export.o ifeq ($(CONFIG_X86_32),y) obj-y += atomic64_32.o diff --git a/arch/x86/lib/hash.c b/arch/x86/lib/hash.c deleted file mode 100644 index ff4fa51a5b1f5c0ddffcfa64ce72556b012b084b..0000000000000000000000000000000000000000 --- a/arch/x86/lib/hash.c +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Some portions derived from code covered by the following notice: - * - * Copyright (c) 2010-2013 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include <linux/hash.h> -#include <linux/init.h> - -#include <asm/processor.h> -#include <asm/cpufeature.h> -#include <asm/hash.h> - -static inline u32 crc32_u32(u32 crc, u32 val) -{ -#ifdef CONFIG_AS_CRC32 - asm ("crc32l %1,%0\n" : "+r" (crc) : "rm" (val)); -#else - asm (".byte 0xf2, 0x0f, 0x38, 0xf1, 0xc1" : "+a" (crc) : "c" (val)); -#endif - return crc; -} - -static u32 intel_crc4_2_hash(const void *data, u32 len, u32 seed) -{ - const u32 *p32 = (const u32 *) data; - u32 i, tmp = 0; - - for (i = 0; i < len / 4; i++) - seed = crc32_u32(seed, *p32++); - - switch (len & 3) { - case 3: - tmp |= *((const u8 *) p32 + 2) << 16; - /* fallthrough */ - case 2: - tmp |= *((const u8 *) p32 + 1) << 8; - /* fallthrough */ - case 1: - tmp |= *((const u8 *) p32); - seed = crc32_u32(seed, tmp); - break; - } - - return seed; -} - -static u32 intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed) -{ - const u32 *p32 = (const u32 *) data; - u32 i; - - for (i = 0; i < len; i++) - seed = crc32_u32(seed, *p32++); - - return seed; -} - -void __init setup_arch_fast_hash(struct fast_hash_ops *ops) -{ - if (cpu_has_xmm4_2) { - ops->hash = intel_crc4_2_hash; - ops->hash2 = intel_crc4_2_hash2; - } -} diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 3f627345d51cbd1a3dd453bb37cc82d7f75d5bb0..987514396c1e443376bffe70e050b72f86509102 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -24,7 +24,7 @@ extern u8 sk_load_byte_positive_offset[]; extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[]; extern u8 sk_load_byte_negative_offset[]; -static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) +static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) { if (len == 1) *ptr = bytes; @@ -52,12 +52,12 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) #define EMIT4_off32(b1, b2, b3, b4, off) \ do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0) -static inline bool is_imm8(int value) +static bool is_imm8(int value) { return value <= 127 && value >= -128; } -static inline bool is_simm32(s64 value) +static bool is_simm32(s64 value) { return value == (s64) (s32) value; } @@ -94,7 +94,7 @@ static int bpf_size_to_x86_bytes(int bpf_size) #define X86_JGE 0x7D #define X86_JG 0x7F -static inline void bpf_flush_icache(void *start, void *end) +static void bpf_flush_icache(void *start, void *end) { mm_segment_t old_fs = get_fs(); @@ -133,24 +133,24 @@ static const int reg2hex[] = { * which need extra byte of encoding.
* rax,rcx,...,rbp have simpler encoding */ -static inline bool is_ereg(u32 reg) +static bool is_ereg(u32 reg) { - if (reg == BPF_REG_5 || reg == AUX_REG || - (reg >= BPF_REG_7 && reg <= BPF_REG_9)) - return true; - else - return false; + return (1 << reg) & (BIT(BPF_REG_5) | + BIT(AUX_REG) | + BIT(BPF_REG_7) | + BIT(BPF_REG_8) | + BIT(BPF_REG_9)); } /* add modifiers if 'reg' maps to x64 registers r8..r15 */ -static inline u8 add_1mod(u8 byte, u32 reg) +static u8 add_1mod(u8 byte, u32 reg) { if (is_ereg(reg)) byte |= 1; return byte; } -static inline u8 add_2mod(u8 byte, u32 r1, u32 r2) +static u8 add_2mod(u8 byte, u32 r1, u32 r2) { if (is_ereg(r1)) byte |= 1; @@ -160,13 +160,13 @@ static inline u8 add_2mod(u8 byte, u32 r1, u32 r2) } /* encode 'dst_reg' register into x64 opcode 'byte' */ -static inline u8 add_1reg(u8 byte, u32 dst_reg) +static u8 add_1reg(u8 byte, u32 dst_reg) { return byte + reg2hex[dst_reg]; } /* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */ -static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) +static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) { return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3); } @@ -178,7 +178,7 @@ static void jit_fill_hole(void *area, unsigned int size) } struct jit_context { - unsigned int cleanup_addr; /* epilogue code offset */ + int cleanup_addr; /* epilogue code offset */ bool seen_ld_abs; }; @@ -192,6 +192,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, struct bpf_insn *insn = bpf_prog->insnsi; int insn_cnt = bpf_prog->len; bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0); + bool seen_exit = false; u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; int i; int proglen = 0; @@ -854,10 +855,11 @@ common_load: goto common_load; case BPF_JMP | BPF_EXIT: - if (i != insn_cnt - 1) { + if (seen_exit) { jmp_offset = ctx->cleanup_addr - addrs[i]; goto emit_jmp; } + seen_exit = true; /* update cleanup_addr */ ctx->cleanup_addr = proglen; /* mov rbx, qword ptr [rbp-X] */ diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 105d38922c442624fd4bfadb5a5e475857adf041..86a9ab2e2ca92c3b98ff6c2540528bcc85cb0d38 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -9,7 +9,6 @@ generic-y += errno.h generic-y += exec.h generic-y += fcntl.h generic-y += hardirq.h -generic-y += hash.h generic-y += ioctl.h generic-y += irq_regs.h generic-y += irq_work.h diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h index 39acec0cf0b1d500c1c40f9b523ef3a9a142c2f1..4120af08616055708c8b64c70d4d256100d49ac7 100644 --- a/arch/xtensa/include/uapi/asm/socket.h +++ b/arch/xtensa/include/uapi/asm/socket.h @@ -91,4 +91,9 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* _XTENSA_SOCKET_H */ diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 6a3ad801158531435848a40936a9bd74a7a522e6..bc21f520d489d0d3d445269ff282faf1b80c1905 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -399,7 +399,7 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con) { struct cmsghdr *cmsg; - for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { + for_each_cmsghdr(cmsg, msg) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; if (cmsg->cmsg_level != SOL_ALG) diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 850246206b1258a697f83e86c39acc9b0a17f973..83cd2cc49c9f2647755282a2ba1cf4b5f2e122b9 100644 --- a/crypto/algif_hash.c +++ 
b/crypto/algif_hash.c @@ -42,7 +42,7 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock, struct alg_sock *ask = alg_sk(sk); struct hash_ctx *ctx = ask->private; unsigned long iovlen; - struct iovec *iov; + const struct iovec *iov; long copied = 0; int err; @@ -58,7 +58,7 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock, ctx->more = 0; - for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0; + for (iov = msg->msg_iter.iov, iovlen = msg->msg_iter.nr_segs; iovlen > 0; iovlen--, iov++) { unsigned long seglen = iov->iov_len; char __user *from = iov->iov_base; @@ -174,7 +174,7 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock, goto unlock; } - err = memcpy_toiovec(msg->msg_iov, ctx->result, len); + err = memcpy_to_msg(msg, ctx->result, len); unlock: release_sock(sk); diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 83187f497c7c65dddd2248170a50976e568d82e5..4f45dab24648f267ae562dc2a6d266475ec11a14 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -298,9 +298,9 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock, len = min_t(unsigned long, len, PAGE_SIZE - sg->offset - sg->length); - err = memcpy_fromiovec(page_address(sg_page(sg)) + - sg->offset + sg->length, - msg->msg_iov, len); + err = memcpy_from_msg(page_address(sg_page(sg)) + + sg->offset + sg->length, + msg, len); if (err) goto unlock; @@ -337,8 +337,8 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock, if (!sg_page(sg + i)) goto unlock; - err = memcpy_fromiovec(page_address(sg_page(sg + i)), - msg->msg_iov, plen); + err = memcpy_from_msg(page_address(sg_page(sg + i)), + msg, plen); if (err) { __free_page(sg_page(sg + i)); sg_assign_page(sg + i, NULL); @@ -429,13 +429,13 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock, struct skcipher_sg_list *sgl; struct scatterlist *sg; unsigned long iovlen; - struct iovec *iov; + const struct iovec *iov; int err = -EAGAIN; int used; long copied = 0; lock_sock(sk); - for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0; + for (iov = msg->msg_iter.iov, iovlen = msg->msg_iter.nr_segs; iovlen > 0; iovlen--, iov++) { unsigned long seglen = iov->iov_len; char __user *from = iov->iov_base; diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index d65975aba4ecfdd3297d0fc1ad8151f6560b1fda..c7fab3ee14eef1e28af86e69a253925d30f491ea 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -356,6 +356,8 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb, if (skb) { paddr = pci_map_single(eni_dev->pci_dev,skb->data,skb->len, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(eni_dev->pci_dev, paddr)) + goto dma_map_error; ENI_PRV_PADDR(skb) = paddr; if (paddr & 3) printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d has " @@ -481,6 +483,7 @@ trouble: if (paddr) pci_unmap_single(eni_dev->pci_dev,paddr,skb->len, PCI_DMA_FROMDEVICE); +dma_map_error: if (skb) dev_kfree_skb_irq(skb); return -1; } diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h index b6412b2d748dd11d6236963432c3984dd537f704..314ae4032f3e7347ee2f4d391bab32964fad0594 100644 --- a/drivers/bcma/bcma_private.h +++ b/drivers/bcma/bcma_private.h @@ -24,6 +24,7 @@ struct bcma_bus; /* main.c */ bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value, int timeout); +void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core); int bcma_bus_register(struct bcma_bus *bus); void bcma_bus_unregister(struct bcma_bus *bus); int __init 
bcma_bus_early_register(struct bcma_bus *bus, diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c index b068f98920a8385446f03ee3e6fe32d369aedc3c..19f679667ca48d46626b04c9a2f43db4000554f2 100644 --- a/drivers/bcma/driver_chipcommon.c +++ b/drivers/bcma/driver_chipcommon.c @@ -339,7 +339,7 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc) return; } - irq = bcma_core_irq(cc->core); + irq = bcma_core_irq(cc->core, 0); /* Determine the registers of the UARTs */ cc->nr_serial_ports = (cc->capabilities & BCMA_CC_CAP_NRUART); diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c index 706b9ae0dcfbdb82cf73ae41139d2e4b82ab4693..598a6cd9028a70a239f283101d8f65a33e9fdf77 100644 --- a/drivers/bcma/driver_gpio.c +++ b/drivers/bcma/driver_gpio.c @@ -152,7 +152,7 @@ static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc) handle_simple_irq); } - hwirq = bcma_core_irq(cc->core); + hwirq = bcma_core_irq(cc->core, 0); err = request_irq(hwirq, bcma_gpio_irq_handler, IRQF_SHARED, "gpio", cc); if (err) @@ -183,7 +183,7 @@ static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc) return; bcma_cc_mask32(cc, BCMA_CC_IRQMASK, ~BCMA_CC_IRQ_GPIO); - free_irq(bcma_core_irq(cc->core), cc); + free_irq(bcma_core_irq(cc->core, 0), cc); for (gpio = 0; gpio < chip->ngpio; gpio++) { int irq = irq_find_mapping(cc->irq_domain, gpio); diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c index 004d6aa671cecbf82f0df9da6078f868683de361..5ec69c3d409dd31801f928230dc55109186626f0 100644 --- a/drivers/bcma/driver_mips.c +++ b/drivers/bcma/driver_mips.c @@ -115,7 +115,7 @@ static u32 bcma_core_mips_irqflag(struct bcma_device *dev) * If disabled, 5 is returned. * If not supported, 6 is returned. */ -static unsigned int bcma_core_mips_irq(struct bcma_device *dev) +unsigned int bcma_core_mips_irq(struct bcma_device *dev) { struct bcma_device *mdev = dev->bus->drv_mips.core; u32 irqflag; @@ -133,13 +133,6 @@ static unsigned int bcma_core_mips_irq(struct bcma_device *dev) return 5; } -unsigned int bcma_core_irq(struct bcma_device *dev) -{ - unsigned int mips_irq = bcma_core_mips_irq(dev); - return mips_irq <= 4 ? 
mips_irq + 2 : 0; -} -EXPORT_SYMBOL(bcma_core_irq); - static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq) { unsigned int oldirq = bcma_core_mips_irq(dev); @@ -423,7 +416,7 @@ void bcma_core_mips_init(struct bcma_drv_mips *mcore) break; default: list_for_each_entry(core, &bus->cores, list) { - core->irq = bcma_core_irq(core); + core->irq = bcma_core_irq(core, 0); } bcma_err(bus, "Unknown device (0x%x) found, can not configure IRQs\n", diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c index c3d7b03c2fdc71474b954326a96f8ae183c1e3a8..c8a6b741967b390e20470f15d55d493fa9bc8ce8 100644 --- a/drivers/bcma/driver_pci_host.c +++ b/drivers/bcma/driver_pci_host.c @@ -593,7 +593,7 @@ int bcma_core_pci_plat_dev_init(struct pci_dev *dev) pr_info("PCI: Fixing up device %s\n", pci_name(dev)); /* Fix up interrupt lines */ - dev->irq = bcma_core_irq(pc_host->pdev->core); + dev->irq = bcma_core_irq(pc_host->pdev->core, 0); pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); readrq = pcie_get_readrq(dev); @@ -617,6 +617,6 @@ int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev) pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host, pci_ops); - return bcma_core_irq(pc_host->pdev->core); + return bcma_core_irq(pc_host->pdev->core, 0); } EXPORT_SYMBOL(bcma_core_pci_pcibios_map_irq); diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 1000955ce09d870db23979e3950d75e32b75cfc1..534e1337766d2d0ba9d7e8d220a30434da23ef5d 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -11,6 +11,7 @@ #include #include #include +#include MODULE_DESCRIPTION("Broadcom's specific AMBA driver"); MODULE_LICENSE("GPL"); @@ -153,6 +154,46 @@ static struct device_node *bcma_of_find_child_device(struct platform_device *par return NULL; } +static int bcma_of_irq_parse(struct platform_device *parent, + struct bcma_device *core, + struct of_phandle_args *out_irq, int num) +{ + __be32 laddr[1]; + int rc; + + if (core->dev.of_node) { + rc = of_irq_parse_one(core->dev.of_node, num, out_irq); + if (!rc) + return rc; + } + + out_irq->np = parent->dev.of_node; + out_irq->args_count = 1; + out_irq->args[0] = num; + + laddr[0] = cpu_to_be32(core->addr); + return of_irq_parse_raw(laddr, out_irq); +} + +static unsigned int bcma_of_get_irq(struct platform_device *parent, + struct bcma_device *core, int num) +{ + struct of_phandle_args out_irq; + int ret; + + if (!parent || !parent->dev.of_node) + return 0; + + ret = bcma_of_irq_parse(parent, core, &out_irq, num); + if (ret) { + bcma_debug(core->bus, "bcma_of_get_irq() failed with rc=%d\n", + ret); + return 0; + } + + return irq_create_of_mapping(&out_irq); +} + static void bcma_of_fill_device(struct platform_device *parent, struct bcma_device *core) { @@ -161,18 +202,47 @@ static void bcma_of_fill_device(struct platform_device *parent, node = bcma_of_find_child_device(parent, core); if (node) core->dev.of_node = node; + + core->irq = bcma_of_get_irq(parent, core, 0); } #else static void bcma_of_fill_device(struct platform_device *parent, struct bcma_device *core) { } +static inline unsigned int bcma_of_get_irq(struct platform_device *parent, + struct bcma_device *core, int num) +{ + return 0; +} #endif /* CONFIG_OF */ -static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core) +unsigned int bcma_core_irq(struct bcma_device *core, int num) { - int err; + struct bcma_bus *bus = core->bus; + unsigned int mips_irq; + + switch (bus->hosttype) { + case BCMA_HOSTTYPE_PCI: + return bus->host_pci->irq; + 
case BCMA_HOSTTYPE_SOC: + if (bus->drv_mips.core && num == 0) { + mips_irq = bcma_core_mips_irq(core); + return mips_irq <= 4 ? mips_irq + 2 : 0; + } + if (bus->host_pdev) + return bcma_of_get_irq(bus->host_pdev, core, num); + return 0; + case BCMA_HOSTTYPE_SDIO: + return 0; + } + return 0; +} +EXPORT_SYMBOL(bcma_core_irq); + +void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core) +{ core->dev.release = bcma_release_core_dev; core->dev.bus = &bcma_bus_type; dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index); @@ -196,6 +266,11 @@ static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core) case BCMA_HOSTTYPE_SDIO: break; } +} + +static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core) +{ + int err; err = device_register(&core->dev); if (err) { diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c index 14b56561a36f10713cc5c470b838b25da4687999..917520776879255c94350f5458e079ecb07df062 100644 --- a/drivers/bcma/scan.c +++ b/drivers/bcma/scan.c @@ -505,6 +505,7 @@ int bcma_bus_scan(struct bcma_bus *bus) bus->nr_cores++; other_core = bcma_find_core_reverse(bus, core->id.id); core->core_unit = (other_core == NULL) ? 0 : other_core->core_unit + 1; + bcma_prepare_core(bus, core); bcma_info(bus, "Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n", core->core_index, bcma_device_name(&core->id), diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 4547dc238fc7022967f9ebc380b2e8e2d22d19ae..364f080768d02b2a13ea621da81f880685c39579 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -210,6 +210,7 @@ config BT_MRVL_SDIO tristate "Marvell BT-over-SDIO driver" depends on BT_MRVL && MMC select FW_LOADER + select WANT_DEV_COREDUMP help The driver for Marvell Bluetooth chipsets with SDIO interface. 
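The bcma rework above gives bcma_core_irq() an explicit IRQ line index in addition to the core: PCI hosts still return the single host interrupt for any index, SoC hosts route line 0 through the MIPS core as before, and any further lines are resolved from the device-tree interrupt-map. A minimal sketch of how a core driver might consume the new API follows; this is illustration only, and the example_* names are hypothetical, not part of the patch:

#include <linux/bcma/bcma.h>
#include <linux/errno.h>
#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* acknowledge and handle the core's interrupt here */
	return IRQ_HANDLED;
}

/* Hypothetical probe fragment: request line 0, plus an optional second
 * line that only exists when the device tree maps it.  bcma_core_irq()
 * returns 0 for lines it cannot resolve; on PCI hosts every index maps
 * to the same host IRQ, so the second line is requested only when it
 * differs from the first. */
static int example_core_probe(struct bcma_device *core)
{
	unsigned int irq0 = bcma_core_irq(core, 0);
	unsigned int irq1 = bcma_core_irq(core, 1);
	int err;

	if (!irq0)
		return -ENXIO;

	err = request_irq(irq0, example_isr, IRQF_SHARED, "example", core);
	if (err)
		return err;

	if (irq1 && irq1 != irq0) {
		err = request_irq(irq1, example_isr, IRQF_SHARED,
				  "example", core);
		if (err)
			free_irq(irq0, core);
	}

	return err;
}

Since the SDIO host type reports no IRQ at all, checking for a zero return keeps the same probe path working across PCI, SoC and SDIO buses.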
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index d85ced27ebd58f5a40b1881032daced2aedf82b0..fce7588962807771a29ce1336ecc559f7e235216 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -79,6 +79,7 @@ static const struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x0489, 0xe057) }, { USB_DEVICE(0x0489, 0xe056) }, { USB_DEVICE(0x0489, 0xe05f) }, + { USB_DEVICE(0x0489, 0xe078) }, { USB_DEVICE(0x04c5, 0x1330) }, { USB_DEVICE(0x04CA, 0x3004) }, { USB_DEVICE(0x04CA, 0x3005) }, @@ -105,6 +106,7 @@ static const struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x13d3, 0x3375) }, { USB_DEVICE(0x13d3, 0x3393) }, { USB_DEVICE(0x13d3, 0x3402) }, + { USB_DEVICE(0x13d3, 0x3408) }, { USB_DEVICE(0x13d3, 0x3432) }, /* Atheros AR5BBU12 with sflash firmware */ @@ -130,6 +132,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, @@ -156,6 +159,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU22 with sflash firmware */ diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c index 023d35e3c7a75b573a52d99cbda5bd98c64e017f..1828ed8cae7a32119cba96b51c55c921bdea71d1 100644 --- a/drivers/bluetooth/btmrvl_debugfs.c +++ b/drivers/bluetooth/btmrvl_debugfs.c @@ -167,6 +167,35 @@ static const struct file_operations btmrvl_hscmd_fops = { .llseek = default_llseek, }; +static ssize_t btmrvl_fwdump_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + char buf[16]; + bool result; + + memset(buf, 0, sizeof(buf)); + + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) + return -EFAULT; + + if (strtobool(buf, &result)) + return -EINVAL; + + if (!result) + return -EINVAL; + + btmrvl_firmware_dump(priv); + + return count; +} + +static const struct file_operations btmrvl_fwdump_fops = { + .write = btmrvl_fwdump_write, + .open = simple_open, + .llseek = default_llseek, +}; + void btmrvl_debugfs_init(struct hci_dev *hdev) { struct btmrvl_private *priv = hci_get_drvdata(hdev); @@ -197,6 +226,8 @@ void btmrvl_debugfs_init(struct hci_dev *hdev) priv, &btmrvl_hscmd_fops); debugfs_create_file("hscfgcmd", 0644, dbg->config_dir, priv, &btmrvl_hscfgcmd_fops); + debugfs_create_file("fw_dump", 0200, dbg->config_dir, + priv, &btmrvl_fwdump_fops); dbg->status_dir = debugfs_create_dir("status", hdev->debugfs); debugfs_create_u8("curpsmode", 0444, dbg->status_dir, diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h index 38ad66289ad6206204138a31d4d4bda83a4297c9..330f8f84928d4d883be8b246eb78b90bad7f6159 100644 --- a/drivers/bluetooth/btmrvl_drv.h +++ b/drivers/bluetooth/btmrvl_drv.h @@ -32,6 +32,24 @@ /* Time to wait for command response in millisecond */ #define 
WAIT_UNTIL_CMD_RESP 5000 +enum rdwr_status { + RDWR_STATUS_SUCCESS = 0, + RDWR_STATUS_FAILURE = 1, + RDWR_STATUS_DONE = 2 +}; + +#define FW_DUMP_MAX_NAME_LEN 8 +#define FW_DUMP_HOST_READY 0xEE +#define FW_DUMP_DONE 0xFF +#define FW_DUMP_READ_DONE 0xFE + +struct memory_type_mapping { + u8 mem_name[FW_DUMP_MAX_NAME_LEN]; + u8 *mem_ptr; + u32 mem_size; + u8 done_flag; +}; + struct btmrvl_thread { struct task_struct *task; wait_queue_head_t wait_q; @@ -81,6 +99,7 @@ struct btmrvl_private { u8 *payload, u16 nb); int (*hw_wakeup_firmware) (struct btmrvl_private *priv); int (*hw_process_int_status) (struct btmrvl_private *priv); + void (*firmware_dump)(struct btmrvl_private *priv); spinlock_t driver_lock; /* spinlock used by driver */ #ifdef CONFIG_DEBUG_FS void *debugfs_data; @@ -151,6 +170,7 @@ int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv); int btmrvl_enable_ps(struct btmrvl_private *priv); int btmrvl_prepare_command(struct btmrvl_private *priv); int btmrvl_enable_hs(struct btmrvl_private *priv); +void btmrvl_firmware_dump(struct btmrvl_private *priv); #ifdef CONFIG_DEBUG_FS void btmrvl_debugfs_init(struct hci_dev *hdev); diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c index 1d7db20648899103578d3badeb84aa6f38ec42f6..30939c993d94cc7e832063501f1bca4a3dd4d530 100644 --- a/drivers/bluetooth/btmrvl_main.c +++ b/drivers/bluetooth/btmrvl_main.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "btmrvl_drv.h" #include "btmrvl_sdio.h" @@ -41,6 +42,11 @@ void btmrvl_interrupt(struct btmrvl_private *priv) priv->adapter->int_count++; + if (priv->adapter->hs_state == HS_ACTIVATED) { + BT_DBG("BT: HS DEACTIVATED in ISR!"); + priv->adapter->hs_state = HS_DEACTIVATED; + } + wake_up_interruptible(&priv->main_thread.wait_q); } EXPORT_SYMBOL_GPL(btmrvl_interrupt); @@ -209,7 +215,7 @@ int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd) ret = btmrvl_send_sync_cmd(priv, BT_CMD_MODULE_CFG_REQ, &subcmd, 1); if (ret) - BT_ERR("module_cfg_cmd(%x) failed\n", subcmd); + BT_ERR("module_cfg_cmd(%x) failed", subcmd); return ret; } @@ -245,7 +251,7 @@ int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv) ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_CONFIG, param, 2); if (ret) - BT_ERR("HSCFG command failed\n"); + BT_ERR("HSCFG command failed"); return ret; } @@ -263,7 +269,7 @@ int btmrvl_enable_ps(struct btmrvl_private *priv) ret = btmrvl_send_sync_cmd(priv, BT_CMD_AUTO_SLEEP_MODE, &param, 1); if (ret) - BT_ERR("PSMODE command failed\n"); + BT_ERR("PSMODE command failed"); return 0; } @@ -276,7 +282,7 @@ int btmrvl_enable_hs(struct btmrvl_private *priv) ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_ENABLE, NULL, 0); if (ret) { - BT_ERR("Host sleep enable command failed\n"); + BT_ERR("Host sleep enable command failed"); return ret; } @@ -323,12 +329,19 @@ int btmrvl_prepare_command(struct btmrvl_private *priv) } else { ret = priv->hw_wakeup_firmware(priv); priv->adapter->hs_state = HS_DEACTIVATED; + BT_DBG("BT: HS DEACTIVATED due to host activity!"); } } return ret; } +void btmrvl_firmware_dump(struct btmrvl_private *priv) +{ + if (priv->firmware_dump) + priv->firmware_dump(priv); +} + static int btmrvl_tx_pkt(struct btmrvl_private *priv, struct sk_buff *skb) { int ret = 0; @@ -487,34 +500,36 @@ static int btmrvl_download_cal_data(struct btmrvl_private *priv, ret = btmrvl_send_sync_cmd(priv, BT_CMD_LOAD_CONFIG_DATA, data, BT_CAL_HDR_LEN + len); if (ret) - BT_ERR("Failed to download calibration data\n"); + BT_ERR("Failed to download calibration
data"); return 0; } -static int btmrvl_cal_data_dt(struct btmrvl_private *priv) +static int btmrvl_check_device_tree(struct btmrvl_private *priv) { struct device_node *dt_node; u8 cal_data[BT_CAL_HDR_LEN + BT_CAL_DATA_SIZE]; - const char name[] = "btmrvl_caldata"; - const char property[] = "btmrvl,caldata"; int ret; - - dt_node = of_find_node_by_name(NULL, name); - if (!dt_node) - return -ENODEV; - - ret = of_property_read_u8_array(dt_node, property, - cal_data + BT_CAL_HDR_LEN, - BT_CAL_DATA_SIZE); - if (ret) - return ret; - - BT_DBG("Use cal data from device tree"); - ret = btmrvl_download_cal_data(priv, cal_data, BT_CAL_DATA_SIZE); - if (ret) { - BT_ERR("Fail to download calibrate data"); - return ret; + u32 val; + + for_each_compatible_node(dt_node, NULL, "btmrvl,cfgdata") { + ret = of_property_read_u32(dt_node, "btmrvl,gpio-gap", &val); + if (!ret) + priv->btmrvl_dev.gpio_gap = val; + + ret = of_property_read_u8_array(dt_node, "btmrvl,cal-data", + cal_data + BT_CAL_HDR_LEN, + BT_CAL_DATA_SIZE); + if (ret) + return ret; + + BT_DBG("Use cal data from device tree"); + ret = btmrvl_download_cal_data(priv, cal_data, + BT_CAL_DATA_SIZE); + if (ret) { + BT_ERR("Fail to download calibrate data"); + return ret; + } } return 0; @@ -526,14 +541,15 @@ static int btmrvl_setup(struct hci_dev *hdev) btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ); - btmrvl_cal_data_dt(priv); + priv->btmrvl_dev.gpio_gap = 0xffff; + + btmrvl_check_device_tree(priv); btmrvl_pscan_window_reporting(priv, 0x01); priv->btmrvl_dev.psmode = 1; btmrvl_enable_ps(priv); - priv->btmrvl_dev.gpio_gap = 0xffff; btmrvl_send_hscfg_cmd(priv); return 0; diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 550bce089fa6c428467882d23c977ed40f37a532..0057c0b7a7761e4053e1183f467502b37dbc93a1 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -33,6 +34,24 @@ #define VERSION "1.0" +static struct memory_type_mapping mem_type_mapping_tbl[] = { + {"ITCM", NULL, 0, 0xF0}, + {"DTCM", NULL, 0, 0xF1}, + {"SQRAM", NULL, 0, 0xF2}, + {"APU", NULL, 0, 0xF3}, + {"CIU", NULL, 0, 0xF4}, + {"ICU", NULL, 0, 0xF5}, + {"MAC", NULL, 0, 0xF6}, + {"EXT7", NULL, 0, 0xF7}, + {"EXT8", NULL, 0, 0xF8}, + {"EXT9", NULL, 0, 0xF9}, + {"EXT10", NULL, 0, 0xFA}, + {"EXT11", NULL, 0, 0xFB}, + {"EXT12", NULL, 0, 0xFC}, + {"EXT13", NULL, 0, 0xFD}, + {"EXTLAST", NULL, 0, 0xFE}, +}; + /* The btmrvl_sdio_remove() callback function is called * when user removes this module from kernel space or ejects * the card from the slot. 
The driver handles these 2 cases @@ -122,6 +141,9 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8897 = { .int_read_to_clear = true, .host_int_rsr = 0x01, .card_misc_cfg = 0xcc, + .fw_dump_ctrl = 0xe2, + .fw_dump_start = 0xe3, + .fw_dump_end = 0xea, }; static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = { @@ -130,6 +152,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = { .reg = &btmrvl_reg_8688, .support_pscan_win_report = false, .sd_blksz_fw_dl = 64, + .supports_fw_dump = false, }; static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = { @@ -138,6 +161,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = { .reg = &btmrvl_reg_87xx, .support_pscan_win_report = false, .sd_blksz_fw_dl = 256, + .supports_fw_dump = false, }; static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = { @@ -146,6 +170,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = { .reg = &btmrvl_reg_87xx, .support_pscan_win_report = false, .sd_blksz_fw_dl = 256, + .supports_fw_dump = false, }; static const struct btmrvl_sdio_device btmrvl_sdio_sd8887 = { @@ -154,6 +179,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8887 = { .reg = &btmrvl_reg_8887, .support_pscan_win_report = true, .sd_blksz_fw_dl = 256, + .supports_fw_dump = false, }; static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = { @@ -162,6 +188,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = { .reg = &btmrvl_reg_8897, .support_pscan_win_report = true, .sd_blksz_fw_dl = 256, + .supports_fw_dump = true, }; static const struct sdio_device_id btmrvl_sdio_ids[] = { @@ -764,8 +791,8 @@ static void btmrvl_sdio_interrupt(struct sdio_func *func) card = sdio_get_drvdata(func); if (!card || !card->priv) { - BT_ERR("sbi_interrupt(%p) card or priv is " - "NULL, card=%p\n", func, card); + BT_ERR("sbi_interrupt(%p) card or priv is NULL, card=%p", + func, card); return; } @@ -1080,6 +1107,277 @@ static int btmrvl_sdio_wakeup_fw(struct btmrvl_private *priv) return ret; } +static void btmrvl_sdio_dump_regs(struct btmrvl_private *priv) +{ + struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; + int ret = 0; + unsigned int reg, reg_start, reg_end; + char buf[256], *ptr; + u8 loop, func, data; + int MAX_LOOP = 2; + + btmrvl_sdio_wakeup_fw(priv); + sdio_claim_host(card->func); + + for (loop = 0; loop < MAX_LOOP; loop++) { + memset(buf, 0, sizeof(buf)); + ptr = buf; + + if (loop == 0) { + /* Read the registers of SDIO function0 */ + func = loop; + reg_start = 0; + reg_end = 9; + } else { + func = 2; + reg_start = 0; + reg_end = 0x09; + } + + ptr += sprintf(ptr, "SDIO Func%d (%#x-%#x): ", + func, reg_start, reg_end); + for (reg = reg_start; reg <= reg_end; reg++) { + if (func == 0) + data = sdio_f0_readb(card->func, reg, &ret); + else + data = sdio_readb(card->func, reg, &ret); + + if (!ret) { + ptr += sprintf(ptr, "%02x ", data); + } else { + ptr += sprintf(ptr, "ERR"); + break; + } + } + + BT_INFO("%s", buf); + } + + sdio_release_host(card->func); +} + +/* This function read/write firmware */ +static enum +rdwr_status btmrvl_sdio_rdwr_firmware(struct btmrvl_private *priv, + u8 doneflag) +{ + struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; + int ret, tries; + u8 ctrl_data = 0; + + sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl, + &ret); + + if (ret) { + BT_ERR("SDIO write err"); + return RDWR_STATUS_FAILURE; + } + + for (tries = 0; tries < MAX_POLL_TRIES; tries++) { + ctrl_data = sdio_readb(card->func, card->reg->fw_dump_ctrl, + &ret); + + if (ret) { + 
BT_ERR("SDIO read err"); + return RDWR_STATUS_FAILURE; + } + + if (ctrl_data == FW_DUMP_DONE) + break; + if (doneflag && ctrl_data == doneflag) + return RDWR_STATUS_DONE; + if (ctrl_data != FW_DUMP_HOST_READY) { + BT_INFO("The ctrl reg was changed, re-try again!"); + sdio_writeb(card->func, FW_DUMP_HOST_READY, + card->reg->fw_dump_ctrl, &ret); + if (ret) { + BT_ERR("SDIO write err"); + return RDWR_STATUS_FAILURE; + } + } + usleep_range(100, 200); + } + + if (ctrl_data == FW_DUMP_HOST_READY) { + BT_ERR("Fail to pull ctrl_data"); + return RDWR_STATUS_FAILURE; + } + + return RDWR_STATUS_SUCCESS; +} + +/* This function dump sdio register and memory data */ +static void btmrvl_sdio_dump_firmware(struct btmrvl_private *priv) +{ + struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; + int ret = 0; + unsigned int reg, reg_start, reg_end; + enum rdwr_status stat; + u8 *dbg_ptr, *end_ptr, *fw_dump_data, *fw_dump_ptr; + u8 dump_num, idx, i, read_reg, doneflag = 0; + u32 memory_size, fw_dump_len = 0; + + /* dump sdio register first */ + btmrvl_sdio_dump_regs(priv); + + if (!card->supports_fw_dump) { + BT_ERR("Firmware dump not supported for this card!"); + return; + } + + for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) { + struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx]; + + if (entry->mem_ptr) { + vfree(entry->mem_ptr); + entry->mem_ptr = NULL; + } + entry->mem_size = 0; + } + + btmrvl_sdio_wakeup_fw(priv); + sdio_claim_host(card->func); + + BT_INFO("== btmrvl firmware dump start =="); + + stat = btmrvl_sdio_rdwr_firmware(priv, doneflag); + if (stat == RDWR_STATUS_FAILURE) + goto done; + + reg = card->reg->fw_dump_start; + /* Read the number of the memories which will dump */ + dump_num = sdio_readb(card->func, reg, &ret); + + if (ret) { + BT_ERR("SDIO read memory length err"); + goto done; + } + + /* Read the length of every memory which will dump */ + for (idx = 0; idx < dump_num; idx++) { + struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx]; + + stat = btmrvl_sdio_rdwr_firmware(priv, doneflag); + if (stat == RDWR_STATUS_FAILURE) + goto done; + + memory_size = 0; + reg = card->reg->fw_dump_start; + for (i = 0; i < 4; i++) { + read_reg = sdio_readb(card->func, reg, &ret); + if (ret) { + BT_ERR("SDIO read err"); + goto done; + } + memory_size |= (read_reg << i*8); + reg++; + } + + if (memory_size == 0) { + BT_INFO("Firmware dump finished!"); + break; + } + + BT_INFO("%s_SIZE=0x%x", entry->mem_name, memory_size); + entry->mem_ptr = vzalloc(memory_size + 1); + entry->mem_size = memory_size; + if (!entry->mem_ptr) { + BT_ERR("Vzalloc %s failed", entry->mem_name); + goto done; + } + + fw_dump_len += (strlen("========Start dump ") + + strlen(entry->mem_name) + + strlen("========\n") + + (memory_size + 1) + + strlen("\n========End dump========\n")); + + dbg_ptr = entry->mem_ptr; + end_ptr = dbg_ptr + memory_size; + + doneflag = entry->done_flag; + BT_INFO("Start %s output, please wait...", + entry->mem_name); + + do { + stat = btmrvl_sdio_rdwr_firmware(priv, doneflag); + if (stat == RDWR_STATUS_FAILURE) + goto done; + + reg_start = card->reg->fw_dump_start; + reg_end = card->reg->fw_dump_end; + for (reg = reg_start; reg <= reg_end; reg++) { + *dbg_ptr = sdio_readb(card->func, reg, &ret); + if (ret) { + BT_ERR("SDIO read err"); + goto done; + } + if (dbg_ptr < end_ptr) + dbg_ptr++; + else + BT_ERR("Allocated buffer not enough"); + } + + if (stat != RDWR_STATUS_DONE) { + continue; + } else { + BT_INFO("%s done: size=0x%tx", + entry->mem_name, + dbg_ptr - 
entry->mem_ptr); + break; + } + } while (1); + } + + BT_INFO("== btmrvl firmware dump end =="); + +done: + sdio_release_host(card->func); + + if (fw_dump_len == 0) + return; + + fw_dump_data = vzalloc(fw_dump_len+1); + if (!fw_dump_data) { + BT_ERR("Vzalloc fw_dump_data fail!"); + return; + } + fw_dump_ptr = fw_dump_data; + + /* Dump all the memory data into single file, a userspace script will + be used to split all the memory data to multiple files*/ + BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump start"); + for (idx = 0; idx < dump_num; idx++) { + struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx]; + + if (entry->mem_ptr) { + strcpy(fw_dump_ptr, "========Start dump "); + fw_dump_ptr += strlen("========Start dump "); + + strcpy(fw_dump_ptr, entry->mem_name); + fw_dump_ptr += strlen(entry->mem_name); + + strcpy(fw_dump_ptr, "========\n"); + fw_dump_ptr += strlen("========\n"); + + memcpy(fw_dump_ptr, entry->mem_ptr, entry->mem_size); + fw_dump_ptr += entry->mem_size; + + strcpy(fw_dump_ptr, "\n========End dump========\n"); + fw_dump_ptr += strlen("\n========End dump========\n"); + + vfree(mem_type_mapping_tbl[idx].mem_ptr); + mem_type_mapping_tbl[idx].mem_ptr = NULL; + } + } + + /* fw_dump_data will be free in device coredump release function + after 5 min*/ + dev_coredumpv(&priv->btmrvl_dev.hcidev->dev, fw_dump_data, + fw_dump_len, GFP_KERNEL); + BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump end"); +} + static int btmrvl_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) { @@ -1103,6 +1401,7 @@ static int btmrvl_sdio_probe(struct sdio_func *func, card->reg = data->reg; card->sd_blksz_fw_dl = data->sd_blksz_fw_dl; card->support_pscan_win_report = data->support_pscan_win_report; + card->supports_fw_dump = data->supports_fw_dump; } if (btmrvl_sdio_register_dev(card) < 0) { @@ -1134,6 +1433,7 @@ static int btmrvl_sdio_probe(struct sdio_func *func, priv->hw_host_to_card = btmrvl_sdio_host_to_card; priv->hw_wakeup_firmware = btmrvl_sdio_wakeup_fw; priv->hw_process_int_status = btmrvl_sdio_process_int_status; + priv->firmware_dump = btmrvl_sdio_dump_firmware; if (btmrvl_register_hdev(priv)) { BT_ERR("Register hdev failed!"); diff --git a/drivers/bluetooth/btmrvl_sdio.h b/drivers/bluetooth/btmrvl_sdio.h index 453559f98a75e4a895bf1a1086e1d5e754c2509a..1a3bd064c44249a7f543d6e8c9eccdb7eefec92d 100644 --- a/drivers/bluetooth/btmrvl_sdio.h +++ b/drivers/bluetooth/btmrvl_sdio.h @@ -81,6 +81,9 @@ struct btmrvl_sdio_card_reg { bool int_read_to_clear; u8 host_int_rsr; u8 card_misc_cfg; + u8 fw_dump_ctrl; + u8 fw_dump_start; + u8 fw_dump_end; }; struct btmrvl_sdio_card { @@ -90,6 +93,7 @@ struct btmrvl_sdio_card { const char *firmware; const struct btmrvl_sdio_card_reg *reg; bool support_pscan_win_report; + bool supports_fw_dump; u16 sd_blksz_fw_dl; u8 rx_unit; struct btmrvl_private *priv; @@ -101,6 +105,7 @@ struct btmrvl_sdio_device { const struct btmrvl_sdio_card_reg *reg; const bool support_pscan_win_report; u16 sd_blksz_fw_dl; + bool supports_fw_dump; }; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index edfc17bfcd44e02a8c8fb8748687cf265770863d..31dd24ac99269ae36b04e1b05d20a80f4af65f0b 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -106,9 +106,12 @@ static const struct usb_device_id btusb_table[] = { { USB_DEVICE(0x0b05, 0x17b5) }, { USB_DEVICE(0x0b05, 0x17cb) }, { USB_DEVICE(0x413c, 0x8197) }, + { USB_DEVICE(0x13d3, 0x3404), + .driver_info = BTUSB_BCM_PATCHRAM }, /* Foxconn - Hon Hai */ - { 
USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) }, + { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01), + .driver_info = BTUSB_BCM_PATCHRAM }, /* Broadcom devices with vendor specific id */ { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01), @@ -156,6 +159,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, @@ -182,6 +186,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU12 with sflash firmware */ @@ -298,6 +303,8 @@ struct btusb_data { unsigned int sco_num; int isoc_altsetting; int suspend_count; + + int (*recv_bulk)(struct btusb_data *data, void *buffer, int count); }; static inline void btusb_free_frags(struct btusb_data *data) @@ -589,7 +596,7 @@ static void btusb_bulk_complete(struct urb *urb) if (urb->status == 0) { hdev->stat.byte_rx += urb->actual_length; - if (btusb_recv_bulk(data, urb->transfer_buffer, + if (data->recv_bulk(data, urb->transfer_buffer, urb->actual_length) < 0) { BT_ERR("%s corrupted ACL packet", hdev->name); hdev->stat.err_rx++; @@ -2011,6 +2018,8 @@ static int btusb_probe(struct usb_interface *intf, init_usb_anchor(&data->isoc_anchor); spin_lock_init(&data->rxlock); + data->recv_bulk = btusb_recv_bulk; + hdev = hci_alloc_dev(); if (!hdev) return -ENOMEM; @@ -2034,6 +2043,7 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_BCM_PATCHRAM) { hdev->setup = btusb_setup_bcm_patchram; hdev->set_bdaddr = btusb_set_bdaddr_bcm; + set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); } if (id->driver_info & BTUSB_INTEL) { diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c index 0bc8a6a6a14850744d27788b9850ddf9bacac99f..2ff6dfd2d3f0657e13853ec13b04a7b51cf765a6 100644 --- a/drivers/bluetooth/hci_ath.c +++ b/drivers/bluetooth/hci_ath.c @@ -74,7 +74,7 @@ static int ath_wakeup_ar3k(struct tty_struct *tty) status = tty->driver->ops->tiocmget(tty); - /* Disable Automatic RTSCTS */ + /* Enable Automatic RTSCTS */ ktermios.c_cflag |= CRTSCTS; status = tty_set_termios(tty, &ktermios); diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index a22838669b4ed227d447925ea45337f5efd9f72d..ec0fa7732c0d953c359c86f16a5f307a00f3d930 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -168,6 +168,27 @@ wakeup: hci_uart_tx_wakeup(hu); } +static void h5_peer_reset(struct hci_uart *hu) +{ + struct h5 *h5 = hu->priv; + + BT_ERR("Peer device has reset"); + + h5->state = H5_UNINITIALIZED; + + del_timer(&h5->timer); + + skb_queue_purge(&h5->rel); + skb_queue_purge(&h5->unrel); + skb_queue_purge(&h5->unack); + + h5->tx_seq = 0; + h5->tx_ack = 0; + + /* Send reset request to upper stack */ + hci_reset_dev(hu->hdev); +} + static int h5_open(struct hci_uart *hu) { struct h5 *h5; @@ -283,8 +304,12 @@ static void 
h5_handle_internal_rx(struct hci_uart *hu) conf_req[2] = h5_cfg_field(h5); if (memcmp(data, sync_req, 2) == 0) { + if (h5->state == H5_ACTIVE) + h5_peer_reset(hu); h5_link_control(hu, sync_rsp, 2); } else if (memcmp(data, sync_rsp, 2) == 0) { + if (h5->state == H5_ACTIVE) + h5_peer_reset(hu); h5->state = H5_INITIALIZED; h5_link_control(hu, conf_req, 3); } else if (memcmp(data, conf_req, 2) == 0) { diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index a2d1a9612c86c4730c6aa2f0ce038ca06da4f9c5..191a6a3ae6ca704195a41e8caf62b23b510f5bf4 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -517,6 +517,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) /* Just return here, no channel found */ return; + channel->rescind = true; + /* work is initialized for vmbus_process_rescind_offer() from * vmbus_process_offer() where the channel got created */ queue_work(channel->controlwq, &channel->work); diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index fb61f6685809e21f853d849d056a9111e2c3b6d0..4b8c6116c058247f4980fa6e06ec986700ef0922 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -472,13 +472,13 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb) skb = get_skb(skb, flowclen, GFP_KERNEL); flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen); - flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) | - FW_FLOWC_WR_NPARAMS(8)); - flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen, - 16)) | FW_WR_FLOWID(ep->hwtid)); + flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | + FW_FLOWC_WR_NPARAMS_V(8)); + flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen, + 16)) | FW_WR_FLOWID_V(ep->hwtid)); flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; - flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN + flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V (ep->com.dev->rdev.lldi.pf)); flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); @@ -649,31 +649,31 @@ static int send_connect(struct c4iw_ep *ep) * remainder will be specified in the rx_data_ack. */ win = ep->rcv_win >> 10; - if (win > RCV_BUFSIZ_MASK) - win = RCV_BUFSIZ_MASK; + if (win > RCV_BUFSIZ_M) + win = RCV_BUFSIZ_M; opt0 = (nocong ? 
NO_CONG(1) : 0) | - KEEP_ALIVE(1) | + KEEP_ALIVE_F | DELACK(1) | - WND_SCALE(wscale) | - MSS_IDX(mtu_idx) | - L2T_IDX(ep->l2t->idx) | - TX_CHAN(ep->tx_chan) | - SMAC_SEL(ep->smac_idx) | + WND_SCALE_V(wscale) | + MSS_IDX_V(mtu_idx) | + L2T_IDX_V(ep->l2t->idx) | + TX_CHAN_V(ep->tx_chan) | + SMAC_SEL_V(ep->smac_idx) | DSCP(ep->tos) | - ULP_MODE(ULP_MODE_TCPDDP) | - RCV_BUFSIZ(win); - opt2 = RX_CHANNEL(0) | + ULP_MODE_V(ULP_MODE_TCPDDP) | + RCV_BUFSIZ_V(win); + opt2 = RX_CHANNEL_V(0) | CCTRL_ECN(enable_ecn) | - RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); + RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid); if (enable_tcp_timestamps) opt2 |= TSTAMPS_EN(1); if (enable_tcp_sack) opt2 |= SACK_EN(1); if (wscale && enable_tcp_window_scaling) - opt2 |= WND_SCALE_EN(1); + opt2 |= WND_SCALE_EN_F; if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { - opt2 |= T5_OPT_2_VALID; + opt2 |= T5_OPT_2_VALID_F; opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */ } @@ -736,7 +736,7 @@ static int send_connect(struct c4iw_ep *ep) t5_req->local_ip = la->sin_addr.s_addr; t5_req->peer_ip = ra->sin_addr.s_addr; t5_req->opt0 = cpu_to_be64(opt0); - t5_req->params = cpu_to_be64(V_FILTER_TUPLE( + t5_req->params = cpu_to_be64(FILTER_TUPLE_V( cxgb4_select_ntuple( ep->com.dev->rdev.lldi.ports[0], ep->l2t))); @@ -762,7 +762,7 @@ static int send_connect(struct c4iw_ep *ep) t5_req6->peer_ip_lo = *((__be64 *) (ra6->sin6_addr.s6_addr + 8)); t5_req6->opt0 = cpu_to_be64(opt0); - t5_req6->params = cpu_to_be64(V_FILTER_TUPLE( + t5_req6->params = cpu_to_be64(FILTER_TUPLE_V( cxgb4_select_ntuple( ep->com.dev->rdev.lldi.ports[0], ep->l2t))); @@ -803,16 +803,16 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); memset(req, 0, wrlen); req->op_to_immdlen = cpu_to_be32( - FW_WR_OP(FW_OFLD_TX_DATA_WR) | - FW_WR_COMPL(1) | - FW_WR_IMMDLEN(mpalen)); + FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | + FW_WR_COMPL_F | + FW_WR_IMMDLEN_V(mpalen)); req->flowid_len16 = cpu_to_be32( - FW_WR_FLOWID(ep->hwtid) | - FW_WR_LEN16(wrlen >> 4)); + FW_WR_FLOWID_V(ep->hwtid) | + FW_WR_LEN16_V(wrlen >> 4)); req->plen = cpu_to_be32(mpalen); req->tunnel_to_proxy = cpu_to_be32( - FW_OFLD_TX_DATA_WR_FLUSH(1) | - FW_OFLD_TX_DATA_WR_SHOVE(1)); + FW_OFLD_TX_DATA_WR_FLUSH_F | + FW_OFLD_TX_DATA_WR_SHOVE_F); mpa = (struct mpa_message *)(req + 1); memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); @@ -897,16 +897,16 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); memset(req, 0, wrlen); req->op_to_immdlen = cpu_to_be32( - FW_WR_OP(FW_OFLD_TX_DATA_WR) | - FW_WR_COMPL(1) | - FW_WR_IMMDLEN(mpalen)); + FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | + FW_WR_COMPL_F | + FW_WR_IMMDLEN_V(mpalen)); req->flowid_len16 = cpu_to_be32( - FW_WR_FLOWID(ep->hwtid) | - FW_WR_LEN16(wrlen >> 4)); + FW_WR_FLOWID_V(ep->hwtid) | + FW_WR_LEN16_V(wrlen >> 4)); req->plen = cpu_to_be32(mpalen); req->tunnel_to_proxy = cpu_to_be32( - FW_OFLD_TX_DATA_WR_FLUSH(1) | - FW_OFLD_TX_DATA_WR_SHOVE(1)); + FW_OFLD_TX_DATA_WR_FLUSH_F | + FW_OFLD_TX_DATA_WR_SHOVE_F); mpa = (struct mpa_message *)(req + 1); memset(mpa, 0, sizeof(*mpa)); @@ -977,16 +977,16 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen); memset(req, 0, wrlen); req->op_to_immdlen = cpu_to_be32( - FW_WR_OP(FW_OFLD_TX_DATA_WR) | - FW_WR_COMPL(1) | - FW_WR_IMMDLEN(mpalen)); + FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | + 
FW_WR_COMPL_F | + FW_WR_IMMDLEN_V(mpalen)); req->flowid_len16 = cpu_to_be32( - FW_WR_FLOWID(ep->hwtid) | - FW_WR_LEN16(wrlen >> 4)); + FW_WR_FLOWID_V(ep->hwtid) | + FW_WR_LEN16_V(wrlen >> 4)); req->plen = cpu_to_be32(mpalen); req->tunnel_to_proxy = cpu_to_be32( - FW_OFLD_TX_DATA_WR_FLUSH(1) | - FW_OFLD_TX_DATA_WR_SHOVE(1)); + FW_OFLD_TX_DATA_WR_FLUSH_F | + FW_OFLD_TX_DATA_WR_SHOVE_F); mpa = (struct mpa_message *)(req + 1); memset(mpa, 0, sizeof(*mpa)); @@ -1249,15 +1249,15 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits) * due to the limit in the number of bits in the RCV_BUFSIZ field, * then add the overage in to the credits returned. */ - if (ep->rcv_win > RCV_BUFSIZ_MASK * 1024) - credits += ep->rcv_win - RCV_BUFSIZ_MASK * 1024; + if (ep->rcv_win > RCV_BUFSIZ_M * 1024) + credits += ep->rcv_win - RCV_BUFSIZ_M * 1024; req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen); memset(req, 0, wrlen); INIT_TP_WR(req, ep->hwtid); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid)); - req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) | + req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F | F_RX_DACK_CHANGE | V_RX_DACK_MODE(dack_mode)); set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx); @@ -1751,7 +1751,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req)); memset(req, 0, sizeof(*req)); req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR)); - req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); + req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); req->le.filter = cpu_to_be32(cxgb4_select_ntuple( ep->com.dev->rdev.lldi.ports[0], ep->l2t)); @@ -1762,10 +1762,10 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) req->le.pport = sin->sin_port; req->le.u.ipv4.pip = sin->sin_addr.s_addr; req->tcb.t_state_to_astid = - htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) | - V_FW_OFLD_CONNECTION_WR_ASTID(atid)); + htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) | + FW_OFLD_CONNECTION_WR_ASTID_V(atid)); req->tcb.cplrxdataack_cplpassacceptrpl = - htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK); + htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F); req->tcb.tx_max = (__force __be32) jiffies; req->tcb.rcv_adv = htons(1); best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, @@ -1778,34 +1778,34 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) * remainder will be specified in the rx_data_ack. */ win = ep->rcv_win >> 10; - if (win > RCV_BUFSIZ_MASK) - win = RCV_BUFSIZ_MASK; + if (win > RCV_BUFSIZ_M) + win = RCV_BUFSIZ_M; req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) | (nocong ? 
NO_CONG(1) : 0) | - KEEP_ALIVE(1) | + KEEP_ALIVE_F | DELACK(1) | - WND_SCALE(wscale) | - MSS_IDX(mtu_idx) | - L2T_IDX(ep->l2t->idx) | - TX_CHAN(ep->tx_chan) | - SMAC_SEL(ep->smac_idx) | + WND_SCALE_V(wscale) | + MSS_IDX_V(mtu_idx) | + L2T_IDX_V(ep->l2t->idx) | + TX_CHAN_V(ep->tx_chan) | + SMAC_SEL_V(ep->smac_idx) | DSCP(ep->tos) | - ULP_MODE(ULP_MODE_TCPDDP) | - RCV_BUFSIZ(win)); + ULP_MODE_V(ULP_MODE_TCPDDP) | + RCV_BUFSIZ_V(win)); req->tcb.opt2 = (__force __be32) (PACE(1) | TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) | - RX_CHANNEL(0) | + RX_CHANNEL_V(0) | CCTRL_ECN(enable_ecn) | - RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid)); + RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid)); if (enable_tcp_timestamps) - req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1); + req->tcb.opt2 |= (__force __be32)TSTAMPS_EN(1); if (enable_tcp_sack) - req->tcb.opt2 |= (__force __be32) SACK_EN(1); + req->tcb.opt2 |= (__force __be32)SACK_EN(1); if (wscale && enable_tcp_window_scaling) - req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1); - req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0); - req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2); + req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F; + req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0); + req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2); set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx); set_bit(ACT_OFLD_CONN, &ep->com.history); c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); @@ -2178,28 +2178,28 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, * remainder will be specified in the rx_data_ack. */ win = ep->rcv_win >> 10; - if (win > RCV_BUFSIZ_MASK) - win = RCV_BUFSIZ_MASK; + if (win > RCV_BUFSIZ_M) + win = RCV_BUFSIZ_M; opt0 = (nocong ? NO_CONG(1) : 0) | - KEEP_ALIVE(1) | + KEEP_ALIVE_F | DELACK(1) | - WND_SCALE(wscale) | - MSS_IDX(mtu_idx) | - L2T_IDX(ep->l2t->idx) | - TX_CHAN(ep->tx_chan) | - SMAC_SEL(ep->smac_idx) | + WND_SCALE_V(wscale) | + MSS_IDX_V(mtu_idx) | + L2T_IDX_V(ep->l2t->idx) | + TX_CHAN_V(ep->tx_chan) | + SMAC_SEL_V(ep->smac_idx) | DSCP(ep->tos >> 2) | - ULP_MODE(ULP_MODE_TCPDDP) | - RCV_BUFSIZ(win); - opt2 = RX_CHANNEL(0) | - RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); + ULP_MODE_V(ULP_MODE_TCPDDP) | + RCV_BUFSIZ_V(win); + opt2 = RX_CHANNEL_V(0) | + RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid); if (enable_tcp_timestamps && req->tcpopt.tstamp) opt2 |= TSTAMPS_EN(1); if (enable_tcp_sack && req->tcpopt.sack) opt2 |= SACK_EN(1); if (wscale && enable_tcp_window_scaling) - opt2 |= WND_SCALE_EN(1); + opt2 |= WND_SCALE_EN_F; if (enable_ecn) { const struct tcphdr *tcph; u32 hlen = ntohl(req->hdr_len); @@ -2211,7 +2211,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, } if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { u32 isn = (prandom_u32() & ~7UL) - 1; - opt2 |= T5_OPT_2_VALID; + opt2 |= T5_OPT_2_VALID_F; opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */ rpl5 = (void *)rpl; @@ -3537,9 +3537,9 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb, req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL); req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req)); memset(req, 0, sizeof(*req)); - req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1)); - req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); - req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL); + req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F); + req->len16_pkd = 
htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); + req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F); req->le.filter = (__force __be32) filter; req->le.lport = lport; req->le.pport = rport; @@ -3548,16 +3548,16 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb, req->tcb.rcv_nxt = htonl(rcv_isn + 1); req->tcb.rcv_adv = htons(window); req->tcb.t_state_to_astid = - htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) | - V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) | - V_FW_OFLD_CONNECTION_WR_ASTID( + htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) | + FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) | + FW_OFLD_CONNECTION_WR_ASTID_V( GET_PASS_OPEN_TID(ntohl(cpl->tos_stid)))); /* * We store the qid in opt2 which will be used by the firmware * to send us the wr response. */ - req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid)); + req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid)); /* * We initialize the MSS index in TCB to 0xF. @@ -3565,7 +3565,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb, * TCB picks up the correct value. If this was 0 * TP will ignore any value > 0 for MSS index. */ - req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF)); + req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF)); req->cookie = (unsigned long)skb; set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id); diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 0f773e78e0801ebf2b230298b15a3eb4bde15444..e9fd3a029296389cc63628319491edf7a2644a15 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -51,9 +51,9 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len); memset(res_wr, 0, wr_len); res_wr->op_nres = cpu_to_be32( - FW_WR_OP(FW_RI_RES_WR) | + FW_WR_OP_V(FW_RI_RES_WR) | V_FW_RI_RES_WR_NRES(1) | - FW_WR_COMPL(1)); + FW_WR_COMPL_F); res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); res_wr->cookie = (unsigned long) &wr_wait; res = res_wr->res; @@ -121,9 +121,9 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len); memset(res_wr, 0, wr_len); res_wr->op_nres = cpu_to_be32( - FW_WR_OP(FW_RI_RES_WR) | + FW_WR_OP_V(FW_RI_RES_WR) | V_FW_RI_RES_WR_NRES(1) | - FW_WR_COMPL(1)); + FW_WR_COMPL_F); res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); res_wr->cookie = (unsigned long) &wr_wait; res = res_wr->res; diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index ec7a2988a70344e1b784b6e68a2638f78cd439bc..0744455cd88b75f67a948be3f787f915ff8d7795 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -74,18 +74,18 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr, req = (struct ulp_mem_io *)__skb_put(skb, wr_len); memset(req, 0, wr_len); INIT_ULPTX_WR(req, wr_len, 0, 0); - req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) | - (wait ? FW_WR_COMPL(1) : 0)); + req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | + (wait ? FW_WR_COMPL_F : 0)); req->wr.wr_lo = wait ? 
(__force __be64)(unsigned long) &wr_wait : 0L; - req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16))); - req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE)); + req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16))); + req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE)); req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1)); - req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(len>>5)); + req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5)); req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16)); - req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr)); + req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr)); sgl = (struct ulptx_sgl *)(req + 1); - sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) | + sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE(1)); sgl->len0 = cpu_to_be32(len); sgl->addr0 = cpu_to_be64(data); @@ -107,12 +107,12 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, u8 wr_len, *to_dp, *from_dp; int copy_len, num_wqe, i, ret = 0; struct c4iw_wr_wait wr_wait; - __be32 cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE)); + __be32 cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE)); if (is_t4(rdev->lldi.adapter_type)) - cmd |= cpu_to_be32(ULP_MEMIO_ORDER(1)); + cmd |= cpu_to_be32(ULP_MEMIO_ORDER_F); else - cmd |= cpu_to_be32(V_T5_ULP_MEMIO_IMM(1)); + cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F); addr &= 0x7FFFFFF; PDBG("%s addr 0x%x len %u\n", __func__, addr, len); @@ -135,23 +135,23 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, INIT_ULPTX_WR(req, wr_len, 0, 0); if (i == (num_wqe-1)) { - req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) | - FW_WR_COMPL(1)); + req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | + FW_WR_COMPL_F); req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait; } else - req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR)); + req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR)); req->wr.wr_mid = cpu_to_be32( - FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16))); + FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16))); req->cmd = cmd; - req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN( + req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V( DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO))); req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16)); - req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3)); + req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr + i * 3)); sc = (struct ulptx_idata *)(req + 1); - sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM)); + sc->cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM)); sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO)); to_dp = (u8 *)(sc + 1); diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 72e3b69d1b76c3cf96763883cc2708eebb327d5a..66bd6a2ad83b04f34e2b5fbb822e0b8ff062359b 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -408,10 +408,10 @@ static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, PDBG("%s dev 0x%p\n", __func__, dev); return sprintf(buf, "%u.%u.%u.%u\n", - FW_HDR_FW_VER_MAJOR_GET(c4iw_dev->rdev.lldi.fw_vers), - FW_HDR_FW_VER_MINOR_GET(c4iw_dev->rdev.lldi.fw_vers), - FW_HDR_FW_VER_MICRO_GET(c4iw_dev->rdev.lldi.fw_vers), - FW_HDR_FW_VER_BUILD_GET(c4iw_dev->rdev.lldi.fw_vers)); + FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers), + FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers), + FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers), + FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers)); } static 
ssize_t show_hca(struct device *dev, struct device_attribute *attr, diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 41cd6882b648128f06476e948c56b94792e807bb..2ed3ece2b2ee38de0bf38525914ffb85cb7ed80d 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -271,9 +271,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len); memset(res_wr, 0, wr_len); res_wr->op_nres = cpu_to_be32( - FW_WR_OP(FW_RI_RES_WR) | + FW_WR_OP_V(FW_RI_RES_WR) | V_FW_RI_RES_WR_NRES(2) | - FW_WR_COMPL(1)); + FW_WR_COMPL_F); res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); res_wr->cookie = (unsigned long) &wr_wait; res = res_wr->res; @@ -1082,10 +1082,10 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe)); memset(wqe, 0, sizeof *wqe); - wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR)); + wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR)); wqe->flowid_len16 = cpu_to_be32( - FW_WR_FLOWID(qhp->ep->hwtid) | - FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16))); + FW_WR_FLOWID_V(qhp->ep->hwtid) | + FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); wqe->u.terminate.type = FW_RI_TYPE_TERMINATE; wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term); @@ -1204,11 +1204,11 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe)); memset(wqe, 0, sizeof *wqe); wqe->op_compl = cpu_to_be32( - FW_WR_OP(FW_RI_INIT_WR) | - FW_WR_COMPL(1)); + FW_WR_OP_V(FW_RI_INIT_WR) | + FW_WR_COMPL_F); wqe->flowid_len16 = cpu_to_be32( - FW_WR_FLOWID(ep->hwtid) | - FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16))); + FW_WR_FLOWID_V(ep->hwtid) | + FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); wqe->cookie = (unsigned long) &ep->com.wr_wait; wqe->u.fini.type = FW_RI_TYPE_FINI; @@ -1273,11 +1273,11 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe)); memset(wqe, 0, sizeof *wqe); wqe->op_compl = cpu_to_be32( - FW_WR_OP(FW_RI_INIT_WR) | - FW_WR_COMPL(1)); + FW_WR_OP_V(FW_RI_INIT_WR) | + FW_WR_COMPL_F); wqe->flowid_len16 = cpu_to_be32( - FW_WR_FLOWID(qhp->ep->hwtid) | - FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16))); + FW_WR_FLOWID_V(qhp->ep->hwtid) | + FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait; diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 1066eec854a9a07bfa1d4ad771fe2c6b979ee7ae..a3b70f6c4035b2f9e106941e057d36258019b7c1 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -233,7 +233,10 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector if (err) goto err_dbmap; - cq->mcq.comp = mlx4_ib_cq_comp; + if (context) + cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp; + else + cq->mcq.comp = mlx4_ib_cq_comp; cq->mcq.event = mlx4_ib_cq_event; if (context) diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 8b72cf392b34454abbeca0c5a1c8e17aca39ba13..57ecc5b204f3f6fdb01f1c2bebcdb6c752ad2ece 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1975,8 +1975,7 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) dev->caps.num_ports > dev->caps.comp_pool) return; - eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/ - dev->caps.num_ports); + eq_per_port = 
dev->caps.comp_pool / dev->caps.num_ports; /* Init eq table */ added_eqs = 0; @@ -2228,7 +2227,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS; err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count, MLX4_IB_UC_STEER_QPN_ALIGN, - &ibdev->steer_qpn_base); + &ibdev->steer_qpn_base, 0); if (err) goto err_counter; diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 9c5150c3cb311a147195eab8f8a7b9290e6629fd..cf000b7ad64f9b11698ee8dbfa2aec9883e886db 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -802,16 +802,21 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, } } } else { - /* Raw packet QPNs must be aligned to 8 bits. If not, the WQE - * BlueFlame setup flow wrongly causes VLAN insertion. */ + /* Raw packet QPNs may not have bits 6,7 set in their qp_num; + * otherwise, the WQE BlueFlame setup flow wrongly causes + * VLAN insertion. */ if (init_attr->qp_type == IB_QPT_RAW_PACKET) - err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn); + err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn, + (init_attr->cap.max_send_wr ? + MLX4_RESERVE_ETH_BF_QP : 0) | + (init_attr->cap.max_recv_wr ? + MLX4_RESERVE_A0_QP : 0)); else if (qp->flags & MLX4_IB_QP_NETIF) err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn); else err = mlx4_qp_reserve_range(dev->dev, 1, 1, - &qpn); + &qpn, 0); if (err) goto err_proxy; } diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 10cfce5119a9d2f4ef89c68967236285e7a8730e..c463e7bba5f453f0303a74b88ec96275c796ee0f 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -805,14 +805,14 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries, } - mlx5_vfree(cqb); + kvfree(cqb); return &cq->ibcq; err_cmd: mlx5_core_destroy_cq(dev->mdev, &cq->mcq); err_cqb: - mlx5_vfree(cqb); + kvfree(cqb); if (context) destroy_cq_user(cq, context); else @@ -1159,11 +1159,11 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) } mutex_unlock(&cq->resize_mutex); - mlx5_vfree(in); + kvfree(in); return 0; ex_alloc: - mlx5_vfree(in); + kvfree(in); ex_resize: if (udata) diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 8ee7cb46e0590668bd79277bc672a9d41f90f02d..5a80dd9937612f5804d64135838ff389570e6b6e 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -159,6 +159,9 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num) sizeof(*in), reg_mr_callback, mr, &mr->out); if (err) { + spin_lock_irq(&ent->lock); + ent->pending--; + spin_unlock_irq(&ent->lock); mlx5_ib_warn(dev, "create mkey failed %d\n", err); kfree(mr); break; @@ -853,14 +856,14 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, goto err_2; } mr->umem = umem; - mlx5_vfree(in); + kvfree(in); mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key); return mr; err_2: - mlx5_vfree(in); + kvfree(in); err_1: kfree(mr); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index e261a53f9a02a5edf18725cc75daaef3661fc64a..1cae1c7132b4b6f84ed5055c1c53e426edf8a354 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -647,7 +647,7 @@ err_unmap: mlx5_ib_db_unmap_user(context, &qp->db); err_free: - mlx5_vfree(*in); + kvfree(*in); err_umem: if (qp->umem) @@ -761,7 +761,7 @@ err_wrid: kfree(qp->rq.wrid); err_free: - mlx5_vfree(*in); + kvfree(*in); err_buf: 
mlx5_buf_free(dev->mdev, &qp->buf); @@ -971,7 +971,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, goto err_create; } - mlx5_vfree(in); + kvfree(in); /* Hardware wants QPN written in big-endian order (after * shifting) for send doorbell. Precompute this value to save * a little bit when posting sends. @@ -988,7 +988,7 @@ err_create: else if (qp->create_type == MLX5_QP_KERNEL) destroy_qp_kernel(dev, qp); - mlx5_vfree(in); + kvfree(in); return err; } @@ -1011,9 +1011,14 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv } } else { spin_lock_irq(&send_cq->lock); + __acquire(&recv_cq->lock); } } else if (recv_cq) { spin_lock_irq(&recv_cq->lock); + __acquire(&send_cq->lock); + } else { + __acquire(&send_cq->lock); + __acquire(&recv_cq->lock); } } @@ -1033,10 +1038,15 @@ static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *re spin_unlock_irq(&recv_cq->lock); } } else { + __release(&recv_cq->lock); spin_unlock_irq(&send_cq->lock); } } else if (recv_cq) { + __release(&send_cq->lock); spin_unlock_irq(&recv_cq->lock); + } else { + __release(&recv_cq->lock); + __release(&send_cq->lock); } } @@ -2411,7 +2421,7 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr) static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, struct mlx5_wqe_ctrl_seg **ctrl, - struct ib_send_wr *wr, int *idx, + struct ib_send_wr *wr, unsigned *idx, int *size, int nreq) { int err = 0; @@ -2737,6 +2747,8 @@ out: if (bf->need_lock) spin_lock(&bf->lock); + else + __acquire(&bf->lock); /* TBD enable WC */ if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) { @@ -2753,6 +2765,8 @@ out: bf->offset ^= bf->buf_size; if (bf->need_lock) spin_unlock(&bf->lock); + else + __release(&bf->lock); } spin_unlock_irqrestore(&qp->sq.lock, flags); diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 97cc1baaa8e3e8d1a3ea2c8b66dd344b14dc470b..41fec66217dd3b2b7f61fcff7068f01a06493933 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -141,7 +141,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, return 0; err_in: - mlx5_vfree(*in); + kvfree(*in); err_umem: ib_umem_release(srq->umem); @@ -209,7 +209,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq, return 0; err_in: - mlx5_vfree(*in); + kvfree(*in); err_buf: mlx5_buf_free(dev->mdev, &srq->buf); @@ -306,7 +306,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn); in->ctx.db_record = cpu_to_be64(srq->db.dma); err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen); - mlx5_vfree(in); + kvfree(in); if (err) { mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err); goto err_usr_kern_srq; diff --git a/drivers/isdn/capi/capiutil.c b/drivers/isdn/capi/capiutil.c index 36c1b37cea0ac4ba7f4578ae9ae28454dcd75c2e..9846d82eb0975608bc1d0706a311d379dbe7e4eb 100644 --- a/drivers/isdn/capi/capiutil.c +++ b/drivers/isdn/capi/capiutil.c @@ -201,7 +201,7 @@ static unsigned char *cpars[] = #define structTRcpyovl(x, y, l) memmove(y, x, l) /*-------------------------------------------------------*/ -static unsigned command_2_index(unsigned c, unsigned sc) +static unsigned command_2_index(u8 c, u8 sc) { if (c & 0x80) c = 0x9 + (c & 0x0f); diff --git a/drivers/isdn/gigaset/Kconfig b/drivers/isdn/gigaset/Kconfig index dde5e09e62675efa7ae108fd7a61e7488f4dcbb5..83f62b8d82b590de98a0fc33758038b0fd37aabe 100644 --- 
a/drivers/isdn/gigaset/Kconfig +++ b/drivers/isdn/gigaset/Kconfig @@ -20,7 +20,7 @@ if ISDN_DRV_GIGASET config GIGASET_CAPI bool "Gigaset CAPI support" depends on ISDN_CAPI='y'||(ISDN_CAPI='m'&&ISDN_DRV_GIGASET='m') - default ISDN_I4L='n' + default 'y' help Build the Gigaset driver as a CAPI 2.0 driver interfacing with the Kernel CAPI subsystem. To use it with the old ISDN4Linux diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h index eb63a0f7a02a94191ef54c54740c778ece40a2cf..166537e2dfca9cc5838386090eafc4a7718cc4d7 100644 --- a/drivers/isdn/gigaset/gigaset.h +++ b/drivers/isdn/gigaset/gigaset.h @@ -751,9 +751,6 @@ void gigaset_stop(struct cardstate *cs); /* Tell common.c that the driver is being unloaded. */ int gigaset_shutdown(struct cardstate *cs); -/* Tell common.c that an skb has been sent. */ -void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb); - /* Append event to the queue. * Returns NULL on failure or a pointer to the event on success. * ptr must be kmalloc()ed (and not be freed by the caller). diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c index a8e652dac54d8aed3691fdb98920328a51a45e49..5f306e2eece5e2a93c1dd86ac8eb29ab59a33e06 100644 --- a/drivers/isdn/gigaset/usb-gigaset.c +++ b/drivers/isdn/gigaset/usb-gigaset.c @@ -293,7 +293,7 @@ static int gigaset_close_bchannel(struct bc_state *bcs) } static int write_modem(struct cardstate *cs); -static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb); +static int send_cb(struct cardstate *cs); /* Write tasklet handler: Continue sending current skb, or send command, or @@ -303,8 +303,6 @@ static void gigaset_modem_fill(unsigned long data) { struct cardstate *cs = (struct cardstate *) data; struct bc_state *bcs = &cs->bcs[0]; /* only one channel */ - struct cmdbuf_t *cb; - int again; gig_dbg(DEBUG_OUTPUT, "modem_fill"); @@ -313,36 +311,32 @@ static void gigaset_modem_fill(unsigned long data) return; } - do { - again = 0; - if (!bcs->tx_skb) { /* no skb is being sent */ - cb = cs->cmdbuf; - if (cb) { /* commands to send? */ - gig_dbg(DEBUG_OUTPUT, "modem_fill: cb"); - if (send_cb(cs, cb) < 0) { - gig_dbg(DEBUG_OUTPUT, - "modem_fill: send_cb failed"); - again = 1; /* no callback will be - called! */ - } - } else { /* skbs to send? */ - bcs->tx_skb = skb_dequeue(&bcs->squeue); - if (bcs->tx_skb) - gig_dbg(DEBUG_INTR, - "Dequeued skb (Adr: %lx)!", - (unsigned long) bcs->tx_skb); - } - } - - if (bcs->tx_skb) { - gig_dbg(DEBUG_OUTPUT, "modem_fill: tx_skb"); - if (write_modem(cs) < 0) { +again: + if (!bcs->tx_skb) { /* no skb is being sent */ + if (cs->cmdbuf) { /* commands to send? */ + gig_dbg(DEBUG_OUTPUT, "modem_fill: cb"); + if (send_cb(cs) < 0) { gig_dbg(DEBUG_OUTPUT, - "modem_fill: write_modem failed"); - again = 1; /* no callback will be called! */ + "modem_fill: send_cb failed"); + goto again; /* no callback will be called! */ } + return; } - } while (again); + + /* skbs to send? */ + bcs->tx_skb = skb_dequeue(&bcs->squeue); + if (!bcs->tx_skb) + return; + + gig_dbg(DEBUG_INTR, "Dequeued skb (Adr: %lx)!", + (unsigned long) bcs->tx_skb); + } + + gig_dbg(DEBUG_OUTPUT, "modem_fill: tx_skb"); + if (write_modem(cs) < 0) { + gig_dbg(DEBUG_OUTPUT, "modem_fill: write_modem failed"); + goto again; /* no callback will be called! 
*/ + } } /* @@ -429,9 +423,9 @@ static void gigaset_write_bulk_callback(struct urb *urb) spin_unlock_irqrestore(&cs->lock, flags); } -static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb) +static int send_cb(struct cardstate *cs) { - struct cmdbuf_t *tcb; + struct cmdbuf_t *cb = cs->cmdbuf; unsigned long flags; int count; int status = -ENOENT; @@ -439,26 +433,27 @@ static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb) do { if (!cb->len) { - tcb = cb; - spin_lock_irqsave(&cs->cmdlock, flags); cs->cmdbytes -= cs->curlen; gig_dbg(DEBUG_OUTPUT, "send_cb: sent %u bytes, %u left", cs->curlen, cs->cmdbytes); - cs->cmdbuf = cb = cb->next; - if (cb) { - cb->prev = NULL; - cs->curlen = cb->len; + cs->cmdbuf = cb->next; + if (cs->cmdbuf) { + cs->cmdbuf->prev = NULL; + cs->curlen = cs->cmdbuf->len; } else { cs->lastcmdbuf = NULL; cs->curlen = 0; } spin_unlock_irqrestore(&cs->cmdlock, flags); - if (tcb->wake_tasklet) - tasklet_schedule(tcb->wake_tasklet); - kfree(tcb); + if (cb->wake_tasklet) + tasklet_schedule(cb->wake_tasklet); + kfree(cb); + + cb = cs->cmdbuf; } + if (cb) { count = min(cb->len, ucs->bulk_out_size); gig_dbg(DEBUG_OUTPUT, "send_cb: send %d bytes", count); diff --git a/drivers/isdn/hisax/hfc_2bs0.c b/drivers/isdn/hisax/hfc_2bs0.c index 838531b6a60e487d69b958bf1fd759d150e54e75..14dada42874ef7aa709311a5e69f9a526ad5a26b 100644 --- a/drivers/isdn/hisax/hfc_2bs0.c +++ b/drivers/isdn/hisax/hfc_2bs0.c @@ -31,7 +31,7 @@ WaitForBusy(struct IsdnCardState *cs) to--; } if (!to) { - printk(KERN_WARNING "HiSax: waitforBusy timeout\n"); + printk(KERN_WARNING "HiSax: %s timeout\n", __func__); return (0); } else return (to); diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c index fa1fefd711cde875fbcdf9b56a20f5375f39ced3..b1fad81f0722e64b2e0d671215e8dc89c295f4a1 100644 --- a/drivers/isdn/hisax/hfc_sx.c +++ b/drivers/isdn/hisax/hfc_sx.c @@ -1159,7 +1159,8 @@ hfcsx_l2l1(struct PStack *st, int pr, void *arg) case (PH_PULL | INDICATION): spin_lock_irqsave(&bcs->cs->lock, flags); if (bcs->tx_skb) { - printk(KERN_WARNING "hfc_l2l1: this shouldn't happen\n"); + printk(KERN_WARNING "%s: this shouldn't happen\n", + __func__); } else { // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag); bcs->tx_skb = skb; diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c index 849a80752685bdab2ad945824eadd03c924ff7f5..678bd5224bc338a2767a106fb8057f544f218050 100644 --- a/drivers/isdn/hisax/hfc_usb.c +++ b/drivers/isdn/hisax/hfc_usb.c @@ -927,9 +927,8 @@ start_int_fifo(usb_fifo *fifo) fifo->active = 1; /* must be marked active */ errcode = usb_submit_urb(fifo->urb, GFP_KERNEL); if (errcode) { - printk(KERN_ERR - "HFC-S USB: submit URB error(start_int_info): status:%i\n", - errcode); + printk(KERN_ERR "HFC-S USB: submit URB error(%s): status:%i\n", + __func__, errcode); fifo->active = 0; fifo->skbuff = NULL; } diff --git a/drivers/isdn/hisax/ipacx.c b/drivers/isdn/hisax/ipacx.c index 5faa5de24305623bd38bcd4b0047f0063708f172..9cc26b40a43771dee4d670f2416711897bc9ac1a 100644 --- a/drivers/isdn/hisax/ipacx.c +++ b/drivers/isdn/hisax/ipacx.c @@ -580,7 +580,7 @@ bch_fill_fifo(struct BCState *bcs) if (cs->debug & L1_DEB_HSCX_FIFO) { char *t = bcs->blog; - t += sprintf(t, "chb_fill_fifo() B-%d cnt %d", hscx, count); + t += sprintf(t, "%s() B-%d cnt %d", __func__, hscx, count); QuickHex(t, ptr, count); debugl1(cs, "%s", bcs->blog); } diff --git a/drivers/isdn/hisax/isdnl1.c b/drivers/isdn/hisax/isdnl1.c index 
800095781bfb7daa11ee5dba382176f9ba012473..a560842c0e48d4967f7b39d19e0a005553dc644f 100644 --- a/drivers/isdn/hisax/isdnl1.c +++ b/drivers/isdn/hisax/isdnl1.c @@ -867,7 +867,7 @@ l1_msg(struct IsdnCardState *cs, int pr, void *arg) { break; default: if (cs->debug) - debugl1(cs, "l1msg %04X unhandled", pr); + debugl1(cs, "%s %04X unhandled", __func__, pr); break; } st = st->next; diff --git a/drivers/isdn/hisax/isdnl3.c b/drivers/isdn/hisax/isdnl3.c index 45b03840f71691b4495fa6004f4e34c5a7e0d0ec..c754706f83cdc190ca18e299896590430e7b9824 100644 --- a/drivers/isdn/hisax/isdnl3.c +++ b/drivers/isdn/hisax/isdnl3.c @@ -153,7 +153,7 @@ void newl3state(struct l3_process *pc, int state) { if (pc->debug & L3_DEB_STATE) - l3_debug(pc->st, "newstate cr %d %d --> %d", + l3_debug(pc->st, "%s cr %d %d --> %d", __func__, pc->callref & 0x7F, pc->state, state); pc->state = state; diff --git a/drivers/isdn/hysdn/hycapi.c b/drivers/isdn/hysdn/hycapi.c index 00aad10507d8a05710b5955cb47f1c4c97612fd4..93bae94314a69e7bb2a4502475c3a348e67808a4 100644 --- a/drivers/isdn/hysdn/hycapi.c +++ b/drivers/isdn/hysdn/hycapi.c @@ -501,7 +501,7 @@ static char *hycapi_procinfo(struct capi_ctr *ctrl) { hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata); #ifdef HYCAPI_PRINTFNAMES - printk(KERN_NOTICE "hycapi_proc_info\n"); + printk(KERN_NOTICE "%s\n", __func__); #endif if (!cinfo) return ""; diff --git a/drivers/isdn/mISDN/l1oip_codec.c b/drivers/isdn/mISDN/l1oip_codec.c index a601c8472220764c99d004dddf54c238437d1f06..9b033be11a5f0e7a3331429001bdc8cdbac0e4a9 100644 --- a/drivers/isdn/mISDN/l1oip_codec.c +++ b/drivers/isdn/mISDN/l1oip_codec.c @@ -312,10 +312,8 @@ l1oip_ulaw_to_alaw(u8 *data, int len, u8 *result) void l1oip_4bit_free(void) { - if (table_dec) - vfree(table_dec); - if (table_com) - vfree(table_com); + vfree(table_dec); + vfree(table_com); table_com = NULL; table_dec = NULL; } diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index 1be82284cf9dfaa8ab5f5078bed78c3aec095ed3..84b35925ee4dfba74eecaefe85913ee471a74494 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c @@ -163,7 +163,7 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock, memcpy(skb_push(skb, MISDN_HEADER_LEN), mISDN_HEAD_P(skb), MISDN_HEADER_LEN); - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); mISDN_sock_cmsg(sk, msg, skb); @@ -203,7 +203,7 @@ mISDN_sock_sendmsg(struct kiocb *iocb, struct socket *sock, if (!skb) goto done; - if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { + if (memcpy_from_msg(skb_put(skb, len), msg, len)) { err = -EFAULT; goto done; } diff --git a/drivers/isdn/pcbit/layer2.c b/drivers/isdn/pcbit/layer2.c index 42ecfef80132b098735ab2a4d96bc9ec2094eece..46e1240ae0741a54008a976602b11a90f00ea685 100644 --- a/drivers/isdn/pcbit/layer2.c +++ b/drivers/isdn/pcbit/layer2.c @@ -85,7 +85,6 @@ pcbit_l2_write(struct pcbit_dev *dev, ulong msg, ushort refnum, } if ((frame = kmalloc(sizeof(struct frame_buf), GFP_ATOMIC)) == NULL) { - printk(KERN_WARNING "pcbit_2_write: kmalloc failed\n"); dev_kfree_skb(skb); return -1; } diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index 1b7b303085d28d459c2595776fd82b15cf9a0dc5..7aaaf51e1596c5dc399cfa096040822275a05bae 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "vmci_handle_array.h" #include 
"vmci_queue_pair.h" @@ -429,11 +430,11 @@ static int __qp_memcpy_from_queue(void *dest, to_copy = size - bytes_copied; if (is_iovec) { - struct iovec *iov = (struct iovec *)dest; + struct msghdr *msg = dest; int err; /* The iovec will track bytes_copied internally. */ - err = memcpy_toiovec(iov, (u8 *)va + page_offset, + err = memcpy_to_msg(msg, (u8 *)va + page_offset, to_copy); if (err != 0) { if (kernel_if->host) @@ -3264,13 +3265,13 @@ EXPORT_SYMBOL_GPL(vmci_qpair_enquev); * of bytes dequeued or < 0 on error. */ ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, - void *iov, + struct msghdr *msg, size_t iov_size, int buf_type) { ssize_t result; - if (!qpair || !iov) + if (!qpair) return VMCI_ERROR_INVALID_ARGS; qp_lock(qpair); @@ -3279,7 +3280,7 @@ ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, result = qp_dequeue_locked(qpair->produce_q, qpair->consume_q, qpair->consume_q_size, - iov, iov_size, + msg, iov_size, qp_memcpy_from_queue_iov, true); @@ -3308,13 +3309,13 @@ EXPORT_SYMBOL_GPL(vmci_qpair_dequev); * of bytes peeked or < 0 on error. */ ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, - void *iov, + struct msghdr *msg, size_t iov_size, int buf_type) { ssize_t result; - if (!qpair || !iov) + if (!qpair) return VMCI_ERROR_INVALID_ARGS; qp_lock(qpair); @@ -3323,7 +3324,7 @@ ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, result = qp_dequeue_locked(qpair->produce_q, qpair->consume_q, qpair->consume_q_size, - iov, iov_size, + msg, iov_size, qp_memcpy_from_queue_iov, false); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index f9009be3f3077057222984bbf6d03fdac8fcbe7c..d6607ee9c85506bb4fcc5d2077b8dc4e582d4b4b 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -145,6 +145,26 @@ config MACVTAP To compile this driver as a module, choose M here: the module will be called macvtap. + +config IPVLAN + tristate "IP-VLAN support" + depends on INET + depends on IPV6 + ---help--- + This allows one to create virtual devices off of a main interface + and packets will be delivered based on the dest L3 (IPv6/IPv4 addr) + on packets. All interfaces (including the main interface) share L2 + making it transparent to the connected L2 switch. + + Ipvlan devices can be added using the "ip" command from the + iproute2 package starting with the iproute2-X.Y.ZZ release: + + "ip link add link [ NAME ] type ipvlan" + + To compile this driver as a module, choose M here: the module + will be called ipvlan. 
+ + config VXLAN tristate "Virtual eXtensible Local Area Network (VXLAN)" depends on INET diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 61aefdd1e173932109b175e3de37688e6ad160c0..e25fdd7d905e334dff7ccbcaba73b57436248f9f 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -6,6 +6,7 @@ # Networking Core Drivers # obj-$(CONFIG_BONDING) += bonding/ +obj-$(CONFIG_IPVLAN) += ipvlan/ obj-$(CONFIG_DUMMY) += dummy.o obj-$(CONFIG_EQUALIZER) += eql.o obj-$(CONFIG_IFB) += ifb.o diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 2110215f3528fb0cf41c7c188de3821d171e170b..8baa87df173841ea90e07895dbb0620d4abb0a8d 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -29,8 +29,8 @@ #include #include #include -#include "bonding.h" -#include "bond_3ad.h" +#include +#include /* General definitions */ #define AD_SHORT_TIMEOUT 1 @@ -79,15 +79,21 @@ * -------------------------------------------------------------- * 16 6 1 0 */ -#define AD_DUPLEX_KEY_BITS 0x1 -#define AD_SPEED_KEY_BITS 0x3E -#define AD_USER_KEY_BITS 0xFFC0 - -#define AD_LINK_SPEED_BITMASK_1MBPS 0x1 -#define AD_LINK_SPEED_BITMASK_10MBPS 0x2 -#define AD_LINK_SPEED_BITMASK_100MBPS 0x4 -#define AD_LINK_SPEED_BITMASK_1000MBPS 0x8 -#define AD_LINK_SPEED_BITMASK_10000MBPS 0x10 +#define AD_DUPLEX_KEY_MASKS 0x1 +#define AD_SPEED_KEY_MASKS 0x3E +#define AD_USER_KEY_MASKS 0xFFC0 + +enum ad_link_speed_type { + AD_LINK_SPEED_1MBPS = 1, + AD_LINK_SPEED_10MBPS, + AD_LINK_SPEED_100MBPS, + AD_LINK_SPEED_1000MBPS, + AD_LINK_SPEED_2500MBPS, + AD_LINK_SPEED_10000MBPS, + AD_LINK_SPEED_20000MBPS, + AD_LINK_SPEED_40000MBPS, + AD_LINK_SPEED_56000MBPS +}; /* compare MAC addresses */ #define MAC_ADDRESS_EQUAL(A, B) \ @@ -240,12 +246,16 @@ static inline int __check_agg_selection_timer(struct port *port) * __get_link_speed - get a port's speed * @port: the port we're looking at * - * Return @port's speed in 802.3ad bitmask format. i.e. one of: + * Return @port's speed in 802.3ad enum format. i.e. 
one of: * 0, - * %AD_LINK_SPEED_BITMASK_10MBPS, - * %AD_LINK_SPEED_BITMASK_100MBPS, - * %AD_LINK_SPEED_BITMASK_1000MBPS, - * %AD_LINK_SPEED_BITMASK_10000MBPS + * %AD_LINK_SPEED_10MBPS, + * %AD_LINK_SPEED_100MBPS, + * %AD_LINK_SPEED_1000MBPS, + * %AD_LINK_SPEED_2500MBPS, + * %AD_LINK_SPEED_10000MBPS + * %AD_LINK_SPEED_20000MBPS + * %AD_LINK_SPEED_40000MBPS + * %AD_LINK_SPEED_56000MBPS */ static u16 __get_link_speed(struct port *port) { @@ -262,19 +272,35 @@ static u16 __get_link_speed(struct port *port) else { switch (slave->speed) { case SPEED_10: - speed = AD_LINK_SPEED_BITMASK_10MBPS; + speed = AD_LINK_SPEED_10MBPS; break; case SPEED_100: - speed = AD_LINK_SPEED_BITMASK_100MBPS; + speed = AD_LINK_SPEED_100MBPS; break; case SPEED_1000: - speed = AD_LINK_SPEED_BITMASK_1000MBPS; + speed = AD_LINK_SPEED_1000MBPS; + break; + + case SPEED_2500: + speed = AD_LINK_SPEED_2500MBPS; break; case SPEED_10000: - speed = AD_LINK_SPEED_BITMASK_10000MBPS; + speed = AD_LINK_SPEED_10000MBPS; + break; + + case SPEED_20000: + speed = AD_LINK_SPEED_20000MBPS; + break; + + case SPEED_40000: + speed = AD_LINK_SPEED_40000MBPS; + break; + + case SPEED_56000: + speed = AD_LINK_SPEED_56000MBPS; break; default: @@ -625,21 +651,33 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator) if (aggregator->num_of_ports) { switch (__get_link_speed(aggregator->lag_ports)) { - case AD_LINK_SPEED_BITMASK_1MBPS: + case AD_LINK_SPEED_1MBPS: bandwidth = aggregator->num_of_ports; break; - case AD_LINK_SPEED_BITMASK_10MBPS: + case AD_LINK_SPEED_10MBPS: bandwidth = aggregator->num_of_ports * 10; break; - case AD_LINK_SPEED_BITMASK_100MBPS: + case AD_LINK_SPEED_100MBPS: bandwidth = aggregator->num_of_ports * 100; break; - case AD_LINK_SPEED_BITMASK_1000MBPS: + case AD_LINK_SPEED_1000MBPS: bandwidth = aggregator->num_of_ports * 1000; break; - case AD_LINK_SPEED_BITMASK_10000MBPS: + case AD_LINK_SPEED_2500MBPS: + bandwidth = aggregator->num_of_ports * 2500; + break; + case AD_LINK_SPEED_10000MBPS: bandwidth = aggregator->num_of_ports * 10000; break; + case AD_LINK_SPEED_20000MBPS: + bandwidth = aggregator->num_of_ports * 20000; + break; + case AD_LINK_SPEED_40000MBPS: + bandwidth = aggregator->num_of_ports * 40000; + break; + case AD_LINK_SPEED_56000MBPS: + bandwidth = aggregator->num_of_ports * 56000; + break; default: bandwidth = 0; /* to silence the compiler */ } @@ -1011,7 +1049,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) port->sm_rx_state); switch (port->sm_rx_state) { case AD_RX_INITIALIZE: - if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)) + if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS)) port->sm_vars &= ~AD_PORT_LACP_ENABLED; else port->sm_vars |= AD_PORT_LACP_ENABLED; @@ -1318,7 +1356,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr) /* update the new aggregator's parameters * if port was responsed from the end-user */ - if (port->actor_oper_port_key & AD_DUPLEX_KEY_BITS) + if (port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS) /* if port is full duplex */ port->aggregator->is_individual = false; else @@ -1846,7 +1884,7 @@ void bond_3ad_bind_slave(struct slave *slave) /* if the port is not full duplex, then the port should be not * lacp Enabled */ - if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)) + if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS)) port->sm_vars &= ~AD_PORT_LACP_ENABLED; /* actor system is the bond's system */ port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr; @@ -2214,7 +2252,7 @@ void 
bond_3ad_adapter_speed_changed(struct slave *slave) spin_lock_bh(&slave->bond->mode_lock); - port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; + port->actor_admin_port_key &= ~AD_SPEED_KEY_MASKS; port->actor_oper_port_key = port->actor_admin_port_key |= (__get_link_speed(port) << 1); netdev_dbg(slave->bond->dev, "Port %d changed speed\n", port->actor_port_number); @@ -2247,7 +2285,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave) spin_lock_bh(&slave->bond->mode_lock); - port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; + port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS; port->actor_oper_port_key = port->actor_admin_port_key |= __get_duplex(port); netdev_dbg(slave->bond->dev, "Port %d changed duplex\n", port->actor_port_number); @@ -2289,18 +2327,18 @@ void bond_3ad_handle_link_change(struct slave *slave, char link) */ if (link == BOND_LINK_UP) { port->is_enabled = true; - port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; + port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS; port->actor_oper_port_key = port->actor_admin_port_key |= __get_duplex(port); - port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; + port->actor_admin_port_key &= ~AD_SPEED_KEY_MASKS; port->actor_oper_port_key = port->actor_admin_port_key |= (__get_link_speed(port) << 1); } else { /* link has failed */ port->is_enabled = false; - port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; + port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS; port->actor_oper_port_key = (port->actor_admin_port_key &= - ~AD_SPEED_KEY_BITS); + ~AD_SPEED_KEY_MASKS); } netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n", port->actor_port_number, diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index d2eadab787c55d8fc0f361755409a880d64abfb3..bb9e9fc45e1ba4f84bbc60dceb79c3b6c7103ae1 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -37,8 +37,8 @@ #include #include #include -#include "bonding.h" -#include "bond_alb.h" +#include +#include @@ -475,12 +475,8 @@ static void rlb_update_client(struct rlb_client_info *client_info) skb->dev = client_info->slave->dev; if (client_info->vlan_id) { - skb = vlan_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id); - if (!skb) { - netdev_err(client_info->slave->bond->dev, - "failed to insert VLAN tag\n"); - continue; - } + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + client_info->vlan_id); } arp_xmit(skb); @@ -951,13 +947,8 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], skb->priority = TC_PRIO_CONTROL; skb->dev = slave->dev; - if (vid) { - skb = vlan_put_tag(skb, vlan_proto, vid); - if (!skb) { - netdev_err(slave->bond->dev, "failed to insert VLAN tag\n"); - return; - } - } + if (vid) + __vlan_hwaccel_put_tag(skb, vlan_proto, vid); dev_queue_xmit(skb); } @@ -1326,7 +1317,7 @@ static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond, } /* no suitable interface, frame not sent */ - dev_kfree_skb_any(skb); + bond_tx_drop(bond->dev, skb); out: return NETDEV_TX_OK; } diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c index 8f99082f90ebed3bf7d91a8914b66704a1c74512..e52e25a977fa203d82d4d35260584c1b1ef644a9 100644 --- a/drivers/net/bonding/bond_debugfs.c +++ b/drivers/net/bonding/bond_debugfs.c @@ -3,8 +3,8 @@ #include #include -#include "bonding.h" -#include "bond_alb.h" +#include +#include #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 
a5115fb7cf331b9c39ed19a0b53d1ff527739295..184c434ae3055e4b8a586c116d1274ddbccec6c5 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -77,9 +77,9 @@ #include #include #include -#include "bonding.h" -#include "bond_3ad.h" -#include "bond_alb.h" +#include +#include +#include /*---------------------------- Module parameters ----------------------------*/ @@ -1526,6 +1526,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) } #endif + if (!(bond_dev->features & NETIF_F_LRO)) + dev_disable_lro(slave_dev); + res = netdev_rx_handler_register(slave_dev, bond_handle_frame, new_slave); if (res) { @@ -2143,8 +2146,8 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, netdev_dbg(slave_dev, "inner tag: proto %X vid %X\n", ntohs(outer_tag->vlan_proto), tags->vlan_id); - skb = __vlan_put_tag(skb, tags->vlan_proto, - tags->vlan_id); + skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto, + tags->vlan_id); if (!skb) { net_err_ratelimited("failed to insert inner VLAN tag\n"); return; @@ -2156,12 +2159,8 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, if (outer_tag->vlan_id) { netdev_dbg(slave_dev, "outer tag: proto %X vid %X\n", ntohs(outer_tag->vlan_proto), outer_tag->vlan_id); - skb = vlan_put_tag(skb, outer_tag->vlan_proto, - outer_tag->vlan_id); - if (!skb) { - net_err_ratelimited("failed to insert outer VLAN tag\n"); - return; - } + __vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto, + outer_tag->vlan_id); } xmit: @@ -3523,7 +3522,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl } } /* no slave that can tx has been found */ - dev_kfree_skb_any(skb); + bond_tx_drop(bond->dev, skb); } /** @@ -3585,7 +3584,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev slave_id = bond_rr_gen_slave_id(bond); bond_xmit_slave_id(bond, skb, slave_id % slave_cnt); } else { - dev_kfree_skb_any(skb); + bond_tx_drop(bond_dev, skb); } } @@ -3604,7 +3603,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d if (slave) bond_dev_queue_xmit(bond, skb, slave->dev); else - dev_kfree_skb_any(skb); + bond_tx_drop(bond_dev, skb); return NETDEV_TX_OK; } @@ -3748,8 +3747,7 @@ int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev) slave = slaves->arr[bond_xmit_hash(bond, skb) % count]; bond_dev_queue_xmit(bond, skb, slave->dev); } else { - dev_kfree_skb_any(skb); - atomic_long_inc(&dev->tx_dropped); + bond_tx_drop(dev, skb); } return NETDEV_TX_OK; @@ -3779,7 +3777,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev) if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) bond_dev_queue_xmit(bond, skb, slave->dev); else - dev_kfree_skb_any(skb); + bond_tx_drop(bond_dev, skb); return NETDEV_TX_OK; } @@ -3859,7 +3857,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev /* Should never happen, mode already checked */ netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond)); WARN_ON_ONCE(1); - dev_kfree_skb_any(skb); + bond_tx_drop(dev, skb); return NETDEV_TX_OK; } } @@ -3879,7 +3877,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) if (bond_has_slaves(bond)) ret = __bond_start_xmit(skb, dev); else - dev_kfree_skb_any(skb); + bond_tx_drop(dev, skb); rcu_read_unlock(); return ret; diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index 
45f09a66e6c96662febe3015fad41b4e5e6251bf..7b11243660113484f59d97e2b3ffc9d5b1cb5d98 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -17,7 +17,7 @@ #include #include #include -#include "bonding.h" +#include static size_t bond_get_slave_size(const struct net_device *bond_dev, const struct net_device *slave_dev) diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index b62697f4a3deb90a5c2209ad6f032c6984284510..1a61cc9b3402b1e96e973cf88f5e51bb4d56bad2 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -16,7 +16,7 @@ #include #include #include -#include "bonding.h" +#include static int bond_option_active_slave_set(struct bonding *bond, const struct bond_opt_value *newval); diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c index a3948f8d1e533d86502c3efa4eac839dcba0e557..976f5ad2a0f2d17975e0523a70c0d497b5a4ee32 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c @@ -2,7 +2,7 @@ #include #include #include -#include "bonding.h" +#include static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 8ffbafd500fd439f4277489aa533790f8f43e774..7e9e151d4d6168821e28ef82e8c197b2c4666b87 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -40,7 +40,7 @@ #include #include -#include "bonding.h" +#include #define to_dev(obj) container_of(obj, struct device, kobj) #define to_bond(cd) ((struct bonding *)(netdev_priv(to_net_dev(cd)))) diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c index b01b0ce4d1bef13d48c6f41c4e2e6a8f5d50e8d4..23618a8316122e87c074c863b0dc687f7ed1dcea 100644 --- a/drivers/net/bonding/bond_sysfs_slave.c +++ b/drivers/net/bonding/bond_sysfs_slave.c @@ -12,7 +12,7 @@ #include #include -#include "bonding.h" +#include struct slave_attribute { struct attribute attr; diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index fc9304143f449c8eee3aaddcaf1d701fc90b7bb5..c533c62b0f5ee7456c152e29f07e302924edb8a1 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -29,4 +29,5 @@ obj-$(CONFIG_CAN_GRCAN) += grcan.o obj-$(CONFIG_CAN_RCAR) += rcar_can.o obj-$(CONFIG_CAN_XILINXCAN) += xilinx_can.o -subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG +subdir-ccflags-y += -D__CHECK_ENDIAN__ +subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) += -DDEBUG diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 8e78bb48f5a4399f0fef7e991d64984c638c077a..f94a9fa60488ed8e23523f5f8c3665133f039dbb 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -603,6 +604,8 @@ static int c_can_start(struct net_device *dev) priv->can.state = CAN_STATE_ERROR_ACTIVE; + /* activate pins */ + pinctrl_pm_select_default_state(dev->dev.parent); return 0; } @@ -611,6 +614,9 @@ static void c_can_stop(struct net_device *dev) struct c_can_priv *priv = netdev_priv(dev); c_can_irq_control(priv, false); + + /* deactivate pins */ + pinctrl_pm_select_sleep_state(dev->dev.parent); priv->can.state = CAN_STATE_STOPPED; } @@ -1244,6 +1250,13 @@ int register_c_can_dev(struct net_device *dev) struct c_can_priv *priv = netdev_priv(dev); int err; + /* Deactivate pins to prevent DRA7 DCAN IP from being + * stuck in transition when module is disabled. 
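The pinctrl calls added to c_can_start()/c_can_stop() above follow the usual pinctrl power-management pattern: keep the pads parked in the "sleep" state whenever the controller is down and select the "default" state only while it is running, so the DCAN IP is never left driving half-configured pins. A minimal sketch of the pattern, with hypothetical open/stop callbacks standing in for the c_can ones:

#include <linux/netdevice.h>
#include <linux/pinctrl/consumer.h>

static int foo_open(struct net_device *dev)
{
	/* pads active only while the IP block is running */
	pinctrl_pm_select_default_state(dev->dev.parent);
	return 0;
}

static int foo_stop(struct net_device *dev)
{
	/* park the pads again; also done once at registration time */
	pinctrl_pm_select_sleep_state(dev->dev.parent);
	return 0;
}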
+ * Pins are activated in c_can_start() and deactivated + * in c_can_stop() + */ + pinctrl_pm_select_sleep_state(dev->dev.parent); + c_can_pm_runtime_enable(priv); dev->flags |= IFF_ECHO; /* we support local echo */ diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h index 99ad1aa576b045197f82780d64936f3b7fe5651a..8acdc7fa4792f24438cb0cd84d73b44f0c46f7d8 100644 --- a/drivers/net/can/c_can/c_can.h +++ b/drivers/net/can/c_can/c_can.h @@ -169,6 +169,28 @@ enum c_can_dev_id { BOSCH_D_CAN, }; +struct raminit_bits { + u8 start; + u8 done; +}; + +struct c_can_driver_data { + enum c_can_dev_id id; + + /* RAMINIT register description. Optional. */ + const struct raminit_bits *raminit_bits; /* Array of START/DONE bit positions */ + u8 raminit_num; /* Number of CAN instances on the SoC */ + bool raminit_pulse; /* If set, sets and clears START bit (pulse) */ +}; + +/* Out of band RAMINIT register access via syscon regmap */ +struct c_can_raminit { + struct regmap *syscon; /* for raminit ctrl. reg. access */ + unsigned int reg; /* register index within syscon */ + struct raminit_bits bits; + bool needs_pulse; +}; + /* c_can private data structure */ struct c_can_priv { struct can_priv can; /* must be the first member */ @@ -186,8 +208,7 @@ struct c_can_priv { const u16 *regs; void *priv; /* for board-specific data */ enum c_can_dev_id type; - u32 __iomem *raminit_ctrlreg; - int instance; + struct c_can_raminit raminit_sys; /* RAMINIT via syscon regmap */ void (*raminit) (const struct c_can_priv *priv, bool enable); u32 comm_rcv_high; u32 rxmasked; diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index fb279d6ae484ffe3c66f45ab6a8cbb339ef0808f..a4535d2142a727718036c5c7deab949c960c53dc 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -32,14 +32,13 @@ #include #include #include +#include +#include #include #include "c_can.h" -#define CAN_RAMINIT_START_MASK(i) (0x001 << (i)) -#define CAN_RAMINIT_DONE_MASK(i) (0x100 << (i)) -#define CAN_RAMINIT_ALL_MASK(i) (0x101 << (i)) #define DCAN_RAM_INIT_BIT (1 << 3) static DEFINE_SPINLOCK(raminit_lock); /* @@ -72,39 +71,63 @@ static void c_can_plat_write_reg_aligned_to_32bit(const struct c_can_priv *priv, writew(val, priv->base + 2 * priv->regs[index]); } -static void c_can_hw_raminit_wait_ti(const struct c_can_priv *priv, u32 mask, - u32 val) +static void c_can_hw_raminit_wait_syscon(const struct c_can_priv *priv, + u32 mask, u32 val) { + const struct c_can_raminit *raminit = &priv->raminit_sys; + int timeout = 0; + u32 ctrl = 0; + /* We look only at the bits of our instance. */ val &= mask; - while ((readl(priv->raminit_ctrlreg) & mask) != val) + do { udelay(1); + timeout++; + + regmap_read(raminit->syscon, raminit->reg, &ctrl); + if (timeout == 1000) { + dev_err(&priv->dev->dev, "%s: time out\n", __func__); + break; + } + } while ((ctrl & mask) != val); } -static void c_can_hw_raminit_ti(const struct c_can_priv *priv, bool enable) +static void c_can_hw_raminit_syscon(const struct c_can_priv *priv, bool enable) { - u32 mask = CAN_RAMINIT_ALL_MASK(priv->instance); - u32 ctrl; + const struct c_can_raminit *raminit = &priv->raminit_sys; + u32 ctrl = 0; + u32 mask; spin_lock(&raminit_lock); - ctrl = readl(priv->raminit_ctrlreg); + mask = 1 << raminit->bits.start | 1 << raminit->bits.done; + regmap_read(raminit->syscon, raminit->reg, &ctrl); + /* We clear the done and start bit first. 
The start bit is * looking at the 0 -> transition, but is not self clearing; * And we clear the init done bit as well. + * NOTE: DONE must be written with 1 to clear it. */ - ctrl &= ~CAN_RAMINIT_START_MASK(priv->instance); - ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); - writel(ctrl, priv->raminit_ctrlreg); - ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance); - c_can_hw_raminit_wait_ti(priv, mask, ctrl); + ctrl &= ~(1 << raminit->bits.start); + ctrl |= 1 << raminit->bits.done; + regmap_write(raminit->syscon, raminit->reg, ctrl); + + ctrl &= ~(1 << raminit->bits.done); + c_can_hw_raminit_wait_syscon(priv, mask, ctrl); if (enable) { /* Set start bit and wait for the done bit. */ - ctrl |= CAN_RAMINIT_START_MASK(priv->instance); - writel(ctrl, priv->raminit_ctrlreg); - ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); - c_can_hw_raminit_wait_ti(priv, mask, ctrl); + ctrl |= 1 << raminit->bits.start; + regmap_write(raminit->syscon, raminit->reg, ctrl); + + /* clear START bit if start pulse is needed */ + if (raminit->needs_pulse) { + ctrl &= ~(1 << raminit->bits.start); + regmap_write(raminit->syscon, raminit->reg, ctrl); + } + + ctrl |= 1 << raminit->bits.done; + c_can_hw_raminit_wait_syscon(priv, mask, ctrl); } spin_unlock(&raminit_lock); } @@ -159,26 +182,60 @@ static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable) } } +static const struct c_can_driver_data c_can_drvdata = { + .id = BOSCH_C_CAN, +}; + +static const struct c_can_driver_data d_can_drvdata = { + .id = BOSCH_D_CAN, +}; + +static const struct raminit_bits dra7_raminit_bits[] = { + [0] = { .start = 3, .done = 1, }, + [1] = { .start = 5, .done = 2, }, +}; + +static const struct c_can_driver_data dra7_dcan_drvdata = { + .id = BOSCH_D_CAN, + .raminit_num = ARRAY_SIZE(dra7_raminit_bits), + .raminit_bits = dra7_raminit_bits, + .raminit_pulse = true, +}; + +static const struct raminit_bits am3352_raminit_bits[] = { + [0] = { .start = 0, .done = 8, }, + [1] = { .start = 1, .done = 9, }, +}; + +static const struct c_can_driver_data am3352_dcan_drvdata = { + .id = BOSCH_D_CAN, + .raminit_num = ARRAY_SIZE(am3352_raminit_bits), + .raminit_bits = am3352_raminit_bits, +}; + static struct platform_device_id c_can_id_table[] = { - [BOSCH_C_CAN_PLATFORM] = { + { .name = KBUILD_MODNAME, - .driver_data = BOSCH_C_CAN, + .driver_data = (kernel_ulong_t)&c_can_drvdata, }, - [BOSCH_C_CAN] = { + { .name = "c_can", - .driver_data = BOSCH_C_CAN, + .driver_data = (kernel_ulong_t)&c_can_drvdata, }, - [BOSCH_D_CAN] = { + { .name = "d_can", - .driver_data = BOSCH_D_CAN, - }, { - } + .driver_data = (kernel_ulong_t)&d_can_drvdata, + }, + { /* sentinel */ }, }; MODULE_DEVICE_TABLE(platform, c_can_id_table); static const struct of_device_id c_can_of_table[] = { - { .compatible = "bosch,c_can", .data = &c_can_id_table[BOSCH_C_CAN] }, - { .compatible = "bosch,d_can", .data = &c_can_id_table[BOSCH_D_CAN] }, + { .compatible = "bosch,c_can", .data = &c_can_drvdata }, + { .compatible = "bosch,d_can", .data = &d_can_drvdata }, + { .compatible = "ti,dra7-d_can", .data = &dra7_dcan_drvdata }, + { .compatible = "ti,am3352-d_can", .data = &am3352_dcan_drvdata }, + { .compatible = "ti,am4372-d_can", .data = &am3352_dcan_drvdata }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, c_can_of_table); @@ -190,21 +247,20 @@ static int c_can_plat_probe(struct platform_device *pdev) struct net_device *dev; struct c_can_priv *priv; const struct of_device_id *match; - const struct platform_device_id *id; - struct resource *mem, *res; + struct resource *mem; int irq; struct 
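To make the c_can_hw_raminit_syscon() handshake above easier to follow, here is the same sequence restated linearly. The assumptions are the ones stated in the hunks: START is not self-clearing and triggers on a 0 -> 1 transition, DONE is write-1-to-clear, and some SoCs (DRA7) additionally need START pulsed back low:

#include <linux/bitops.h>
#include <linux/regmap.h>
#include <linux/types.h>

/* 'syscon', 'reg' and the bit positions come from the per-SoC
 * c_can_driver_data tables; error handling elided for brevity.
 */
static void raminit_sequence(struct regmap *syscon, unsigned int reg,
			     u8 start_bit, u8 done_bit, bool needs_pulse)
{
	u32 ctrl;

	regmap_read(syscon, reg, &ctrl);

	/* 1) drop START and ack any stale DONE (write 1 clears it);
	 * the driver then waits for both bits to read back as zero
	 */
	ctrl &= ~BIT(start_bit);
	ctrl |= BIT(done_bit);
	regmap_write(syscon, reg, ctrl);

	/* 2) kick the init with a 0 -> 1 transition on START */
	ctrl &= ~BIT(done_bit);
	ctrl |= BIT(start_bit);
	regmap_write(syscon, reg, ctrl);

	/* 3) DRA7 wants START taken low again (a pulse) */
	if (needs_pulse) {
		ctrl &= ~BIT(start_bit);
		regmap_write(syscon, reg, ctrl);
	}

	/* 4) finally poll until the hardware raises DONE, as
	 * c_can_hw_raminit_wait_syscon() does above
	 */
}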
clk *clk; - - if (pdev->dev.of_node) { - match = of_match_device(c_can_of_table, &pdev->dev); - if (!match) { - dev_err(&pdev->dev, "Failed to find matching dt id\n"); - ret = -EINVAL; - goto exit; - } - id = match->data; + const struct c_can_driver_data *drvdata; + struct device_node *np = pdev->dev.of_node; + + match = of_match_device(c_can_of_table, &pdev->dev); + if (match) { + drvdata = match->data; + } else if (pdev->id_entry->driver_data) { + drvdata = (struct c_can_driver_data *) + platform_get_device_id(pdev)->driver_data; } else { - id = platform_get_device_id(pdev); + return -ENODEV; } /* get the appropriate clk */ @@ -236,7 +292,7 @@ static int c_can_plat_probe(struct platform_device *pdev) } priv = netdev_priv(dev); - switch (id->driver_data) { + switch (drvdata->id) { case BOSCH_C_CAN: priv->regs = reg_map_c_can; switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) { @@ -263,27 +319,50 @@ static int c_can_plat_probe(struct platform_device *pdev) priv->read_reg32 = d_can_plat_read_reg32; priv->write_reg32 = d_can_plat_write_reg32; - if (pdev->dev.of_node) - priv->instance = of_alias_get_id(pdev->dev.of_node, "d_can"); - else - priv->instance = pdev->id; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - /* Not all D_CAN modules have a separate register for the D_CAN - * RAM initialization. Use default RAM init bit in D_CAN module - * if not specified in DT. + /* Check if we need custom RAMINIT via syscon. Mostly for TI + * platforms. Only supported with DT boot. */ - if (!res) { + if (np && of_property_read_bool(np, "syscon-raminit")) { + u32 id; + struct c_can_raminit *raminit = &priv->raminit_sys; + + ret = -EINVAL; + raminit->syscon = syscon_regmap_lookup_by_phandle(np, + "syscon-raminit"); + if (IS_ERR(raminit->syscon)) { + /* can fail with -EPROBE_DEFER */ + ret = PTR_ERR(raminit->syscon); + free_c_can_dev(dev); + return ret; + } + + if (of_property_read_u32_index(np, "syscon-raminit", 1, + &raminit->reg)) { + dev_err(&pdev->dev, + "couldn't get the RAMINIT reg. offset!\n"); + goto exit_free_device; + } + + if (of_property_read_u32_index(np, "syscon-raminit", 2, + &id)) { + dev_err(&pdev->dev, + "couldn't get the CAN instance ID\n"); + goto exit_free_device; + } + + if (id >= drvdata->raminit_num) { + dev_err(&pdev->dev, + "Invalid CAN instance ID\n"); + goto exit_free_device; + } + + raminit->bits = drvdata->raminit_bits[id]; + raminit->needs_pulse = drvdata->raminit_pulse; + + priv->raminit = c_can_hw_raminit_syscon; + } else { priv->raminit = c_can_hw_raminit; - break; } - - priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start, - resource_size(res)); - if (!priv->raminit_ctrlreg || priv->instance < 0) - dev_info(&pdev->dev, "control memory is not used for raminit\n"); - else - priv->raminit = c_can_hw_raminit_ti; break; default: ret = -EINVAL; @@ -295,7 +374,7 @@ static int c_can_plat_probe(struct platform_device *pdev) priv->device = &pdev->dev; priv->can.clock.freq = clk_get_rate(clk); priv->priv = clk; - priv->type = id->driver_data; + priv->type = drvdata->id; platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index d8379278d648a2160f63d386eb5056dc47d70b7a..c486fe510f370957944f9082e1712cfa3cb7aa8f 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c @@ -60,7 +60,7 @@ MODULE_DESCRIPTION(KBUILD_MODNAME "CAN netdevice driver"); * * The message objects 1..14 can be used for TX and RX while the message * objects 15 is optimized for RX. 
It has a shadow register for reliable - * data receiption under heavy bus load. Therefore it makes sense to use + * data reception under heavy bus load. Therefore it makes sense to use * this message object for the needed use case. The frame type (EFF/SFF) * for the message object 15 can be defined via kernel module parameter * "msgobj15_eff". If not equal 0, it will receive 29-bit EFF frames, diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 2cfe5012e4e58c3c4c51b968fba5c9c029e0db44..3ec8f6f25e5f979e16838930295fe4cd66b2841c 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -273,6 +273,84 @@ static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt, return err; } +static void can_update_state_error_stats(struct net_device *dev, + enum can_state new_state) +{ + struct can_priv *priv = netdev_priv(dev); + + if (new_state <= priv->state) + return; + + switch (new_state) { + case CAN_STATE_ERROR_WARNING: + priv->can_stats.error_warning++; + break; + case CAN_STATE_ERROR_PASSIVE: + priv->can_stats.error_passive++; + break; + case CAN_STATE_BUS_OFF: + default: + break; + }; +} + +static int can_tx_state_to_frame(struct net_device *dev, enum can_state state) +{ + switch (state) { + case CAN_STATE_ERROR_ACTIVE: + return CAN_ERR_CRTL_ACTIVE; + case CAN_STATE_ERROR_WARNING: + return CAN_ERR_CRTL_TX_WARNING; + case CAN_STATE_ERROR_PASSIVE: + return CAN_ERR_CRTL_TX_PASSIVE; + default: + return 0; + } +} + +static int can_rx_state_to_frame(struct net_device *dev, enum can_state state) +{ + switch (state) { + case CAN_STATE_ERROR_ACTIVE: + return CAN_ERR_CRTL_ACTIVE; + case CAN_STATE_ERROR_WARNING: + return CAN_ERR_CRTL_RX_WARNING; + case CAN_STATE_ERROR_PASSIVE: + return CAN_ERR_CRTL_RX_PASSIVE; + default: + return 0; + } +} + +void can_change_state(struct net_device *dev, struct can_frame *cf, + enum can_state tx_state, enum can_state rx_state) +{ + struct can_priv *priv = netdev_priv(dev); + enum can_state new_state = max(tx_state, rx_state); + + if (unlikely(new_state == priv->state)) { + netdev_warn(dev, "%s: oops, state did not change", __func__); + return; + } + + netdev_dbg(dev, "New error state: %d\n", new_state); + + can_update_state_error_stats(dev, new_state); + priv->state = new_state; + + if (unlikely(new_state == CAN_STATE_BUS_OFF)) { + cf->can_id |= CAN_ERR_BUSOFF; + return; + } + + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] |= tx_state >= rx_state ? + can_tx_state_to_frame(dev, tx_state) : 0; + cf->data[1] |= tx_state <= rx_state ? 
+ can_rx_state_to_frame(dev, rx_state) : 0; +} +EXPORT_SYMBOL_GPL(can_change_state); + /* * Local echo of CAN messages * diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 60f86bd0434af7cd93c394677809a3e60932d768..dde05486bc9917fb88126b79933c53d5868bd0e7 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -577,98 +577,30 @@ static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr) return 1; } -static void do_state(struct net_device *dev, - struct can_frame *cf, enum can_state new_state) -{ - struct flexcan_priv *priv = netdev_priv(dev); - struct can_berr_counter bec; - - __flexcan_get_berr_counter(dev, &bec); - - switch (priv->can.state) { - case CAN_STATE_ERROR_ACTIVE: - /* - * from: ERROR_ACTIVE - * to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF - * => : there was a warning int - */ - if (new_state >= CAN_STATE_ERROR_WARNING && - new_state <= CAN_STATE_BUS_OFF) { - netdev_dbg(dev, "Error Warning IRQ\n"); - priv->can.can_stats.error_warning++; - - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = (bec.txerr > bec.rxerr) ? - CAN_ERR_CRTL_TX_WARNING : - CAN_ERR_CRTL_RX_WARNING; - } - case CAN_STATE_ERROR_WARNING: /* fallthrough */ - /* - * from: ERROR_ACTIVE, ERROR_WARNING - * to : ERROR_PASSIVE, BUS_OFF - * => : error passive int - */ - if (new_state >= CAN_STATE_ERROR_PASSIVE && - new_state <= CAN_STATE_BUS_OFF) { - netdev_dbg(dev, "Error Passive IRQ\n"); - priv->can.can_stats.error_passive++; - - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = (bec.txerr > bec.rxerr) ? - CAN_ERR_CRTL_TX_PASSIVE : - CAN_ERR_CRTL_RX_PASSIVE; - } - break; - case CAN_STATE_BUS_OFF: - netdev_err(dev, "BUG! " - "hardware recovered automatically from BUS_OFF\n"); - break; - default: - break; - } - - /* process state changes depending on the new state */ - switch (new_state) { - case CAN_STATE_ERROR_WARNING: - netdev_dbg(dev, "Error Warning\n"); - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = (bec.txerr > bec.rxerr) ? - CAN_ERR_CRTL_TX_WARNING : - CAN_ERR_CRTL_RX_WARNING; - break; - case CAN_STATE_ERROR_ACTIVE: - netdev_dbg(dev, "Error Active\n"); - cf->can_id |= CAN_ERR_PROT; - cf->data[2] = CAN_ERR_PROT_ACTIVE; - break; - case CAN_STATE_BUS_OFF: - cf->can_id |= CAN_ERR_BUSOFF; - can_bus_off(dev); - break; - default: - break; - } -} - static int flexcan_poll_state(struct net_device *dev, u32 reg_esr) { struct flexcan_priv *priv = netdev_priv(dev); struct sk_buff *skb; struct can_frame *cf; - enum can_state new_state; + enum can_state new_state = 0, rx_state = 0, tx_state = 0; int flt; + struct can_berr_counter bec; flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK; if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) { - if (likely(!(reg_esr & (FLEXCAN_ESR_TX_WRN | - FLEXCAN_ESR_RX_WRN)))) - new_state = CAN_STATE_ERROR_ACTIVE; - else - new_state = CAN_STATE_ERROR_WARNING; - } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) + tx_state = unlikely(reg_esr & FLEXCAN_ESR_TX_WRN) ? + CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE; + rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ? + CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE; + new_state = max(tx_state, rx_state); + } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) { + __flexcan_get_berr_counter(dev, &bec); new_state = CAN_STATE_ERROR_PASSIVE; - else + rx_state = bec.rxerr >= bec.txerr ? new_state : 0; + tx_state = bec.rxerr <= bec.txerr ? 
new_state : 0; + } else { new_state = CAN_STATE_BUS_OFF; + } /* state hasn't changed */ if (likely(new_state == priv->can.state)) @@ -678,8 +610,11 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr) if (unlikely(!skb)) return 0; - do_state(dev, cf, new_state); - priv->can.state = new_state; + can_change_state(dev, cf, tx_state, rx_state); + + if (unlikely(new_state == CAN_STATE_BUS_OFF)) + can_bus_off(dev); + netif_receive_skb(skb); dev->stats.rx_packets++; diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index e0c9be5e2ab74676e8e3bb9fe026b145d3f4d3ab..e36b7400d5cceaf12b416d39a0d546baa52e73a5 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@ -289,18 +289,15 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -/* This function returns the old state to see where we came from */ -static enum can_state check_set_state(struct net_device *dev, u8 canrflg) +static enum can_state get_new_state(struct net_device *dev, u8 canrflg) { struct mscan_priv *priv = netdev_priv(dev); - enum can_state state, old_state = priv->can.state; - if (canrflg & MSCAN_CSCIF && old_state <= CAN_STATE_BUS_OFF) { - state = state_map[max(MSCAN_STATE_RX(canrflg), - MSCAN_STATE_TX(canrflg))]; - priv->can.state = state; - } - return old_state; + if (unlikely(canrflg & MSCAN_CSCIF)) + return state_map[max(MSCAN_STATE_RX(canrflg), + MSCAN_STATE_TX(canrflg))]; + + return priv->can.state; } static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame) @@ -349,7 +346,7 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame, struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; struct net_device_stats *stats = &dev->stats; - enum can_state old_state; + enum can_state new_state; netdev_dbg(dev, "error interrupt (canrflg=%#x)\n", canrflg); frame->can_id = CAN_ERR_FLAG; @@ -363,27 +360,13 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame, frame->data[1] = 0; } - old_state = check_set_state(dev, canrflg); - /* State changed */ - if (old_state != priv->can.state) { - switch (priv->can.state) { - case CAN_STATE_ERROR_WARNING: - frame->can_id |= CAN_ERR_CRTL; - priv->can.can_stats.error_warning++; - if ((priv->shadow_statflg & MSCAN_RSTAT_MSK) < - (canrflg & MSCAN_RSTAT_MSK)) - frame->data[1] |= CAN_ERR_CRTL_RX_WARNING; - if ((priv->shadow_statflg & MSCAN_TSTAT_MSK) < - (canrflg & MSCAN_TSTAT_MSK)) - frame->data[1] |= CAN_ERR_CRTL_TX_WARNING; - break; - case CAN_STATE_ERROR_PASSIVE: - frame->can_id |= CAN_ERR_CRTL; - priv->can.can_stats.error_passive++; - frame->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; - break; - case CAN_STATE_BUS_OFF: - frame->can_id |= CAN_ERR_BUSOFF; + new_state = get_new_state(dev, canrflg); + if (new_state != priv->can.state) { + can_change_state(dev, frame, + state_map[MSCAN_STATE_TX(canrflg)], + state_map[MSCAN_STATE_RX(canrflg)]); + + if (priv->can.state == CAN_STATE_BUS_OFF) { /* * The MSCAN on the MPC5200 does recover from bus-off * automatically. 
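A note on the can_change_state() calling convention used by the flexcan, mscan and (below) sja1000 conversions: tx_state and rx_state each carry the state that direction has reached, with 0 (CAN_STATE_ERROR_ACTIVE) meaning "this direction is not at fault". The helper takes the max() of the two as the new chip state, bumps the error-warning/error-passive statistics once, and sets the matching CAN_ERR_CRTL_{TX,RX}_* bits in the error frame. A hedged sketch of a typical caller, with the error counters assumed to come from driver-specific registers:

#include <linux/can/dev.h>

static void foo_handle_state_change(struct net_device *dev,
				    struct can_frame *cf,
				    u16 txerr, u16 rxerr,
				    enum can_state new_state)
{
	enum can_state tx_state, rx_state;

	/* blame the direction(s) whose counter is higher; ties blame both */
	tx_state = txerr >= rxerr ? new_state : 0;
	rx_state = txerr <= rxerr ? new_state : 0;

	can_change_state(dev, cf, tx_state, rx_state);

	/* restart/bus-off handling stays in the driver */
	if (new_state == CAN_STATE_BUS_OFF)
		can_bus_off(dev);
}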
To avoid that we stop the chip doing @@ -396,9 +379,6 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame, MSCAN_SLPRQ | MSCAN_INITRQ); } can_bus_off(dev); - break; - default: - break; } } priv->shadow_statflg = canrflg & MSCAN_STAT_MSK; diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index b27ac6074afb1d21ab63282298002f8a4992599b..32bd7f451aa42b53f0a479af667861db6693d619 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -392,12 +392,20 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) struct can_frame *cf; struct sk_buff *skb; enum can_state state = priv->can.state; + enum can_state rx_state, tx_state; + unsigned int rxerr, txerr; uint8_t ecc, alc; skb = alloc_can_err_skb(dev, &cf); if (skb == NULL) return -ENOMEM; + txerr = priv->read_reg(priv, SJA1000_TXERR); + rxerr = priv->read_reg(priv, SJA1000_RXERR); + + cf->data[6] = txerr; + cf->data[7] = rxerr; + if (isrc & IRQ_DOI) { /* data overrun interrupt */ netdev_dbg(dev, "data overrun interrupt\n"); @@ -412,13 +420,11 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) /* error warning interrupt */ netdev_dbg(dev, "error warning interrupt\n"); - if (status & SR_BS) { + if (status & SR_BS) state = CAN_STATE_BUS_OFF; - cf->can_id |= CAN_ERR_BUSOFF; - can_bus_off(dev); - } else if (status & SR_ES) { + else if (status & SR_ES) state = CAN_STATE_ERROR_WARNING; - } else + else state = CAN_STATE_ERROR_ACTIVE; } if (isrc & IRQ_BEI) { @@ -452,10 +458,11 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) if (isrc & IRQ_EPI) { /* error passive interrupt */ netdev_dbg(dev, "error passive interrupt\n"); - if (status & SR_ES) - state = CAN_STATE_ERROR_PASSIVE; + + if (state == CAN_STATE_ERROR_PASSIVE) + state = CAN_STATE_ERROR_WARNING; else - state = CAN_STATE_ERROR_ACTIVE; + state = CAN_STATE_ERROR_PASSIVE; } if (isrc & IRQ_ALI) { /* arbitration lost interrupt */ @@ -467,27 +474,15 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) cf->data[0] = alc & 0x1f; } - if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING || - state == CAN_STATE_ERROR_PASSIVE)) { - uint8_t rxerr = priv->read_reg(priv, SJA1000_RXERR); - uint8_t txerr = priv->read_reg(priv, SJA1000_TXERR); - cf->can_id |= CAN_ERR_CRTL; - if (state == CAN_STATE_ERROR_WARNING) { - priv->can.can_stats.error_warning++; - cf->data[1] = (txerr > rxerr) ? - CAN_ERR_CRTL_TX_WARNING : - CAN_ERR_CRTL_RX_WARNING; - } else { - priv->can.can_stats.error_passive++; - cf->data[1] = (txerr > rxerr) ? - CAN_ERR_CRTL_TX_PASSIVE : - CAN_ERR_CRTL_RX_PASSIVE; - } - cf->data[6] = txerr; - cf->data[7] = rxerr; - } + if (state != priv->can.state) { + tx_state = txerr >= rxerr ? state : 0; + rx_state = txerr <= rxerr ? 
state : 0; - priv->can.state = state; + can_change_state(dev, cf, tx_state, rx_state); + + if(state == CAN_STATE_BUS_OFF) + can_bus_off(dev); + } netif_rx(skb); diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index acb5b92ace92da17f55a5d892e90e7d7530a1952..c837eb91d43e306304d1a1347a8d8f9ada6cb02f 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -56,9 +56,6 @@ #include #include -static __initconst const char banner[] = - KERN_INFO "slcan: serial line CAN interface driver\n"; - MODULE_ALIAS_LDISC(N_SLCAN); MODULE_DESCRIPTION("serial line CAN interface"); MODULE_LICENSE("GPL"); @@ -702,8 +699,8 @@ static int __init slcan_init(void) if (maxdev < 4) maxdev = 4; /* Sanity */ - printk(banner); - printk(KERN_INFO "slcan: %d dynamic interface channels.\n", maxdev); + pr_info("slcan: serial line CAN interface driver\n"); + pr_info("slcan: %d dynamic interface channels.\n", maxdev); slcan_devs = kzalloc(sizeof(struct net_device *)*maxdev, GFP_KERNEL); if (!slcan_devs) diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 925ab8ec9329b5bbfadb6e94faa12de0b7f91ceb..4e1659d07979b77e231040509608b043dba4841b 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -316,7 +316,7 @@ static int pcan_usb_get_serial(struct peak_usb_device *dev, u32 *serial_number) if (err) { netdev_err(dev->netdev, "getting serial failure: %d\n", err); } else if (serial_number) { - u32 tmp32; + __le32 tmp32; memcpy(&tmp32, args, 4); *serial_number = le32_to_cpu(tmp32); @@ -347,7 +347,7 @@ static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id) */ static int pcan_usb_update_ts(struct pcan_usb_msg_context *mc) { - u16 tmp16; + __le16 tmp16; if ((mc->ptr+2) > mc->end) return -EINVAL; @@ -371,7 +371,7 @@ static int pcan_usb_decode_ts(struct pcan_usb_msg_context *mc, u8 first_packet) { /* only 1st packet supplies a word timestamp */ if (first_packet) { - u16 tmp16; + __le16 tmp16; if ((mc->ptr + 2) > mc->end) return -EINVAL; @@ -614,7 +614,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) return -ENOMEM; if (status_len & PCAN_USB_STATUSLEN_EXT_ID) { - u32 tmp32; + __le32 tmp32; if ((mc->ptr + 4) > mc->end) goto decode_failed; @@ -622,9 +622,9 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) memcpy(&tmp32, mc->ptr, 4); mc->ptr += 4; - cf->can_id = le32_to_cpu(tmp32 >> 3) | CAN_EFF_FLAG; + cf->can_id = (le32_to_cpu(tmp32) >> 3) | CAN_EFF_FLAG; } else { - u16 tmp16; + __le16 tmp16; if ((mc->ptr + 2) > mc->end) goto decode_failed; @@ -632,7 +632,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) memcpy(&tmp16, mc->ptr, 2); mc->ptr += 2; - cf->can_id = le16_to_cpu(tmp16 >> 5); + cf->can_id = le16_to_cpu(tmp16) >> 5; } cf->can_dlc = get_can_dlc(rec_len); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 644e6ab8a489b162ba399a01faa10cc5b09cf3bb..c62f48a1161d1e871550489cbf3e5b4ccd90aa1e 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -735,7 +735,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter, dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); if (!dev->cmd_buf) { err = -ENOMEM; - goto lbl_set_intf_data; + goto lbl_free_candev; } dev->udev = usb_dev; @@ -775,7 +775,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter 
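The pcan_usb.c changes above are a real byte-order fix, not just sparse appeasement: the little-endian CAN-ID field must be converted to host order before it is shifted, otherwise the shift mixes bits across byte boundaries on big-endian hosts. A userspace demonstration (glibc's <endian.h> standing in for le32_to_cpu(); the wire value is an arbitrary example):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t wire = htole32(0x12345678u << 3);	/* ID stored <<3, little-endian */

	uint32_t ok  = le32toh(wire) >> 3;	/* new code: swap, then shift */
	uint32_t bad = le32toh(wire >> 3);	/* old code: shift the raw value */

	printf("ok=%#x bad=%#x\n", ok, bad);	/* ok is always 0x12345678 */
	return 0;
}

On a little-endian machine the two agree (le32toh() is the identity), which is exactly how the bug went unnoticed; on big-endian only the first form recovers the ID.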
*peak_usb_adapter, err = register_candev(netdev); if (err) { dev_err(&intf->dev, "couldn't register CAN device: %d\n", err); - goto lbl_free_cmd_buf; + goto lbl_restore_intf_data; } if (dev->prev_siblings) @@ -788,14 +788,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter, if (dev->adapter->dev_init) { err = dev->adapter->dev_init(dev); if (err) - goto lbl_free_cmd_buf; + goto lbl_unregister_candev; } /* set bus off */ if (dev->adapter->dev_set_bus) { err = dev->adapter->dev_set_bus(dev, 0); if (err) - goto lbl_free_cmd_buf; + goto lbl_unregister_candev; } /* get device number early */ @@ -807,11 +807,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter, return 0; -lbl_free_cmd_buf: - kfree(dev->cmd_buf); +lbl_unregister_candev: + unregister_candev(netdev); -lbl_set_intf_data: +lbl_restore_intf_data: usb_set_intfdata(intf, dev->prev_siblings); + kfree(dev->cmd_buf); + +lbl_free_candev: free_candev(netdev); return err; @@ -853,6 +856,7 @@ static int peak_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *usb_dev = interface_to_usbdev(intf); + const u16 usb_id_product = le16_to_cpu(usb_dev->descriptor.idProduct); struct peak_usb_adapter *peak_usb_adapter, **pp; int i, err = -ENOMEM; @@ -860,7 +864,7 @@ static int peak_usb_probe(struct usb_interface *intf, /* get corresponding PCAN-USB adapter */ for (pp = peak_usb_adapters_list; *pp; pp++) - if ((*pp)->device_id == usb_dev->descriptor.idProduct) + if ((*pp)->device_id == usb_id_product) break; peak_usb_adapter = *pp; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 263dd921edc42342bba78ce9f2885862f0fc3087..4cfa3b8605b19f08ecdeed869f2874e72957d5b7 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -78,8 +78,8 @@ struct pcan_usb_pro_msg { int rec_buffer_size; int rec_buffer_len; union { - u16 *rec_cnt_rd; - u32 *rec_cnt; + __le16 *rec_cnt_rd; + __le32 *rec_cnt; u8 *rec_buffer; } u; }; @@ -155,7 +155,7 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...) *pc++ = va_arg(ap, int); *pc++ = va_arg(ap, int); *pc++ = va_arg(ap, int); - *(u32 *)pc = cpu_to_le32(va_arg(ap, u32)); + *(__le32 *)pc = cpu_to_le32(va_arg(ap, u32)); pc += 4; memcpy(pc, va_arg(ap, int *), i); pc += i; @@ -165,7 +165,7 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...) case PCAN_USBPRO_GETDEVID: *pc++ = va_arg(ap, int); pc += 2; - *(u32 *)pc = cpu_to_le32(va_arg(ap, u32)); + *(__le32 *)pc = cpu_to_le32(va_arg(ap, u32)); pc += 4; break; @@ -173,21 +173,21 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...) case PCAN_USBPRO_SETBUSACT: case PCAN_USBPRO_SETSILENT: *pc++ = va_arg(ap, int); - *(u16 *)pc = cpu_to_le16(va_arg(ap, int)); + *(__le16 *)pc = cpu_to_le16(va_arg(ap, int)); pc += 2; break; case PCAN_USBPRO_SETLED: *pc++ = va_arg(ap, int); - *(u16 *)pc = cpu_to_le16(va_arg(ap, int)); + *(__le16 *)pc = cpu_to_le16(va_arg(ap, int)); pc += 2; - *(u32 *)pc = cpu_to_le32(va_arg(ap, u32)); + *(__le32 *)pc = cpu_to_le32(va_arg(ap, u32)); pc += 4; break; case PCAN_USBPRO_SETTS: pc++; - *(u16 *)pc = cpu_to_le16(va_arg(ap, int)); + *(__le16 *)pc = cpu_to_le16(va_arg(ap, int)); pc += 2; break; @@ -200,7 +200,7 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...) 
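The relabelled error path in peak_usb_create_dev() above restores the usual kernel unwind idiom: labels are ordered so that a failure after step N falls through the teardown of steps N..1 in reverse order of setup, and each label names what it undoes. The old ladder never called unregister_candev() on late failures, freeing a still-registered device. Reduced to a runnable skeleton with hypothetical steps standing in for alloc_candev(), the command buffer, register_candev() and dev_init():

#include <stdio.h>

static int  setup_a(void) { return 0; }
static int  setup_b(void) { return 0; }
static int  setup_c(void) { return 0; }
static int  setup_d(void) { return 0; }
static void undo_a(void)  { }	/* free_candev() */
static void undo_b(void)  { }	/* restore intfdata, kfree(cmd_buf) */
static void undo_c(void)  { }	/* unregister_candev() */

static int foo_probe(void)
{
	int err;

	err = setup_a();
	if (err)
		return err;
	err = setup_b();
	if (err)
		goto out_a;
	err = setup_c();
	if (err)
		goto out_b;
	err = setup_d();
	if (err)
		goto out_c;
	return 0;

out_c:
	undo_c();
out_b:
	undo_b();
out_a:
	undo_a();
	return err;
}

int main(void) { return foo_probe(); }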
len = pc - pm->rec_ptr; if (len > 0) { - *pm->u.rec_cnt = cpu_to_le32(*pm->u.rec_cnt+1); + *pm->u.rec_cnt = cpu_to_le32(le32_to_cpu(*pm->u.rec_cnt) + 1); *pm->rec_ptr = id; pm->rec_ptr = pc; @@ -333,8 +333,6 @@ static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id, if (!(dev->state & PCAN_USB_STATE_CONNECTED)) return 0; - memset(req_addr, '\0', req_size); - req_type = USB_TYPE_VENDOR | USB_RECIP_OTHER; switch (req_id) { @@ -345,6 +343,7 @@ static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id, default: p = usb_rcvctrlpipe(dev->udev, 0); req_type |= USB_DIR_IN; + memset(req_addr, '\0', req_size); break; } @@ -572,7 +571,7 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if, static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if, struct pcan_usb_pro_rxstatus *er) { - const u32 raw_status = le32_to_cpu(er->status); + const u16 raw_status = le16_to_cpu(er->status); const unsigned int ctrl_idx = (er->channel >> 4) & 0x0f; struct peak_usb_device *dev = usb_if->dev[ctrl_idx]; struct net_device *netdev = dev->netdev; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h index 32275af547e06becb9d4150030012e52c753bd16..837cee2671326f3d14b85525f14005222a783ff1 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h @@ -33,27 +33,27 @@ /* PCAN_USBPRO_INFO_BL vendor request record type */ struct __packed pcan_usb_pro_blinfo { - u32 ctrl_type; + __le32 ctrl_type; u8 version[4]; u8 day; u8 month; u8 year; u8 dummy; - u32 serial_num_hi; - u32 serial_num_lo; - u32 hw_type; - u32 hw_rev; + __le32 serial_num_hi; + __le32 serial_num_lo; + __le32 hw_type; + __le32 hw_rev; }; /* PCAN_USBPRO_INFO_FW vendor request record type */ struct __packed pcan_usb_pro_fwinfo { - u32 ctrl_type; + __le32 ctrl_type; u8 version[4]; u8 day; u8 month; u8 year; u8 dummy; - u32 fw_type; + __le32 fw_type; }; /* @@ -80,46 +80,46 @@ struct __packed pcan_usb_pro_fwinfo { struct __packed pcan_usb_pro_btr { u8 data_type; u8 channel; - u16 dummy; - u32 CCBT; + __le16 dummy; + __le32 CCBT; }; struct __packed pcan_usb_pro_busact { u8 data_type; u8 channel; - u16 onoff; + __le16 onoff; }; struct __packed pcan_usb_pro_silent { u8 data_type; u8 channel; - u16 onoff; + __le16 onoff; }; struct __packed pcan_usb_pro_filter { u8 data_type; u8 dummy; - u16 filter_mode; + __le16 filter_mode; }; struct __packed pcan_usb_pro_setts { u8 data_type; u8 dummy; - u16 mode; + __le16 mode; }; struct __packed pcan_usb_pro_devid { u8 data_type; u8 channel; - u16 dummy; - u32 serial_num; + __le16 dummy; + __le32 serial_num; }; struct __packed pcan_usb_pro_setled { u8 data_type; u8 channel; - u16 mode; - u32 timeout; + __le16 mode; + __le32 timeout; }; struct __packed pcan_usb_pro_rxmsg { @@ -127,8 +127,8 @@ struct __packed pcan_usb_pro_rxmsg { u8 client; u8 flags; u8 len; - u32 ts32; - u32 id; + __le32 ts32; + __le32 id; u8 data[8]; }; @@ -141,15 +141,15 @@ struct __packed pcan_usb_pro_rxmsg { struct __packed pcan_usb_pro_rxstatus { u8 data_type; u8 channel; - u16 status; - u32 ts32; - u32 err_frm; + __le16 status; + __le32 ts32; + __le32 err_frm; }; struct __packed pcan_usb_pro_rxts { u8 data_type; u8 dummy[3]; - u32 ts64[2]; + __le32 ts64[2]; }; struct __packed pcan_usb_pro_txmsg { @@ -157,7 +157,7 @@ struct __packed pcan_usb_pro_txmsg { u8 client; u8 flags; u8 len; - u32 id; + __le32 id; u8 data[8]; }; diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index 
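The rec_cnt fix above is the canonical read-modify-write for a counter kept in little-endian wire format: convert to host order, increment, convert back. The old cpu_to_le32(*rec_cnt + 1) only happened to work on little-endian hosts, which is the class of bug the -D__CHECK_ENDIAN__ Makefile hunk earlier lets sparse flag via the __le16/__le32 annotations. In userspace terms:

#include <endian.h>	/* glibc; the kernel spells these le32_to_cpu()/cpu_to_le32() */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t wire = htole32(261);	/* counter as stored in the message */

	wire = htole32(le32toh(wire) + 1);	/* swap in, add, swap back */

	printf("counter now %u\n", le32toh(wire));	/* prints 262 */
	return 0;
}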
4e94057ef5cf55df4600496d38b5fce433f851b4..674f367087c54ac168d2c0283b1361a1c98d3696 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c @@ -50,9 +50,6 @@ #include #include -static __initconst const char banner[] = - KERN_INFO "vcan: Virtual CAN interface driver\n"; - MODULE_DESCRIPTION("virtual CAN interface"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Urs Thuermann "); @@ -173,7 +170,7 @@ static struct rtnl_link_ops vcan_link_ops __read_mostly = { static __init int vcan_init_module(void) { - printk(banner); + pr_info("vcan: Virtual CAN interface driver\n"); if (echo) printk(KERN_INFO "vcan: enabled echo on driver level.\n"); diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 9234d808cbb35e4000a12afd4f70c33ec5a7d3fd..7cf8f4ac281f50518d0baefd9574fa392893e33e 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -37,13 +37,22 @@ config NET_DSA_MV88E6123_61_65 ethernet switch chips. config NET_DSA_MV88E6171 - tristate "Marvell 88E6171 ethernet switch chip support" + tristate "Marvell 88E6171/6172 ethernet switch chip support" select NET_DSA select NET_DSA_MV88E6XXX select NET_DSA_TAG_EDSA ---help--- - This enables support for the Marvell 88E6171 ethernet switch - chip. + This enables support for the Marvell 88E6171/6172 ethernet switch + chips. + +config NET_DSA_MV88E6352 + tristate "Marvell 88E6176/88E6352 ethernet switch chip support" + select NET_DSA + select NET_DSA_MV88E6XXX + select NET_DSA_TAG_EDSA + ---help--- + This enables support for the Marvell 88E6176 and 88E6352 ethernet + switch chips. config NET_DSA_BCM_SF2 tristate "Broadcom Starfighter 2 Ethernet switch support" diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index 23a90de9830e87cdec4f3ade882ca41e9e188f48..e2d51c4b93821ff8d0e5f21fefab290b0028f38c 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -7,6 +7,9 @@ endif ifdef CONFIG_NET_DSA_MV88E6131 mv88e6xxx_drv-y += mv88e6131.o endif +ifdef CONFIG_NET_DSA_MV88E6352 +mv88e6xxx_drv-y += mv88e6352.o +endif ifdef CONFIG_NET_DSA_MV88E6171 mv88e6xxx_drv-y += mv88e6171.o endif diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 05b0ca3bf71d72f11515241f0cfac14ea1f2c4a6..c29aebe1e62b59801ca7bb2aed36f9c502733fbf 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -69,8 +69,11 @@ static char *mv88e6060_probe(struct device *host_dev, int sw_addr) ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03); if (ret >= 0) { - ret &= 0xfff0; if (ret == 0x0600) + return "Marvell 88E6060 (A0)"; + if (ret == 0x0601 || ret == 0x0602) + return "Marvell 88E6060 (B0)"; + if ((ret & 0xfff0) == 0x0600) return "Marvell 88E6060"; } diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c index a332c53ff955906eaeec11edd81350b247561fed..e9c736e1cef3a20cd599f6347be20de73127a0ae 100644 --- a/drivers/net/dsa/mv88e6123_61_65.c +++ b/drivers/net/dsa/mv88e6123_61_65.c @@ -299,6 +299,7 @@ static int mv88e6123_61_65_setup(struct dsa_switch *ds) mutex_init(&ps->smi_mutex); mutex_init(&ps->stats_mutex); + mutex_init(&ps->phy_mutex); ret = mv88e6123_61_65_switch_reset(ds); if (ret < 0) @@ -329,16 +330,28 @@ static int mv88e6123_61_65_port_to_phy_addr(int port) static int mv88e6123_61_65_phy_read(struct dsa_switch *ds, int port, int regnum) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int addr = mv88e6123_61_65_port_to_phy_addr(port); - return mv88e6xxx_phy_read(ds, addr, regnum); + int ret; + + mutex_lock(&ps->phy_mutex); + ret = 
mv88e6xxx_phy_read(ds, addr, regnum); + mutex_unlock(&ps->phy_mutex); + return ret; } static int mv88e6123_61_65_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int addr = mv88e6123_61_65_port_to_phy_addr(port); - return mv88e6xxx_phy_write(ds, addr, regnum, val); + int ret; + + mutex_lock(&ps->phy_mutex); + ret = mv88e6xxx_phy_write(ds, addr, regnum, val); + mutex_unlock(&ps->phy_mutex); + return ret; } static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = { @@ -372,6 +385,9 @@ static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = { { "hist_256_511bytes", 4, 0x0b, }, { "hist_512_1023bytes", 4, 0x0c, }, { "hist_1024_max_bytes", 4, 0x0d, }, + { "sw_in_discards", 4, 0x110, }, + { "sw_in_filtered", 2, 0x112, }, + { "sw_out_filtered", 2, 0x113, }, }; static void @@ -406,6 +422,11 @@ struct dsa_switch_driver mv88e6123_61_65_switch_driver = { .get_strings = mv88e6123_61_65_get_strings, .get_ethtool_stats = mv88e6123_61_65_get_ethtool_stats, .get_sset_count = mv88e6123_61_65_get_sset_count, +#ifdef CONFIG_NET_DSA_HWMON + .get_temp = mv88e6xxx_get_temp, +#endif + .get_regs_len = mv88e6xxx_get_regs_len, + .get_regs = mv88e6xxx_get_regs, }; MODULE_ALIAS("platform:mv88e6123"); diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 244c735014fa47421b25091efd9b70077e1089e0..1230f52aa70e8c4d31e8be1d397f59eab9c4b7eb 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -21,6 +21,7 @@ #define ID_6085 0x04a0 #define ID_6095 0x0950 #define ID_6131 0x1060 +#define ID_6131_B2 0x1066 static char *mv88e6131_probe(struct device *host_dev, int sw_addr) { @@ -32,12 +33,15 @@ static char *mv88e6131_probe(struct device *host_dev, int sw_addr) ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); if (ret >= 0) { - ret &= 0xfff0; - if (ret == ID_6085) + int ret_masked = ret & 0xfff0; + + if (ret_masked == ID_6085) return "Marvell 88E6085"; - if (ret == ID_6095) + if (ret_masked == ID_6095) return "Marvell 88E6095/88E6095F"; - if (ret == ID_6131) + if (ret == ID_6131_B2) + return "Marvell 88E6131 (B2)"; + if (ret_masked == ID_6131) return "Marvell 88E6131"; } diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index 78d8e876f3aaada8ae88c429d007d4c8e3361633..aa33d16f2e22ec6b48f6332db928c9899ce82817 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -1,4 +1,4 @@ -/* net/dsa/mv88e6171.c - Marvell 88e6171 switch chip support +/* net/dsa/mv88e6171.c - Marvell 88e6171/8826172 switch chip support * Copyright (c) 2008-2009 Marvell Semiconductor * Copyright (c) 2014 Claudio Leite * @@ -29,6 +29,8 @@ static char *mv88e6171_probe(struct device *host_dev, int sw_addr) if (ret >= 0) { if ((ret & 0xfff0) == 0x1710) return "Marvell 88E6171"; + if ((ret & 0xfff0) == 0x1720) + return "Marvell 88E6172"; } return NULL; @@ -314,6 +316,8 @@ static int mv88e6171_setup(struct dsa_switch *ds) return ret; } + mutex_init(&ps->phy_mutex); + return 0; } @@ -327,18 +331,28 @@ static int mv88e6171_port_to_phy_addr(int port) static int mv88e6171_phy_read(struct dsa_switch *ds, int port, int regnum) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int addr = mv88e6171_port_to_phy_addr(port); + int ret; - return mv88e6xxx_phy_read(ds, addr, regnum); + mutex_lock(&ps->phy_mutex); + ret = mv88e6xxx_phy_read(ds, addr, regnum); + mutex_unlock(&ps->phy_mutex); + return ret; } static int mv88e6171_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) { + struct 
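The phy_mutex taken in the mv88e6123 and mv88e6171 phy_read/phy_write paths above appears to exist to serialize PHY access against the newly wired-up hwmon code: reading the temperature sensor is a paged access (select a page via register 0x16, access, restore page 0), so an unlocked concurrent phy_read could land in the wrong page mid-sequence. The protected shape, sketched with hypothetical helpers (compare mv88e6352_phy_page_read() below):

#include <linux/mutex.h>

struct foo_priv {
	struct mutex phy_mutex;
};

/* hypothetical raw accessors; the real drivers go through
 * mv88e6xxx_phy_read()/_write() or the GLOBAL2 indirect registers
 */
int foo_phy_read(struct foo_priv *ps, int phy, int reg);
int foo_phy_write(struct foo_priv *ps, int phy, int reg, int val);

static int foo_phy_page_read(struct foo_priv *ps, int phy, int page, int reg)
{
	int ret;

	mutex_lock(&ps->phy_mutex);
	ret = foo_phy_write(ps, phy, 0x16, page);	/* select page */
	if (ret < 0)
		goto restore;
	ret = foo_phy_read(ps, phy, reg);		/* access within that page */
restore:
	foo_phy_write(ps, phy, 0x16, 0x0);		/* always restore page 0 */
	mutex_unlock(&ps->phy_mutex);
	return ret;
}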
mv88e6xxx_priv_state *ps = ds_to_priv(ds); int addr = mv88e6171_port_to_phy_addr(port); + int ret; - return mv88e6xxx_phy_write(ds, addr, regnum, val); + mutex_lock(&ps->phy_mutex); + ret = mv88e6xxx_phy_write(ds, addr, regnum, val); + mutex_unlock(&ps->phy_mutex); + return ret; } static struct mv88e6xxx_hw_stat mv88e6171_hw_stats[] = { @@ -406,6 +420,12 @@ struct dsa_switch_driver mv88e6171_switch_driver = { .get_strings = mv88e6171_get_strings, .get_ethtool_stats = mv88e6171_get_ethtool_stats, .get_sset_count = mv88e6171_get_sset_count, +#ifdef CONFIG_NET_DSA_HWMON + .get_temp = mv88e6xxx_get_temp, +#endif + .get_regs_len = mv88e6xxx_get_regs_len, + .get_regs = mv88e6xxx_get_regs, }; MODULE_ALIAS("platform:mv88e6171"); +MODULE_ALIAS("platform:mv88e6172"); diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c new file mode 100644 index 0000000000000000000000000000000000000000..258d9ef5ef254e49a0fe27586049bbc2a59ebc61 --- /dev/null +++ b/drivers/net/dsa/mv88e6352.c @@ -0,0 +1,788 @@ +/* + * net/dsa/mv88e6352.c - Marvell 88e6352 switch chip support + * + * Copyright (c) 2014 Guenter Roeck + * + * Derived from mv88e6123_61_65.c + * Copyright (c) 2008-2009 Marvell Semiconductor + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "mv88e6xxx.h" + +static int mv88e6352_wait(struct dsa_switch *ds, int reg, u16 mask) +{ + unsigned long timeout = jiffies + HZ / 10; + + while (time_before(jiffies, timeout)) { + int ret; + + ret = REG_READ(REG_GLOBAL2, reg); + if (ret < 0) + return ret; + + if (!(ret & mask)) + return 0; + + usleep_range(1000, 2000); + } + return -ETIMEDOUT; +} + +static inline int mv88e6352_phy_wait(struct dsa_switch *ds) +{ + return mv88e6352_wait(ds, 0x18, 0x8000); +} + +static inline int mv88e6352_eeprom_load_wait(struct dsa_switch *ds) +{ + return mv88e6352_wait(ds, 0x14, 0x0800); +} + +static inline int mv88e6352_eeprom_busy_wait(struct dsa_switch *ds) +{ + return mv88e6352_wait(ds, 0x14, 0x8000); +} + +static int __mv88e6352_phy_read(struct dsa_switch *ds, int addr, int regnum) +{ + int ret; + + REG_WRITE(REG_GLOBAL2, 0x18, 0x9800 | (addr << 5) | regnum); + + ret = mv88e6352_phy_wait(ds); + if (ret < 0) + return ret; + + return REG_READ(REG_GLOBAL2, 0x19); +} + +static int __mv88e6352_phy_write(struct dsa_switch *ds, int addr, int regnum, + u16 val) +{ + REG_WRITE(REG_GLOBAL2, 0x19, val); + REG_WRITE(REG_GLOBAL2, 0x18, 0x9400 | (addr << 5) | regnum); + + return mv88e6352_phy_wait(ds); +} + +static char *mv88e6352_probe(struct device *host_dev, int sw_addr) +{ + struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); + int ret; + + if (bus == NULL) + return NULL; + + ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); + if (ret >= 0) { + if ((ret & 0xfff0) == 0x1760) + return "Marvell 88E6176"; + if (ret == 0x3521) + return "Marvell 88E6352 (A0)"; + if (ret == 0x3522) + return "Marvell 88E6352 (A1)"; + if ((ret & 0xfff0) == 0x3520) + return "Marvell 88E6352"; + } + + return NULL; +} + +static int mv88e6352_switch_reset(struct dsa_switch *ds) +{ + unsigned long timeout; + int ret; + int i; + + /* Set all ports to the disabled state. 
*/ + for (i = 0; i < 7; i++) { + ret = REG_READ(REG_PORT(i), 0x04); + REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc); + } + + /* Wait for transmit queues to drain. */ + usleep_range(2000, 4000); + + /* Reset the switch. Keep PPU active (bit 14, undocumented). + * The PPU needs to be active to support indirect phy register + * accesses through global registers 0x18 and 0x19. + */ + REG_WRITE(REG_GLOBAL, 0x04, 0xc000); + + /* Wait up to one second for reset to complete. */ + timeout = jiffies + 1 * HZ; + while (time_before(jiffies, timeout)) { + ret = REG_READ(REG_GLOBAL, 0x00); + if ((ret & 0x8800) == 0x8800) + break; + usleep_range(1000, 2000); + } + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + + return 0; +} + +static int mv88e6352_setup_global(struct dsa_switch *ds) +{ + int ret; + int i; + + /* Discard packets with excessive collisions, + * mask all interrupt sources, enable PPU (bit 14, undocumented). + */ + REG_WRITE(REG_GLOBAL, 0x04, 0x6000); + + /* Set the default address aging time to 5 minutes, and + * enable address learn messages to be sent to all message + * ports. + */ + REG_WRITE(REG_GLOBAL, 0x0a, 0x0148); + + /* Configure the priority mapping registers. */ + ret = mv88e6xxx_config_prio(ds); + if (ret < 0) + return ret; + + /* Configure the upstream port, and configure the upstream + * port as the port to which ingress and egress monitor frames + * are to be sent. + */ + REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110)); + + /* Disable remote management for now, and set the switch's + * DSA device number. + */ + REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f); + + /* Send all frames with destination addresses matching + * 01:80:c2:00:00:2x to the CPU port. + */ + REG_WRITE(REG_GLOBAL2, 0x02, 0xffff); + + /* Send all frames with destination addresses matching + * 01:80:c2:00:00:0x to the CPU port. + */ + REG_WRITE(REG_GLOBAL2, 0x03, 0xffff); + + /* Disable the loopback filter, disable flow control + * messages, disable flood broadcast override, disable + * removing of provider tags, disable ATU age violation + * interrupts, disable tag flow control, force flow + * control priority to the highest, and send all special + * multicast frames to the CPU at the highest priority. + */ + REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff); + + /* Program the DSA routing table. */ + for (i = 0; i < 32; i++) { + int nexthop = 0x1f; + + if (i != ds->index && i < ds->dst->pd->nr_chips) + nexthop = ds->pd->rtable[i] & 0x1f; + + REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop); + } + + /* Clear all trunk masks. */ + for (i = 0; i < 8; i++) + REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7f); + + /* Clear all trunk mappings. */ + for (i = 0; i < 16; i++) + REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11)); + + /* Disable ingress rate limiting by resetting all ingress + * rate limit registers to their initial state. + */ + for (i = 0; i < 7; i++) + REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8)); + + /* Initialise cross-chip port VLAN table to reset defaults. */ + REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000); + + /* Clear the priority override table. 
*/ + for (i = 0; i < 16; i++) + REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8)); + + /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */ + + return 0; +} + +static int mv88e6352_setup_port(struct dsa_switch *ds, int p) +{ + int addr = REG_PORT(p); + u16 val; + + /* MAC Forcing register: don't force link, speed, duplex + * or flow control state to any particular values on physical + * ports, but force the CPU port and all DSA ports to 1000 Mb/s + * full duplex. + */ + if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p)) + REG_WRITE(addr, 0x01, 0x003e); + else + REG_WRITE(addr, 0x01, 0x0003); + + /* Do not limit the period of time that this port can be + * paused for by the remote end or the period of time that + * this port can pause the remote end. + */ + REG_WRITE(addr, 0x02, 0x0000); + + /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock, + * disable Header mode, enable IGMP/MLD snooping, disable VLAN + * tunneling, determine priority by looking at 802.1p and IP + * priority fields (IP prio has precedence), and set STP state + * to Forwarding. + * + * If this is the CPU link, use DSA or EDSA tagging depending + * on which tagging mode was configured. + * + * If this is a link to another switch, use DSA tagging mode. + * + * If this is the upstream port for this switch, enable + * forwarding of unknown unicasts and multicasts. + */ + val = 0x0433; + if (dsa_is_cpu_port(ds, p)) { + if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA) + val |= 0x3300; + else + val |= 0x0100; + } + if (ds->dsa_port_mask & (1 << p)) + val |= 0x0100; + if (p == dsa_upstream_port(ds)) + val |= 0x000c; + REG_WRITE(addr, 0x04, val); + + /* Port Control 1: disable trunking. Also, if this is the + * CPU port, enable learn messages to be sent to this port. + */ + REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000); + + /* Port based VLAN map: give each port its own address + * database, allow the CPU port to talk to each of the 'real' + * ports, and allow each of the 'real' ports to only talk to + * the upstream port. + */ + val = (p & 0xf) << 12; + if (dsa_is_cpu_port(ds, p)) + val |= ds->phys_port_mask; + else + val |= 1 << dsa_upstream_port(ds); + REG_WRITE(addr, 0x06, val); + + /* Default VLAN ID and priority: don't set a default VLAN + * ID, and set the default packet priority to zero. + */ + REG_WRITE(addr, 0x07, 0x0000); + + /* Port Control 2: don't force a good FCS, set the maximum + * frame size to 10240 bytes, don't let the switch add or + * strip 802.1q tags, don't discard tagged or untagged frames + * on this port, do a destination address lookup on all + * received packets as usual, disable ARP mirroring and don't + * send a copy of all transmitted/received frames on this port + * to the CPU. + */ + REG_WRITE(addr, 0x08, 0x2080); + + /* Egress rate control: disable egress rate control. */ + REG_WRITE(addr, 0x09, 0x0001); + + /* Egress rate control 2: disable egress rate control. */ + REG_WRITE(addr, 0x0a, 0x0000); + + /* Port Association Vector: when learning source addresses + * of packets, add the address to the address database using + * a port bitmap that has only the bit for this port set and + * the other bits clear. + */ + REG_WRITE(addr, 0x0b, 1 << p); + + /* Port ATU control: disable limiting the number of address + * database entries that this port is allowed to use. + */ + REG_WRITE(addr, 0x0c, 0x0000); + + /* Priority Override: disable DA, SA and VTU priority override. 
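The "port based VLAN map" write above implements the classic DSA isolation policy: the CPU port may reach every front-panel port, while each front-panel port may only talk upstream. The mask computation in isolation (userspace; a 7-port switch with port 5 as CPU/upstream port is an assumed example):

#include <stdio.h>

#define UPSTREAM_PORT	5
#define PHYS_PORT_MASK	0x1f	/* user ports 0-4 */

static unsigned int port_vlan_map(int p, int is_cpu)
{
	unsigned int val = (p & 0xf) << 12;	/* per-port address database */

	if (is_cpu)
		val |= PHYS_PORT_MASK;		/* CPU talks to all user ports */
	else
		val |= 1 << UPSTREAM_PORT;	/* user ports talk only upstream */
	return val;
}

int main(void)
{
	for (int p = 0; p < 7; p++)
		printf("port %d: reg 0x06 = 0x%04x\n",
		       p, port_vlan_map(p, p == UPSTREAM_PORT));
	return 0;
}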
*/ + REG_WRITE(addr, 0x0d, 0x0000); + + /* Port Ethertype: use the Ethertype DSA Ethertype value. */ + REG_WRITE(addr, 0x0f, ETH_P_EDSA); + + /* Tag Remap: use an identity 802.1p prio -> switch prio + * mapping. + */ + REG_WRITE(addr, 0x18, 0x3210); + + /* Tag Remap 2: use an identity 802.1p prio -> switch prio + * mapping. + */ + REG_WRITE(addr, 0x19, 0x7654); + + return 0; +} + +#ifdef CONFIG_NET_DSA_HWMON + +static int mv88e6352_phy_page_read(struct dsa_switch *ds, + int port, int page, int reg) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; + + mutex_lock(&ps->phy_mutex); + ret = __mv88e6352_phy_write(ds, port, 0x16, page); + if (ret < 0) + goto error; + ret = __mv88e6352_phy_read(ds, port, reg); +error: + __mv88e6352_phy_write(ds, port, 0x16, 0x0); + mutex_unlock(&ps->phy_mutex); + return ret; +} + +static int mv88e6352_phy_page_write(struct dsa_switch *ds, + int port, int page, int reg, int val) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; + + mutex_lock(&ps->phy_mutex); + ret = __mv88e6352_phy_write(ds, port, 0x16, page); + if (ret < 0) + goto error; + + ret = __mv88e6352_phy_write(ds, port, reg, val); +error: + __mv88e6352_phy_write(ds, port, 0x16, 0x0); + mutex_unlock(&ps->phy_mutex); + return ret; +} + +static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp) +{ + int ret; + + *temp = 0; + + ret = mv88e6352_phy_page_read(ds, 0, 6, 27); + if (ret < 0) + return ret; + + *temp = (ret & 0xff) - 25; + + return 0; +} + +static int mv88e6352_get_temp_limit(struct dsa_switch *ds, int *temp) +{ + int ret; + + *temp = 0; + + ret = mv88e6352_phy_page_read(ds, 0, 6, 26); + if (ret < 0) + return ret; + + *temp = (((ret >> 8) & 0x1f) * 5) - 25; + + return 0; +} + +static int mv88e6352_set_temp_limit(struct dsa_switch *ds, int temp) +{ + int ret; + + ret = mv88e6352_phy_page_read(ds, 0, 6, 26); + if (ret < 0) + return ret; + temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f); + return mv88e6352_phy_page_write(ds, 0, 6, 26, + (ret & 0xe0ff) | (temp << 8)); +} + +static int mv88e6352_get_temp_alarm(struct dsa_switch *ds, bool *alarm) +{ + int ret; + + *alarm = false; + + ret = mv88e6352_phy_page_read(ds, 0, 6, 26); + if (ret < 0) + return ret; + + *alarm = !!(ret & 0x40); + + return 0; +} +#endif /* CONFIG_NET_DSA_HWMON */ + +static int mv88e6352_setup(struct dsa_switch *ds) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; + int i; + + mutex_init(&ps->smi_mutex); + mutex_init(&ps->stats_mutex); + mutex_init(&ps->phy_mutex); + mutex_init(&ps->eeprom_mutex); + + ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0; + + ret = mv88e6352_switch_reset(ds); + if (ret < 0) + return ret; + + /* @@@ initialise vtu and atu */ + + ret = mv88e6352_setup_global(ds); + if (ret < 0) + return ret; + + for (i = 0; i < 7; i++) { + ret = mv88e6352_setup_port(ds, i); + if (ret < 0) + return ret; + } + + return 0; +} + +static int mv88e6352_port_to_phy_addr(int port) +{ + if (port >= 0 && port <= 4) + return port; + return -EINVAL; +} + +static int +mv88e6352_phy_read(struct dsa_switch *ds, int port, int regnum) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int addr = mv88e6352_port_to_phy_addr(port); + int ret; + + if (addr < 0) + return addr; + + mutex_lock(&ps->phy_mutex); + ret = __mv88e6352_phy_read(ds, addr, regnum); + mutex_unlock(&ps->phy_mutex); + + return ret; +} + +static int +mv88e6352_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int addr = 
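The hwmon arithmetic above packs two scales into PHY page 6: register 27 reports the measured temperature as an 8-bit value offset by -25 degC, while register 26 holds the alarm limit as a 5-bit field in 5 degC steps with the same offset (hence the *5, the +5 when encoding, and the clamp to 0..0x1f). A quick userspace check of the round trip, with register contents assumed for illustration:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))	/* non-negative x */

int main(void)
{
	int reg27 = 0x003c;	/* assumed: raw sensor reading */
	int reg26 = 0x0a00;	/* assumed: limit field in bits 12:8 */

	int meas  = (reg27 & 0xff) - 25;		/* 60 - 25 = 35 degC */
	int limit = (((reg26 >> 8) & 0x1f) * 5) - 25;	/* 10*5 - 25 = 25 degC */

	/* encoding a new 40 degC limit for write-back into bits 12:8 */
	int field = DIV_ROUND_CLOSEST(40, 5) + 5;	/* 13, decodes back to 40 */

	printf("meas=%d limit=%d field=0x%02x\n", meas, limit, field);
	return 0;
}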
mv88e6352_port_to_phy_addr(port); + int ret; + + if (addr < 0) + return addr; + + mutex_lock(&ps->phy_mutex); + ret = __mv88e6352_phy_write(ds, addr, regnum, val); + mutex_unlock(&ps->phy_mutex); + + return ret; +} + +static struct mv88e6xxx_hw_stat mv88e6352_hw_stats[] = { + { "in_good_octets", 8, 0x00, }, + { "in_bad_octets", 4, 0x02, }, + { "in_unicast", 4, 0x04, }, + { "in_broadcasts", 4, 0x06, }, + { "in_multicasts", 4, 0x07, }, + { "in_pause", 4, 0x16, }, + { "in_undersize", 4, 0x18, }, + { "in_fragments", 4, 0x19, }, + { "in_oversize", 4, 0x1a, }, + { "in_jabber", 4, 0x1b, }, + { "in_rx_error", 4, 0x1c, }, + { "in_fcs_error", 4, 0x1d, }, + { "out_octets", 8, 0x0e, }, + { "out_unicast", 4, 0x10, }, + { "out_broadcasts", 4, 0x13, }, + { "out_multicasts", 4, 0x12, }, + { "out_pause", 4, 0x15, }, + { "excessive", 4, 0x11, }, + { "collisions", 4, 0x1e, }, + { "deferred", 4, 0x05, }, + { "single", 4, 0x14, }, + { "multiple", 4, 0x17, }, + { "out_fcs_error", 4, 0x03, }, + { "late", 4, 0x1f, }, + { "hist_64bytes", 4, 0x08, }, + { "hist_65_127bytes", 4, 0x09, }, + { "hist_128_255bytes", 4, 0x0a, }, + { "hist_256_511bytes", 4, 0x0b, }, + { "hist_512_1023bytes", 4, 0x0c, }, + { "hist_1024_max_bytes", 4, 0x0d, }, + { "sw_in_discards", 4, 0x110, }, + { "sw_in_filtered", 2, 0x112, }, + { "sw_out_filtered", 2, 0x113, }, +}; + +static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; + + mutex_lock(&ps->eeprom_mutex); + + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14, + 0xc000 | (addr & 0xff)); + if (ret < 0) + goto error; + + ret = mv88e6352_eeprom_busy_wait(ds); + if (ret < 0) + goto error; + + ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x15); +error: + mutex_unlock(&ps->eeprom_mutex); + return ret; +} + +static int mv88e6352_get_eeprom(struct dsa_switch *ds, + struct ethtool_eeprom *eeprom, u8 *data) +{ + int offset; + int len; + int ret; + + offset = eeprom->offset; + len = eeprom->len; + eeprom->len = 0; + + eeprom->magic = 0xc3ec4951; + + ret = mv88e6352_eeprom_load_wait(ds); + if (ret < 0) + return ret; + + if (offset & 1) { + int word; + + word = mv88e6352_read_eeprom_word(ds, offset >> 1); + if (word < 0) + return word; + + *data++ = (word >> 8) & 0xff; + + offset++; + len--; + eeprom->len++; + } + + while (len >= 2) { + int word; + + word = mv88e6352_read_eeprom_word(ds, offset >> 1); + if (word < 0) + return word; + + *data++ = word & 0xff; + *data++ = (word >> 8) & 0xff; + + offset += 2; + len -= 2; + eeprom->len += 2; + } + + if (len) { + int word; + + word = mv88e6352_read_eeprom_word(ds, offset >> 1); + if (word < 0) + return word; + + *data++ = word & 0xff; + + offset++; + len--; + eeprom->len++; + } + + return 0; +} + +static int mv88e6352_eeprom_is_readonly(struct dsa_switch *ds) +{ + int ret; + + ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x14); + if (ret < 0) + return ret; + + if (!(ret & 0x0400)) + return -EROFS; + + return 0; +} + +static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr, + u16 data) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; + + mutex_lock(&ps->eeprom_mutex); + + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x15, data); + if (ret < 0) + goto error; + + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14, + 0xb000 | (addr & 0xff)); + if (ret < 0) + goto error; + + ret = mv88e6352_eeprom_busy_wait(ds); +error: + mutex_unlock(&ps->eeprom_mutex); + return ret; +} + +static int mv88e6352_set_eeprom(struct dsa_switch *ds, + struct ethtool_eeprom *eeprom, 
u8 *data) +{ + int offset; + int ret; + int len; + + if (eeprom->magic != 0xc3ec4951) + return -EINVAL; + + ret = mv88e6352_eeprom_is_readonly(ds); + if (ret) + return ret; + + offset = eeprom->offset; + len = eeprom->len; + eeprom->len = 0; + + ret = mv88e6352_eeprom_load_wait(ds); + if (ret < 0) + return ret; + + if (offset & 1) { + int word; + + word = mv88e6352_read_eeprom_word(ds, offset >> 1); + if (word < 0) + return word; + + word = (*data++ << 8) | (word & 0xff); + + ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word); + if (ret < 0) + return ret; + + offset++; + len--; + eeprom->len++; + } + + while (len >= 2) { + int word; + + word = *data++; + word |= *data++ << 8; + + ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word); + if (ret < 0) + return ret; + + offset += 2; + len -= 2; + eeprom->len += 2; + } + + if (len) { + int word; + + word = mv88e6352_read_eeprom_word(ds, offset >> 1); + if (word < 0) + return word; + + word = (word & 0xff00) | *data++; + + ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word); + if (ret < 0) + return ret; + + offset++; + len--; + eeprom->len++; + } + + return 0; +} + +static void +mv88e6352_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +{ + mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6352_hw_stats), + mv88e6352_hw_stats, port, data); +} + +static void +mv88e6352_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) +{ + mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6352_hw_stats), + mv88e6352_hw_stats, port, data); +} + +static int mv88e6352_get_sset_count(struct dsa_switch *ds) +{ + return ARRAY_SIZE(mv88e6352_hw_stats); +} + +struct dsa_switch_driver mv88e6352_switch_driver = { + .tag_protocol = DSA_TAG_PROTO_EDSA, + .priv_size = sizeof(struct mv88e6xxx_priv_state), + .probe = mv88e6352_probe, + .setup = mv88e6352_setup, + .set_addr = mv88e6xxx_set_addr_indirect, + .phy_read = mv88e6352_phy_read, + .phy_write = mv88e6352_phy_write, + .poll_link = mv88e6xxx_poll_link, + .get_strings = mv88e6352_get_strings, + .get_ethtool_stats = mv88e6352_get_ethtool_stats, + .get_sset_count = mv88e6352_get_sset_count, +#ifdef CONFIG_NET_DSA_HWMON + .get_temp = mv88e6352_get_temp, + .get_temp_limit = mv88e6352_get_temp_limit, + .set_temp_limit = mv88e6352_set_temp_limit, + .get_temp_alarm = mv88e6352_get_temp_alarm, +#endif + .get_eeprom = mv88e6352_get_eeprom, + .set_eeprom = mv88e6352_set_eeprom, + .get_regs_len = mv88e6xxx_get_regs_len, + .get_regs = mv88e6xxx_get_regs, +}; + +MODULE_ALIAS("platform:mv88e6352"); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index a6c90cf5634d899e745f97b763299018d49e049c..cd6807c6b4eddaec7eb57ebc2107c0290ff1018f 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -485,20 +485,108 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, for (i = 0; i < nr_stats; i++) { struct mv88e6xxx_hw_stat *s = stats + i; u32 low; - u32 high; - + u32 high = 0; + + if (s->reg >= 0x100) { + int ret; + + ret = mv88e6xxx_reg_read(ds, REG_PORT(port), + s->reg - 0x100); + if (ret < 0) + goto error; + low = ret; + if (s->sizeof_stat == 4) { + ret = mv88e6xxx_reg_read(ds, REG_PORT(port), + s->reg - 0x100 + 1); + if (ret < 0) + goto error; + high = ret; + } + data[i] = (((u64)high) << 16) | low; + continue; + } mv88e6xxx_stats_read(ds, s->reg, &low); if (s->sizeof_stat == 8) mv88e6xxx_stats_read(ds, s->reg + 1, &high); - else - high = 0; data[i] = (((u64)high) << 32) | low; } - +error: mutex_unlock(&ps->stats_mutex); } +int mv88e6xxx_get_regs_len(struct 
dsa_switch *ds, int port) +{ + return 32 * sizeof(u16); +} + +void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, + struct ethtool_regs *regs, void *_p) +{ + u16 *p = _p; + int i; + + regs->version = 0; + + memset(p, 0xff, 32 * sizeof(u16)); + + for (i = 0; i < 32; i++) { + int ret; + + ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i); + if (ret >= 0) + p[i] = ret; + } +} + +#ifdef CONFIG_NET_DSA_HWMON + +int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; + int val; + + *temp = 0; + + mutex_lock(&ps->phy_mutex); + + ret = mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6); + if (ret < 0) + goto error; + + /* Enable temperature sensor */ + ret = mv88e6xxx_phy_read(ds, 0x0, 0x1a); + if (ret < 0) + goto error; + + ret = mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5)); + if (ret < 0) + goto error; + + /* Wait for temperature to stabilize */ + usleep_range(10000, 12000); + + val = mv88e6xxx_phy_read(ds, 0x0, 0x1a); + if (val < 0) { + ret = val; + goto error; + } + + /* Disable temperature sensor */ + ret = mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5)); + if (ret < 0) + goto error; + + *temp = ((val & 0x1f) - 5) * 5; + +error: + mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0); + mutex_unlock(&ps->phy_mutex); + return ret; +} +#endif /* CONFIG_NET_DSA_HWMON */ + static int __init mv88e6xxx_init(void) { #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131) @@ -507,6 +595,9 @@ static int __init mv88e6xxx_init(void) #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65) register_switch_driver(&mv88e6123_61_65_switch_driver); #endif +#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352) + register_switch_driver(&mv88e6352_switch_driver); +#endif #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171) register_switch_driver(&mv88e6171_switch_driver); #endif diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 5e5145ad95259cffe069500333f270e6de5f04e6..03e397efde36949810503a16b601f3df61f3396c 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -37,6 +37,17 @@ struct mv88e6xxx_priv_state { */ struct mutex stats_mutex; + /* This mutex serializes phy access for chips with + * indirect phy addressing. It is unused for chips + * with direct phy access. + */ + struct mutex phy_mutex; + + /* This mutex serializes eeprom access for chips with + * eeprom support. 
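+ * A single word access is a multi-register sequence (command/ + * address register, data register, plus a busy-wait), so the + * mutex is held across the whole sequence.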
+ */ + struct mutex eeprom_mutex; + int id; /* switch product id */ }; @@ -67,9 +78,14 @@ void mv88e6xxx_get_strings(struct dsa_switch *ds, void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int nr_stats, struct mv88e6xxx_hw_stat *stats, int port, uint64_t *data); +int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port); +void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, + struct ethtool_regs *regs, void *_p); +int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp); extern struct dsa_switch_driver mv88e6131_switch_driver; extern struct dsa_switch_driver mv88e6123_61_65_switch_driver; +extern struct dsa_switch_driver mv88e6352_switch_driver; extern struct dsa_switch_driver mv88e6171_switch_driver; #define REG_READ(addr, reg) \ diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index ff435fbd1ad0e8b32a8299211a890b1f5b9deb31..49adbf1b7574211dcd97db2c8cc970e427934c87 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -38,6 +38,9 @@ #include #include +#define DRV_NAME "dummy" +#define DRV_VERSION "1.0" + static int numdummies = 1; /* fake multicast ability */ @@ -120,12 +123,24 @@ static const struct net_device_ops dummy_netdev_ops = { .ndo_change_carrier = dummy_change_carrier, }; +static void dummy_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); +} + +static const struct ethtool_ops dummy_ethtool_ops = { + .get_drvinfo = dummy_get_drvinfo, +}; + static void dummy_setup(struct net_device *dev) { ether_setup(dev); /* Initialize the device structure. */ dev->netdev_ops = &dummy_netdev_ops; + dev->ethtool_ops = &dummy_ethtool_ops; dev->destructor = free_netdev; /* Fill in device structure with ethernet-generic values. 
*/ @@ -150,7 +165,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[]) } static struct rtnl_link_ops dummy_link_ops __read_mostly = { - .kind = "dummy", + .kind = DRV_NAME, .setup = dummy_setup, .validate = dummy_validate, }; @@ -209,4 +224,5 @@ static void __exit dummy_cleanup_module(void) module_init(dummy_init_module); module_exit(dummy_cleanup_module); MODULE_LICENSE("GPL"); -MODULE_ALIAS_RTNL_LINK("dummy"); +MODULE_ALIAS_RTNL_LINK(DRV_NAME); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c index 48775b88bac72a9de44342b4c43b04e33dead760..dede43f4ce095bbea6a85923891acae0e704563d 100644 --- a/drivers/net/ethernet/3com/typhoon.c +++ b/drivers/net/ethernet/3com/typhoon.c @@ -1285,7 +1285,7 @@ typhoon_request_firmware(struct typhoon *tp) return err; } - image_data = (u8 *) typhoon_fw->data; + image_data = typhoon_fw->data; remaining = typhoon_fw->size; if (remaining < sizeof(struct typhoon_file_header)) goto invalid_fw; @@ -1343,7 +1343,7 @@ typhoon_download_firmware(struct typhoon *tp) int i; int err; - image_data = (u8 *) typhoon_fw->data; + image_data = typhoon_fw->data; fHdr = (struct typhoon_file_header *) image_data; /* Cannot just map the firmware image using pci_map_single() as diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 1ed1fbba5d58140912258d80bf8279bb9a2686a1..df76050d0a9d26dc5a5b106a0ac44a18d84441b9 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -155,6 +155,7 @@ source "drivers/net/ethernet/qualcomm/Kconfig" source "drivers/net/ethernet/realtek/Kconfig" source "drivers/net/ethernet/renesas/Kconfig" source "drivers/net/ethernet/rdc/Kconfig" +source "drivers/net/ethernet/rocker/Kconfig" config S6GMAC tristate "S6105 GMAC ethernet support" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 6e0b629e9859a85b13d82ce3b0d9fb6a7eece3e3..bf56f8b36e90cbb2fcef22627fadff0041fef897 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -65,6 +65,7 @@ obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/ obj-$(CONFIG_SH_ETH) += renesas/ obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ +obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/ obj-$(CONFIG_S6GMAC) += s6gmac.o obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/ obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/ diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index 8319c99331b0593b295b8c20e3ba53a562c60024..7a5e4aa5415e2652a13d9624a70003a5e2f6ad3d 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -179,7 +179,7 @@ config SUNLANCE config AMD_XGBE tristate "AMD 10GbE Ethernet driver" - depends on OF_NET + depends on OF_NET && HAS_IOMEM select PHYLIB select AMD_XGBE_PHY select BITREVERSE diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index caade30820d50b9851228589a192f17bb8c9a915..75b08c63d39f4a469a389c0327e1f3071932596f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -125,9 +125,6 @@ #define DMA_AXIAWCR 0x3018 #define DMA_DSR0 0x3020 #define DMA_DSR1 0x3024 -#define DMA_DSR2 0x3028 -#define DMA_DSR3 0x302c -#define DMA_DSR4 0x3030 /* DMA register entry bit positions and sizes */ #define DMA_AXIARCR_DRC_INDEX 0 @@ -158,10 +155,6 @@ #define DMA_AXIAWCR_TDC_WIDTH 4 #define DMA_AXIAWCR_TDD_INDEX 28 #define DMA_AXIAWCR_TDD_WIDTH 2 -#define 
DMA_DSR0_RPS_INDEX 8 -#define DMA_DSR0_RPS_WIDTH 4 -#define DMA_DSR0_TPS_INDEX 12 -#define DMA_DSR0_TPS_WIDTH 4 #define DMA_ISR_MACIS_INDEX 17 #define DMA_ISR_MACIS_WIDTH 1 #define DMA_ISR_MTLIS_INDEX 16 @@ -175,6 +168,20 @@ #define DMA_SBMR_UNDEF_INDEX 0 #define DMA_SBMR_UNDEF_WIDTH 1 +/* DMA register values */ +#define DMA_DSR_RPS_WIDTH 4 +#define DMA_DSR_TPS_WIDTH 4 +#define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH) +#define DMA_DSR0_RPS_START 8 +#define DMA_DSR0_TPS_START 12 +#define DMA_DSRX_FIRST_QUEUE 3 +#define DMA_DSRX_INC 4 +#define DMA_DSRX_QPR 4 +#define DMA_DSRX_RPS_START 0 +#define DMA_DSRX_TPS_START 4 +#define DMA_TPS_STOPPED 0x00 +#define DMA_TPS_SUSPENDED 0x06 + /* DMA channel register offsets * Multiple channels can be active. The first channel has registers * that begin at 0x3100. Each subsequent channel has registers that @@ -207,6 +214,8 @@ /* DMA channel register entry bit positions and sizes */ #define DMA_CH_CR_PBLX8_INDEX 16 #define DMA_CH_CR_PBLX8_WIDTH 1 +#define DMA_CH_CR_SPH_INDEX 24 +#define DMA_CH_CR_SPH_WIDTH 1 #define DMA_CH_IER_AIE_INDEX 15 #define DMA_CH_IER_AIE_WIDTH 1 #define DMA_CH_IER_FBEE_INDEX 12 @@ -306,6 +315,9 @@ #define MAC_MACA0LR 0x0304 #define MAC_MACA1HR 0x0308 #define MAC_MACA1LR 0x030c +#define MAC_RSSCR 0x0c80 +#define MAC_RSSAR 0x0c88 +#define MAC_RSSDR 0x0c8c #define MAC_TSCR 0x0d00 #define MAC_SSIR 0x0d04 #define MAC_STSR 0x0d08 @@ -429,6 +441,8 @@ #define MAC_RCR_CST_WIDTH 1 #define MAC_RCR_DCRCC_INDEX 3 #define MAC_RCR_DCRCC_WIDTH 1 +#define MAC_RCR_HDSMS_INDEX 12 +#define MAC_RCR_HDSMS_WIDTH 3 #define MAC_RCR_IPC_INDEX 9 #define MAC_RCR_IPC_WIDTH 1 #define MAC_RCR_JE_INDEX 8 @@ -445,6 +459,24 @@ #define MAC_RFCR_UP_WIDTH 1 #define MAC_RQC0R_RXQ0EN_INDEX 0 #define MAC_RQC0R_RXQ0EN_WIDTH 2 +#define MAC_RSSAR_ADDRT_INDEX 2 +#define MAC_RSSAR_ADDRT_WIDTH 1 +#define MAC_RSSAR_CT_INDEX 1 +#define MAC_RSSAR_CT_WIDTH 1 +#define MAC_RSSAR_OB_INDEX 0 +#define MAC_RSSAR_OB_WIDTH 1 +#define MAC_RSSAR_RSSIA_INDEX 8 +#define MAC_RSSAR_RSSIA_WIDTH 8 +#define MAC_RSSCR_IP2TE_INDEX 1 +#define MAC_RSSCR_IP2TE_WIDTH 1 +#define MAC_RSSCR_RSSE_INDEX 0 +#define MAC_RSSCR_RSSE_WIDTH 1 +#define MAC_RSSCR_TCP4TE_INDEX 2 +#define MAC_RSSCR_TCP4TE_WIDTH 1 +#define MAC_RSSCR_UDP4TE_INDEX 3 +#define MAC_RSSCR_UDP4TE_WIDTH 1 +#define MAC_RSSDR_DMCH_INDEX 0 +#define MAC_RSSDR_DMCH_WIDTH 4 #define MAC_SSIR_SNSINC_INDEX 8 #define MAC_SSIR_SNSINC_WIDTH 8 #define MAC_SSIR_SSINC_INDEX 16 @@ -844,9 +876,13 @@ #define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6 +#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1 #define RX_NORMAL_DESC0_OVT_INDEX 0 #define RX_NORMAL_DESC0_OVT_WIDTH 16 +#define RX_NORMAL_DESC2_HL_INDEX 0 +#define RX_NORMAL_DESC2_HL_WIDTH 10 #define RX_NORMAL_DESC3_CDA_INDEX 27 #define RX_NORMAL_DESC3_CDA_WIDTH 1 #define RX_NORMAL_DESC3_CTXT_INDEX 30 @@ -855,14 +891,27 @@ #define RX_NORMAL_DESC3_ES_WIDTH 1 #define RX_NORMAL_DESC3_ETLT_INDEX 16 #define RX_NORMAL_DESC3_ETLT_WIDTH 4 +#define RX_NORMAL_DESC3_FD_INDEX 29 +#define RX_NORMAL_DESC3_FD_WIDTH 1 #define RX_NORMAL_DESC3_INTE_INDEX 30 #define RX_NORMAL_DESC3_INTE_WIDTH 1 +#define RX_NORMAL_DESC3_L34T_INDEX 20 +#define RX_NORMAL_DESC3_L34T_WIDTH 4 #define RX_NORMAL_DESC3_LD_INDEX 28 #define RX_NORMAL_DESC3_LD_WIDTH 1 #define RX_NORMAL_DESC3_OWN_INDEX 31 #define RX_NORMAL_DESC3_OWN_WIDTH 1 #define RX_NORMAL_DESC3_PL_INDEX 0 #define RX_NORMAL_DESC3_PL_WIDTH 14 
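+/* When RSV is set, descriptor word 1 carries a valid RSS hash (see xgbe_dev_read) */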
+#define RX_NORMAL_DESC3_RSV_INDEX 26 +#define RX_NORMAL_DESC3_RSV_WIDTH 1 + +#define RX_DESC3_L34T_IPV4_TCP 1 +#define RX_DESC3_L34T_IPV4_UDP 2 +#define RX_DESC3_L34T_IPV4_ICMP 3 +#define RX_DESC3_L34T_IPV6_TCP 9 +#define RX_DESC3_L34T_IPV6_UDP 10 +#define RX_DESC3_L34T_IPV6_ICMP 11 #define RX_CONTEXT_DESC3_TSA_INDEX 4 #define RX_CONTEXT_DESC3_TSA_WIDTH 1 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index 6fc5da01437d8ba75a3b8cf0cd16244b61e4f7d8..a50891f521978ff67d55959fbfab1b63da762f4c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -117,7 +117,7 @@ #include "xgbe.h" #include "xgbe-common.h" -static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *); +static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *); static void xgbe_free_ring(struct xgbe_prv_data *pdata, struct xgbe_ring *ring) @@ -131,13 +131,35 @@ static void xgbe_free_ring(struct xgbe_prv_data *pdata, if (ring->rdata) { for (i = 0; i < ring->rdesc_count; i++) { rdata = XGBE_GET_DESC_DATA(ring, i); - xgbe_unmap_skb(pdata, rdata); + xgbe_unmap_rdata(pdata, rdata); } kfree(ring->rdata); ring->rdata = NULL; } + if (ring->rx_hdr_pa.pages) { + dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma, + ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE); + put_page(ring->rx_hdr_pa.pages); + + ring->rx_hdr_pa.pages = NULL; + ring->rx_hdr_pa.pages_len = 0; + ring->rx_hdr_pa.pages_offset = 0; + ring->rx_hdr_pa.pages_dma = 0; + } + + if (ring->rx_buf_pa.pages) { + dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma, + ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE); + put_page(ring->rx_buf_pa.pages); + + ring->rx_buf_pa.pages = NULL; + ring->rx_buf_pa.pages_len = 0; + ring->rx_buf_pa.pages_offset = 0; + ring->rx_buf_pa.pages_dma = 0; + } + if (ring->rdesc) { dma_free_coherent(pdata->dev, (sizeof(struct xgbe_ring_desc) * @@ -233,6 +255,96 @@ err_ring: return ret; } +static int xgbe_alloc_pages(struct xgbe_prv_data *pdata, + struct xgbe_page_alloc *pa, gfp_t gfp, int order) +{ + struct page *pages = NULL; + dma_addr_t pages_dma; + int ret; + + /* Try to obtain pages, decreasing order if necessary */ + gfp |= __GFP_COLD | __GFP_COMP; + while (order >= 0) { + pages = alloc_pages(gfp, order); + if (pages) + break; + + order--; + } + if (!pages) + return -ENOMEM; + + /* Map the pages */ + pages_dma = dma_map_page(pdata->dev, pages, 0, + PAGE_SIZE << order, DMA_FROM_DEVICE); + ret = dma_mapping_error(pdata->dev, pages_dma); + if (ret) { + put_page(pages); + return ret; + } + + pa->pages = pages; + pa->pages_len = PAGE_SIZE << order; + pa->pages_offset = 0; + pa->pages_dma = pages_dma; + + return 0; +} + +static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd, + struct xgbe_page_alloc *pa, + unsigned int len) +{ + get_page(pa->pages); + bd->pa = *pa; + + bd->dma = pa->pages_dma + pa->pages_offset; + bd->dma_len = len; + + pa->pages_offset += len; + if ((pa->pages_offset + len) > pa->pages_len) { + /* This data descriptor is responsible for unmapping page(s) */ + bd->pa_unmap = *pa; + + /* Get a new allocation next time */ + pa->pages = NULL; + pa->pages_len = 0; + pa->pages_offset = 0; + pa->pages_dma = 0; + } +} + +static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata, + struct xgbe_ring *ring, + struct xgbe_ring_data *rdata) +{ + int order, ret; + + if (!ring->rx_hdr_pa.pages) { + ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0); + if (ret) + return ret; + } + + if 
(!ring->rx_buf_pa.pages) { + order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0); + ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC, + order); + if (ret) + return ret; + } + + /* Set up the header page info */ + xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa, + XGBE_SKB_ALLOC_SIZE); + + /* Set up the buffer page info */ + xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa, + pdata->rx_buf_size); + + return 0; +} + static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata) { struct xgbe_hw_if *hw_if = &pdata->hw_if; @@ -266,7 +378,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata) ring->cur = 0; ring->dirty = 0; - ring->tx.queue_stopped = 0; + memset(&ring->tx, 0, sizeof(ring->tx)); hw_if->tx_desc_init(channel); } @@ -281,8 +393,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata) struct xgbe_ring *ring; struct xgbe_ring_desc *rdesc; struct xgbe_ring_data *rdata; - dma_addr_t rdesc_dma, skb_dma; - struct sk_buff *skb = NULL; + dma_addr_t rdesc_dma; unsigned int i, j; DBGPR("-->xgbe_wrapper_rx_descriptor_init\n"); @@ -302,22 +413,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata) rdata->rdesc = rdesc; rdata->rdesc_dma = rdesc_dma; - /* Allocate skb & assign to each rdesc */ - skb = dev_alloc_skb(pdata->rx_buf_size); - if (skb == NULL) - break; - skb_dma = dma_map_single(pdata->dev, skb->data, - pdata->rx_buf_size, - DMA_FROM_DEVICE); - if (dma_mapping_error(pdata->dev, skb_dma)) { - netdev_alert(pdata->netdev, - "failed to do the dma map\n"); - dev_kfree_skb_any(skb); + if (xgbe_map_rx_buffer(pdata, ring, rdata)) break; - } - rdata->skb = skb; - rdata->skb_dma = skb_dma; - rdata->skb_dma_len = pdata->rx_buf_size; rdesc++; rdesc_dma += sizeof(struct xgbe_ring_desc); @@ -325,8 +422,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata) ring->cur = 0; ring->dirty = 0; - ring->rx.realloc_index = 0; - ring->rx.realloc_threshold = 0; + memset(&ring->rx, 0, sizeof(ring->rx)); hw_if->rx_desc_init(channel); } @@ -334,8 +430,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata) DBGPR("<--xgbe_wrapper_rx_descriptor_init\n"); } -static void xgbe_unmap_skb(struct xgbe_prv_data *pdata, - struct xgbe_ring_data *rdata) +static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata, + struct xgbe_ring_data *rdata) { if (rdata->skb_dma) { if (rdata->mapped_as_page) { @@ -354,9 +450,29 @@ static void xgbe_unmap_skb(struct xgbe_prv_data *pdata, rdata->skb = NULL; } - rdata->tso_header = 0; - rdata->len = 0; - rdata->interrupt = 0; + if (rdata->rx.hdr.pa.pages) + put_page(rdata->rx.hdr.pa.pages); + + if (rdata->rx.hdr.pa_unmap.pages) { + dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma, + rdata->rx.hdr.pa_unmap.pages_len, + DMA_FROM_DEVICE); + put_page(rdata->rx.hdr.pa_unmap.pages); + } + + if (rdata->rx.buf.pa.pages) + put_page(rdata->rx.buf.pa.pages); + + if (rdata->rx.buf.pa_unmap.pages) { + dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma, + rdata->rx.buf.pa_unmap.pages_len, + DMA_FROM_DEVICE); + put_page(rdata->rx.buf.pa_unmap.pages); + } + + memset(&rdata->tx, 0, sizeof(rdata->tx)); + memset(&rdata->rx, 0, sizeof(rdata->rx)); + rdata->mapped_as_page = 0; if (rdata->state_saved) { @@ -414,7 +530,6 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) } rdata->skb_dma = skb_dma; rdata->skb_dma_len = packet->header_len; - rdata->tso_header = 1; offset = packet->header_len; @@ -481,7 +596,11 @@ static int 
xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) } } - /* Save the skb address in the last entry */ + /* Save the skb address in the last entry. We always have some data + * that has been mapped so rdata is always advanced past the last + * piece of mapped data - use the entry pointed to by cur_index - 1. + */ + rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1); rdata->skb = skb; /* Save the number of descriptor entries used */ @@ -494,7 +613,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) err_out: while (start_index < cur_index) { rdata = XGBE_GET_DESC_DATA(ring, start_index++); - xgbe_unmap_skb(pdata, rdata); + xgbe_unmap_rdata(pdata, rdata); } DBGPR("<--xgbe_map_tx_skb: count=0\n"); @@ -502,40 +621,25 @@ err_out: return 0; } -static void xgbe_realloc_skb(struct xgbe_channel *channel) +static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel) { struct xgbe_prv_data *pdata = channel->pdata; struct xgbe_hw_if *hw_if = &pdata->hw_if; struct xgbe_ring *ring = channel->rx_ring; struct xgbe_ring_data *rdata; - struct sk_buff *skb = NULL; - dma_addr_t skb_dma; int i; - DBGPR("-->xgbe_realloc_skb: rx_ring->rx.realloc_index = %u\n", + DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n", ring->rx.realloc_index); for (i = 0; i < ring->dirty; i++) { rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index); /* Reset rdata values */ - xgbe_unmap_skb(pdata, rdata); + xgbe_unmap_rdata(pdata, rdata); - /* Allocate skb & assign to each rdesc */ - skb = dev_alloc_skb(pdata->rx_buf_size); - if (skb == NULL) + if (xgbe_map_rx_buffer(pdata, ring, rdata)) break; - skb_dma = dma_map_single(pdata->dev, skb->data, - pdata->rx_buf_size, DMA_FROM_DEVICE); - if (dma_mapping_error(pdata->dev, skb_dma)) { - netdev_alert(pdata->netdev, - "failed to do the dma map\n"); - dev_kfree_skb_any(skb); - break; - } - rdata->skb = skb; - rdata->skb_dma = skb_dma; - rdata->skb_dma_len = pdata->rx_buf_size; hw_if->rx_desc_reset(rdata); @@ -543,7 +647,7 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel) } ring->dirty = 0; - DBGPR("<--xgbe_realloc_skb\n"); + DBGPR("<--xgbe_realloc_rx_buffer\n"); } void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if) @@ -553,8 +657,8 @@ void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if) desc_if->alloc_ring_resources = xgbe_alloc_ring_resources; desc_if->free_ring_resources = xgbe_free_ring_resources; desc_if->map_tx_skb = xgbe_map_tx_skb; - desc_if->realloc_skb = xgbe_realloc_skb; - desc_if->unmap_skb = xgbe_unmap_skb; + desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer; + desc_if->unmap_rdata = xgbe_unmap_rdata; desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init; desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 9da3a03e8c0777595a7f99774be62c9528e55c6a..53f5f66ec2ee43fe5533697860f70d49a458b919 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -335,6 +335,161 @@ static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata) } } +static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; + + XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1); + } + + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE); +} + +static int 
xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type, + unsigned int index, unsigned int val) +{ + unsigned int wait; + int ret = 0; + + mutex_lock(&pdata->rss_mutex); + + if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) { + ret = -EBUSY; + goto unlock; + } + + XGMAC_IOWRITE(pdata, MAC_RSSDR, val); + + XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index); + XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type); + XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0); + XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1); + + wait = 1000; + while (wait--) { + if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) + goto unlock; + + usleep_range(1000, 1500); + } + + ret = -EBUSY; + +unlock: + mutex_unlock(&pdata->rss_mutex); + + return ret; +} + +static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata) +{ + unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32); + unsigned int *key = (unsigned int *)&pdata->rss_key; + int ret; + + while (key_regs--) { + ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE, + key_regs, *key++); + if (ret) + return ret; + } + + return 0; +} + +static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata) +{ + unsigned int i; + int ret; + + for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { + ret = xgbe_write_rss_reg(pdata, + XGBE_RSS_LOOKUP_TABLE_TYPE, i, + pdata->rss_table[i]); + if (ret) + return ret; + } + + return 0; +} + +static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key) +{ + memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); + + return xgbe_write_rss_hash_key(pdata); +} + +static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata, + const u32 *table) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) + XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]); + + return xgbe_write_rss_lookup_table(pdata); +} + +static int xgbe_enable_rss(struct xgbe_prv_data *pdata) +{ + int ret; + + if (!pdata->hw_feat.rss) + return -EOPNOTSUPP; + + /* Program the hash key */ + ret = xgbe_write_rss_hash_key(pdata); + if (ret) + return ret; + + /* Program the lookup table */ + ret = xgbe_write_rss_lookup_table(pdata); + if (ret) + return ret; + + /* Set the RSS options */ + XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options); + + /* Enable RSS */ + XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1); + + return 0; +} + +static int xgbe_disable_rss(struct xgbe_prv_data *pdata) +{ + if (!pdata->hw_feat.rss) + return -EOPNOTSUPP; + + XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0); + + return 0; +} + +static void xgbe_config_rss(struct xgbe_prv_data *pdata) +{ + int ret; + + if (!pdata->hw_feat.rss) + return; + + if (pdata->netdev->features & NETIF_F_RXHASH) + ret = xgbe_enable_rss(pdata); + else + ret = xgbe_disable_rss(pdata); + + if (ret) + netdev_err(pdata->netdev, + "error configuring RSS, RSS disabled\n"); +} + static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) { unsigned int max_q_count, q_count; @@ -465,17 +620,21 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) if (channel->tx_ring) { /* Enable the following Tx interrupts - * TIE - Transmit Interrupt Enable (unless polling) + * TIE - Transmit Interrupt Enable (unless using + * per channel interrupts) */ - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1); + if (!pdata->per_channel_irq) + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1); } if (channel->rx_ring) { /* Enable following Rx interrupts * RBUE - Receive Buffer Unavailable Enable - * RIE - Receive Interrupt Enable + * RIE - Receive Interrupt Enable (unless using + * 
per channel interrupts) */ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1); - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1); + if (!pdata->per_channel_irq) + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1); } XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); @@ -880,13 +1039,15 @@ static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata) rdesc->desc1 = 0; rdesc->desc2 = 0; rdesc->desc3 = 0; + + /* Make sure ownership is written to the descriptor */ + wmb(); } static void xgbe_tx_desc_init(struct xgbe_channel *channel) { struct xgbe_ring *ring = channel->tx_ring; struct xgbe_ring_data *rdata; - struct xgbe_ring_desc *rdesc; int i; int start_index = ring->cur; @@ -895,26 +1056,11 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel) /* Initialze all descriptors */ for (i = 0; i < ring->rdesc_count; i++) { rdata = XGBE_GET_DESC_DATA(ring, i); - rdesc = rdata->rdesc; - /* Initialize Tx descriptor - * Set buffer 1 (lo) address to zero - * Set buffer 1 (hi) address to zero - * Reset all other control bits (IC, TTSE, B2L & B1L) - * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, - * etc) - */ - rdesc->desc0 = 0; - rdesc->desc1 = 0; - rdesc->desc2 = 0; - rdesc->desc3 = 0; + /* Initialize Tx descriptor */ + xgbe_tx_desc_reset(rdata); } - /* Make sure everything is written to the descriptor(s) before - * telling the device about them - */ - wmb(); - /* Update the total number of Tx descriptors */ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1); @@ -933,19 +1079,19 @@ static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata) struct xgbe_ring_desc *rdesc = rdata->rdesc; /* Reset the Rx descriptor - * Set buffer 1 (lo) address to dma address (lo) - * Set buffer 1 (hi) address to dma address (hi) - * Set buffer 2 (lo) address to zero - * Set buffer 2 (hi) address to zero and set control bits - * OWN and INTE + * Set buffer 1 (lo) address to header dma address (lo) + * Set buffer 1 (hi) address to header dma address (hi) + * Set buffer 2 (lo) address to buffer dma address (lo) + * Set buffer 2 (hi) address to buffer dma address (hi) and + * set control bits OWN and INTE */ - rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma)); - rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma)); - rdesc->desc2 = 0; + rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma)); + rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma)); + rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma)); + rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma)); - rdesc->desc3 = 0; - if (rdata->interrupt) - XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1); + XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, + rdata->interrupt ? 
1 : 0); /* Since the Rx DMA engine is likely running, make sure everything * is written to the descriptor(s) before setting the OWN bit @@ -964,7 +1110,6 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel) struct xgbe_prv_data *pdata = channel->pdata; struct xgbe_ring *ring = channel->rx_ring; struct xgbe_ring_data *rdata; - struct xgbe_ring_desc *rdesc; unsigned int start_index = ring->cur; unsigned int rx_coalesce, rx_frames; unsigned int i; @@ -977,34 +1122,16 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel) /* Initialize all descriptors */ for (i = 0; i < ring->rdesc_count; i++) { rdata = XGBE_GET_DESC_DATA(ring, i); - rdesc = rdata->rdesc; - /* Initialize Rx descriptor - * Set buffer 1 (lo) address to dma address (lo) - * Set buffer 1 (hi) address to dma address (hi) - * Set buffer 2 (lo) address to zero - * Set buffer 2 (hi) address to zero and set control - * bits OWN and INTE appropriateley - */ - rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma)); - rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma)); - rdesc->desc2 = 0; - rdesc->desc3 = 0; - XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1); - XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1); - rdata->interrupt = 1; - if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) { - /* Clear interrupt on completion bit */ - XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, - 0); + /* Set interrupt on completion bit as appropriate */ + if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) rdata->interrupt = 0; - } - } + else + rdata->interrupt = 1; - /* Make sure everything is written to the descriptors before - * telling the device about them - */ - wmb(); + /* Initialize Rx descriptor */ + xgbe_rx_desc_reset(rdata); + } /* Update the total number of Rx descriptors */ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1); @@ -1198,7 +1325,30 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata) xgbe_config_flow_control(pdata); } -static void xgbe_pre_xmit(struct xgbe_channel *channel) +static void xgbe_tx_start_xmit(struct xgbe_channel *channel, + struct xgbe_ring *ring) +{ + struct xgbe_prv_data *pdata = channel->pdata; + struct xgbe_ring_data *rdata; + + /* Issue a poll command to Tx DMA by writing address + * of next immediate free descriptor */ + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); + XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO, + lower_32_bits(rdata->rdesc_dma)); + + /* Start the Tx coalescing timer */ + if (pdata->tx_usecs && !channel->tx_timer_active) { + channel->tx_timer_active = 1; + hrtimer_start(&channel->tx_timer, + ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC), + HRTIMER_MODE_REL); + } + + ring->tx.xmit_more = 0; +} + +static void xgbe_dev_xmit(struct xgbe_channel *channel) { struct xgbe_prv_data *pdata = channel->pdata; struct xgbe_ring *ring = channel->tx_ring; @@ -1207,11 +1357,11 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel) struct xgbe_packet_data *packet = &ring->packet_data; unsigned int csum, tso, vlan; unsigned int tso_context, vlan_context; - unsigned int tx_coalesce, tx_frames; + unsigned int tx_set_ic; int start_index = ring->cur; int i; - DBGPR("-->xgbe_pre_xmit\n"); + DBGPR("-->xgbe_dev_xmit\n"); csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, CSUM_ENABLE); @@ -1230,10 +1380,26 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel) else vlan_context = 0; - tx_coalesce = (pdata->tx_usecs || pdata->tx_frames) ? 
1 : 0; - tx_frames = pdata->tx_frames; - if (tx_coalesce && !channel->tx_timer_active) - ring->coalesce_count = 0; + /* Determine if an interrupt should be generated for this Tx: + * Interrupt: + * - Tx frame count exceeds the frame count setting + * - Addition of Tx frame count to the frame count since the + * last interrupt was set exceeds the frame count setting + * No interrupt: + * - No frame count setting specified (ethtool -C ethX tx-frames 0) + * - Addition of Tx frame count to the frame count since the + * last interrupt was set does not exceed the frame count setting + */ + ring->coalesce_count += packet->tx_packets; + if (!pdata->tx_frames) + tx_set_ic = 0; + else if (packet->tx_packets > pdata->tx_frames) + tx_set_ic = 1; + else if ((ring->coalesce_count % pdata->tx_frames) < + packet->tx_packets) + tx_set_ic = 1; + else + tx_set_ic = 0; rdata = XGBE_GET_DESC_DATA(ring, ring->cur); rdesc = rdata->rdesc; @@ -1300,13 +1466,6 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel) if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1); - /* Set IC bit based on Tx coalescing settings */ - XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1); - if (tx_coalesce && (!tx_frames || - (++ring->coalesce_count % tx_frames))) - /* Clear IC bit */ - XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0); - /* Mark it as First Descriptor */ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1); @@ -1351,13 +1510,6 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel) XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, rdata->skb_dma_len); - /* Set IC bit based on Tx coalescing settings */ - XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1); - if (tx_coalesce && (!tx_frames || - (++ring->coalesce_count % tx_frames))) - /* Clear IC bit */ - XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0); - /* Set OWN bit */ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); @@ -1373,6 +1525,14 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel) /* Set LAST bit for the last descriptor */ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1); + /* Set IC bit based on Tx coalescing settings */ + if (tx_set_ic) + XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1); + + /* Save the Tx info to report back during cleanup */ + rdata->tx.packets = packet->tx_packets; + rdata->tx.bytes = packet->tx_bytes; + /* In case the Tx DMA engine is running, make sure everything * is written to the descriptor(s) before setting the OWN bit * for the first descriptor @@ -1391,26 +1551,19 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel) /* Make sure ownership is written to the descriptor */ wmb(); - /* Issue a poll command to Tx DMA by writing address - * of next immediate free descriptor */ ring->cur++; - rdata = XGBE_GET_DESC_DATA(ring, ring->cur); - XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO, - lower_32_bits(rdata->rdesc_dma)); - - /* Start the Tx coalescing timer */ - if (tx_coalesce && !channel->tx_timer_active) { - channel->tx_timer_active = 1; - hrtimer_start(&channel->tx_timer, - ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC), - HRTIMER_MODE_REL); - } + if (!packet->skb->xmit_more || + netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev, + channel->queue_index))) + xgbe_tx_start_xmit(channel, ring); + else + ring->tx.xmit_more = 1; DBGPR(" %s: descriptors %u to %u written\n", channel->name, start_index & (ring->rdesc_count - 1), (ring->cur - 1) & (ring->rdesc_count - 1)); - 
DBGPR("<--xgbe_pre_xmit\n"); + DBGPR("<--xgbe_dev_xmit\n"); } static int xgbe_dev_read(struct xgbe_channel *channel) @@ -1420,7 +1573,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel) struct xgbe_ring_desc *rdesc; struct xgbe_packet_data *packet = &ring->packet_data; struct net_device *netdev = channel->pdata->netdev; - unsigned int err, etlt; + unsigned int err, etlt, l34t; DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur); @@ -1431,6 +1584,9 @@ static int xgbe_dev_read(struct xgbe_channel *channel) if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN)) return 1; + /* Make sure descriptor fields are read after reading the OWN bit */ + rmb(); + #ifdef XGMAC_ENABLE_RX_DESC_DUMP xgbe_dump_rx_desc(ring, rdesc, ring->cur); #endif @@ -1454,8 +1610,33 @@ static int xgbe_dev_read(struct xgbe_channel *channel) XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT_NEXT, 1); + /* Get the header length */ + if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) + rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2, + RX_NORMAL_DESC2, HL); + + /* Get the RSS hash */ + if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) { + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + RSS_HASH, 1); + + packet->rss_hash = le32_to_cpu(rdesc->desc1); + + l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T); + switch (l34t) { + case RX_DESC3_L34T_IPV4_TCP: + case RX_DESC3_L34T_IPV4_UDP: + case RX_DESC3_L34T_IPV6_TCP: + case RX_DESC3_L34T_IPV6_UDP: + packet->rss_hash_type = PKT_HASH_TYPE_L4; + break; + default: + packet->rss_hash_type = PKT_HASH_TYPE_L3; + } + } + /* Get the packet length */ - rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); + rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) { /* Not all the data has been transferred for this packet */ @@ -1478,7 +1659,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel) etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT); DBGPR(" err=%u, etlt=%#x\n", err, etlt); - if (!err || (err && !etlt)) { + if (!err || !etlt) { + /* No error if err is 0 or etlt is 0 */ if ((etlt == 0x09) && (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) { XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, @@ -2298,6 +2480,47 @@ static void xgbe_config_mmc(struct xgbe_prv_data *pdata) XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1); } +static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata, + struct xgbe_channel *channel) +{ + unsigned int tx_dsr, tx_pos, tx_qidx; + unsigned int tx_status; + unsigned long tx_timeout; + + /* Calculate the status register to read and the position within */ + if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) { + tx_dsr = DMA_DSR0; + tx_pos = (channel->queue_index * DMA_DSR_Q_WIDTH) + + DMA_DSR0_TPS_START; + } else { + tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE; + + tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC); + tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) + + DMA_DSRX_TPS_START; + } + + /* The Tx engine cannot be stopped if it is actively processing + * descriptors. Wait for the Tx engine to enter the stopped or + * suspended state. Don't wait forever though... 
+ */ + tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ); + while (time_before(jiffies, tx_timeout)) { + tx_status = XGMAC_IOREAD(pdata, tx_dsr); + tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH); + if ((tx_status == DMA_TPS_STOPPED) || + (tx_status == DMA_TPS_SUSPENDED)) + break; + + usleep_range(500, 1000); + } + + if (!time_before(jiffies, tx_timeout)) + netdev_info(pdata->netdev, + "timed out waiting for Tx DMA channel %u to stop\n", + channel->queue_index); +} + static void xgbe_enable_tx(struct xgbe_prv_data *pdata) { struct xgbe_channel *channel; @@ -2326,6 +2549,15 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata) struct xgbe_channel *channel; unsigned int i; + /* Prepare for Tx DMA channel stop */ + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + xgbe_prepare_tx_stop(pdata, channel); + } + /* Disable MAC Tx */ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); @@ -2417,6 +2649,15 @@ static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata) struct xgbe_channel *channel; unsigned int i; + /* Prepare for Tx DMA channel stop */ + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + xgbe_prepare_tx_stop(pdata, channel); + } + /* Disable MAC Tx */ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); @@ -2485,6 +2726,8 @@ static int xgbe_init(struct xgbe_prv_data *pdata) xgbe_config_tx_coalesce(pdata); xgbe_config_rx_buffer_size(pdata); xgbe_config_tso_mode(pdata); + xgbe_config_sph_mode(pdata); + xgbe_config_rss(pdata); desc_if->wrapper_tx_desc_init(pdata); desc_if->wrapper_rx_desc_init(pdata); xgbe_enable_dma_interrupts(pdata); @@ -2561,7 +2804,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) hw_if->powerup_rx = xgbe_powerup_rx; hw_if->powerdown_rx = xgbe_powerdown_rx; - hw_if->pre_xmit = xgbe_pre_xmit; + hw_if->dev_xmit = xgbe_dev_xmit; hw_if->dev_read = xgbe_dev_read; hw_if->enable_int = xgbe_enable_int; hw_if->disable_int = xgbe_disable_int; @@ -2575,6 +2818,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) hw_if->rx_desc_reset = xgbe_rx_desc_reset; hw_if->is_last_desc = xgbe_is_last_desc; hw_if->is_context_desc = xgbe_is_context_desc; + hw_if->tx_start_xmit = xgbe_tx_start_xmit; /* For FLOW ctrl */ hw_if->config_tx_flow_control = xgbe_config_tx_flow_control; @@ -2620,5 +2864,11 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) hw_if->config_dcb_tc = xgbe_config_dcb_tc; hw_if->config_dcb_pfc = xgbe_config_dcb_pfc; + /* For Receive Side Scaling */ + hw_if->enable_rss = xgbe_enable_rss; + hw_if->disable_rss = xgbe_disable_rss; + hw_if->set_rss_hash_key = xgbe_set_rss_hash_key; + hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table; + DBGPR("<--xgbe_init_function_ptrs\n"); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 2349ea9702557b95de0860eb5921589104cd5c9d..7bb5f07dbeef3e806174047cc883e3fb6ca149c3 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -114,6 +114,7 @@ * THE POSSIBILITY OF SUCH DAMAGE. 
*/ +#include <linux/platform_device.h> #include #include #include @@ -126,14 +127,126 @@ #include "xgbe.h" #include "xgbe-common.h" -static int xgbe_poll(struct napi_struct *, int); +static int xgbe_one_poll(struct napi_struct *, int); +static int xgbe_all_poll(struct napi_struct *, int); static void xgbe_set_rx_mode(struct net_device *); +static int xgbe_alloc_channels(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel_mem, *channel; + struct xgbe_ring *tx_ring, *rx_ring; + unsigned int count, i; + int ret = -ENOMEM; + + count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count); + + channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL); + if (!channel_mem) + goto err_channel; + + tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring), + GFP_KERNEL); + if (!tx_ring) + goto err_tx_ring; + + rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring), + GFP_KERNEL); + if (!rx_ring) + goto err_rx_ring; + + for (i = 0, channel = channel_mem; i < count; i++, channel++) { + snprintf(channel->name, sizeof(channel->name), "channel-%d", i); + channel->pdata = pdata; + channel->queue_index = i; + channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE + + (DMA_CH_INC * i); + + if (pdata->per_channel_irq) { + /* Get the DMA interrupt (offset 1) */ + ret = platform_get_irq(pdata->pdev, i + 1); + if (ret < 0) { + netdev_err(pdata->netdev, + "platform_get_irq %u failed\n", + i + 1); + goto err_irq; + } + + channel->dma_irq = ret; + } + + if (i < pdata->tx_ring_count) { + spin_lock_init(&tx_ring->lock); + channel->tx_ring = tx_ring++; + } + + if (i < pdata->rx_ring_count) { + spin_lock_init(&rx_ring->lock); + channel->rx_ring = rx_ring++; + } + + DBGPR(" %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n", + channel->name, channel->queue_index, channel->dma_regs, + channel->dma_irq, channel->tx_ring, channel->rx_ring); + } + + pdata->channel = channel_mem; + pdata->channel_count = count; + + return 0; + +err_irq: + kfree(rx_ring); + +err_rx_ring: + kfree(tx_ring); + +err_tx_ring: + kfree(channel_mem); + +err_channel: + return ret; +} + +static void xgbe_free_channels(struct xgbe_prv_data *pdata) +{ + if (!pdata->channel) + return; + + kfree(pdata->channel->rx_ring); + kfree(pdata->channel->tx_ring); + kfree(pdata->channel); + + pdata->channel = NULL; + pdata->channel_count = 0; +} + static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring) { return (ring->rdesc_count - (ring->cur - ring->dirty)); } +static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel, + struct xgbe_ring *ring, unsigned int count) +{ + struct xgbe_prv_data *pdata = channel->pdata; + + if (count > xgbe_tx_avail_desc(ring)) { + DBGPR(" Tx queue stopped, not enough descriptors available\n"); + netif_stop_subqueue(pdata->netdev, channel->queue_index); + ring->tx.queue_stopped = 1; + + /* If we haven't notified the hardware because of xmit_more + * support, tell it now + */ + if (ring->tx.xmit_more) + pdata->hw_if.tx_start_xmit(channel, ring); + + return NETDEV_TX_BUSY; + } + + return 0; +} + static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu) { unsigned int rx_buf_size; @@ -144,8 +257,8 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu) } rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - if (rx_buf_size < XGBE_RX_MIN_BUF_SIZE) - rx_buf_size = XGBE_RX_MIN_BUF_SIZE; + rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE); + rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) & ~(XGBE_RX_BUF_ALIGN - 1); @@
-213,11 +326,7 @@ static irqreturn_t xgbe_isr(int irq, void *data) if (!dma_isr) goto isr_done; - DBGPR("-->xgbe_isr\n"); - DBGPR(" DMA_ISR = %08x\n", dma_isr); - DBGPR(" DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0)); - DBGPR(" DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1)); for (i = 0; i < pdata->channel_count; i++) { if (!(dma_isr & (1 << i))) @@ -228,6 +337,10 @@ static irqreturn_t xgbe_isr(int irq, void *data) dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr); + /* If we get a TI or RI interrupt that means per channel DMA + * interrupts are not enabled, so we use the private data napi + * structure, not the per channel napi structure + */ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) || XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) { if (napi_schedule_prep(&pdata->napi)) { @@ -270,12 +383,28 @@ static irqreturn_t xgbe_isr(int irq, void *data) DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR)); - DBGPR("<--xgbe_isr\n"); - isr_done: return IRQ_HANDLED; } +static irqreturn_t xgbe_dma_isr(int irq, void *data) +{ + struct xgbe_channel *channel = data; + + /* Per channel DMA interrupts are enabled, so we use the per + * channel napi structure and not the private data napi structure + */ + if (napi_schedule_prep(&channel->napi)) { + /* Disable Tx and Rx interrupts */ + disable_irq_nosync(channel->dma_irq); + + /* Turn on polling */ + __napi_schedule(&channel->napi); + } + + return IRQ_HANDLED; +} + static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer) { struct xgbe_channel *channel = container_of(timer, @@ -283,18 +412,24 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer) tx_timer); struct xgbe_ring *ring = channel->tx_ring; struct xgbe_prv_data *pdata = channel->pdata; + struct napi_struct *napi; unsigned long flags; DBGPR("-->xgbe_tx_timer\n"); + napi = (pdata->per_channel_irq) ? 
&channel->napi : &pdata->napi; + spin_lock_irqsave(&ring->lock, flags); - if (napi_schedule_prep(&pdata->napi)) { + if (napi_schedule_prep(napi)) { /* Disable Tx and Rx interrupts */ - xgbe_disable_rx_tx_ints(pdata); + if (pdata->per_channel_irq) + disable_irq(channel->dma_irq); + else + xgbe_disable_rx_tx_ints(pdata); /* Turn on polling */ - __napi_schedule(&pdata->napi); + __napi_schedule(napi); } channel->tx_timer_active = 0; @@ -430,18 +565,46 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add) { - if (add) - netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll, - NAPI_POLL_WEIGHT); - napi_enable(&pdata->napi); + struct xgbe_channel *channel; + unsigned int i; + + if (pdata->per_channel_irq) { + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (add) + netif_napi_add(pdata->netdev, &channel->napi, + xgbe_one_poll, NAPI_POLL_WEIGHT); + + napi_enable(&channel->napi); + } + } else { + if (add) + netif_napi_add(pdata->netdev, &pdata->napi, + xgbe_all_poll, NAPI_POLL_WEIGHT); + + napi_enable(&pdata->napi); + } } static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del) { - napi_disable(&pdata->napi); + struct xgbe_channel *channel; + unsigned int i; + + if (pdata->per_channel_irq) { + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + napi_disable(&channel->napi); - if (del) - netif_napi_del(&pdata->napi); + if (del) + netif_napi_del(&channel->napi); + } + } else { + napi_disable(&pdata->napi); + + if (del) + netif_napi_del(&pdata->napi); + } } void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) @@ -472,7 +635,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata) DBGPR("<--xgbe_init_rx_coalesce\n"); } -static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata) +static void xgbe_free_tx_data(struct xgbe_prv_data *pdata) { struct xgbe_desc_if *desc_if = &pdata->desc_if; struct xgbe_channel *channel; @@ -480,7 +643,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata) struct xgbe_ring_data *rdata; unsigned int i, j; - DBGPR("-->xgbe_free_tx_skbuff\n"); + DBGPR("-->xgbe_free_tx_data\n"); channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++, channel++) { @@ -490,14 +653,14 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata) for (j = 0; j < ring->rdesc_count; j++) { rdata = XGBE_GET_DESC_DATA(ring, j); - desc_if->unmap_skb(pdata, rdata); + desc_if->unmap_rdata(pdata, rdata); } } - DBGPR("<--xgbe_free_tx_skbuff\n"); + DBGPR("<--xgbe_free_tx_data\n"); } -static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata) +static void xgbe_free_rx_data(struct xgbe_prv_data *pdata) { struct xgbe_desc_if *desc_if = &pdata->desc_if; struct xgbe_channel *channel; @@ -505,7 +668,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata) struct xgbe_ring_data *rdata; unsigned int i, j; - DBGPR("-->xgbe_free_rx_skbuff\n"); + DBGPR("-->xgbe_free_rx_data\n"); channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++, channel++) { @@ -515,11 +678,11 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata) for (j = 0; j < ring->rdesc_count; j++) { rdata = XGBE_GET_DESC_DATA(ring, j); - desc_if->unmap_skb(pdata, rdata); + desc_if->unmap_rdata(pdata, rdata); } } - DBGPR("<--xgbe_free_rx_skbuff\n"); + DBGPR("<--xgbe_free_rx_data\n"); } static void xgbe_adjust_link(struct net_device *netdev) @@ -735,7 +898,10 @@ static int xgbe_start(struct xgbe_prv_data *pdata) static 
void xgbe_stop(struct xgbe_prv_data *pdata) { struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_channel *channel; struct net_device *netdev = pdata->netdev; + struct netdev_queue *txq; + unsigned int i; DBGPR("-->xgbe_stop\n"); @@ -749,12 +915,23 @@ static void xgbe_stop(struct xgbe_prv_data *pdata) hw_if->disable_tx(pdata); hw_if->disable_rx(pdata); + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + continue; + + txq = netdev_get_tx_queue(netdev, channel->queue_index); + netdev_tx_reset_queue(txq); + } + DBGPR("<--xgbe_stop\n"); } static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset) { + struct xgbe_channel *channel; struct xgbe_hw_if *hw_if = &pdata->hw_if; + unsigned int i; DBGPR("-->xgbe_restart_dev\n"); @@ -763,10 +940,15 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset) return; xgbe_stop(pdata); - synchronize_irq(pdata->irq_number); + synchronize_irq(pdata->dev_irq); + if (pdata->per_channel_irq) { + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) + synchronize_irq(channel->dma_irq); + } - xgbe_free_tx_skbuff(pdata); - xgbe_free_rx_skbuff(pdata); + xgbe_free_tx_data(pdata); + xgbe_free_rx_data(pdata); /* Issue software reset to device if requested */ if (reset) @@ -1008,6 +1190,12 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet) packet->tcp_header_len, packet->tcp_payload_len); DBGPR(" packet->mss=%u\n", packet->mss); + /* Update the number of packets that will ultimately be transmitted + * along with the extra bytes for each extra packet + */ + packet->tx_packets = skb_shinfo(skb)->gso_segs; + packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len; + return 0; } @@ -1033,17 +1221,22 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata, unsigned int len; unsigned int i; + packet->skb = skb; + context_desc = 0; packet->rdesc_count = 0; + packet->tx_packets = 1; + packet->tx_bytes = skb->len; + if (xgbe_is_tso(skb)) { - /* TSO requires an extra desriptor if mss is different */ + /* TSO requires an extra descriptor if mss is different */ if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) { context_desc = 1; packet->rdesc_count++; } - /* TSO requires an extra desriptor for TSO header */ + /* TSO requires an extra descriptor for TSO header */ packet->rdesc_count++; XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, @@ -1091,6 +1284,8 @@ static int xgbe_open(struct net_device *netdev) struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_hw_if *hw_if = &pdata->hw_if; struct xgbe_desc_if *desc_if = &pdata->desc_if; + struct xgbe_channel *channel = NULL; + unsigned int i = 0; int ret; DBGPR("-->xgbe_open\n"); @@ -1119,24 +1314,48 @@ static int xgbe_open(struct net_device *netdev) goto err_ptpclk; pdata->rx_buf_size = ret; + /* Allocate the channel and ring structures */ + ret = xgbe_alloc_channels(pdata); + if (ret) + goto err_ptpclk; + /* Allocate the ring descriptors and buffers */ ret = desc_if->alloc_ring_resources(pdata); if (ret) - goto err_ptpclk; + goto err_channels; /* Initialize the device restart and Tx timestamp work struct */ INIT_WORK(&pdata->restart_work, xgbe_restart); INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); /* Request interrupts */ - ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0, + ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0, netdev->name, pdata); if (ret) { netdev_alert(netdev, "error requesting irq %d\n", - 
pdata->irq_number); - goto err_irq; + pdata->dev_irq); + goto err_rings; + } + + if (pdata->per_channel_irq) { + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + snprintf(channel->dma_irq_name, + sizeof(channel->dma_irq_name) - 1, + "%s-TxRx-%u", netdev_name(netdev), + channel->queue_index); + + ret = devm_request_irq(pdata->dev, channel->dma_irq, + xgbe_dma_isr, 0, + channel->dma_irq_name, channel); + if (ret) { + netdev_alert(netdev, + "error requesting irq %d\n", + channel->dma_irq); + goto err_irq; + } + } } - pdata->irq_number = netdev->irq; ret = xgbe_start(pdata); if (ret) @@ -1149,12 +1368,21 @@ static int xgbe_open(struct net_device *netdev) err_start: hw_if->exit(pdata); - devm_free_irq(pdata->dev, pdata->irq_number, pdata); - pdata->irq_number = 0; - err_irq: + if (pdata->per_channel_irq) { + /* Using an unsigned int, 'i' will go to UINT_MAX and exit */ + for (i--, channel--; i < pdata->channel_count; i--, channel--) + devm_free_irq(pdata->dev, channel->dma_irq, channel); + } + + devm_free_irq(pdata->dev, pdata->dev_irq, pdata); + +err_rings: desc_if->free_ring_resources(pdata); +err_channels: + xgbe_free_channels(pdata); + err_ptpclk: clk_disable_unprepare(pdata->ptpclk); @@ -1172,6 +1400,8 @@ static int xgbe_close(struct net_device *netdev) struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_hw_if *hw_if = &pdata->hw_if; struct xgbe_desc_if *desc_if = &pdata->desc_if; + struct xgbe_channel *channel; + unsigned int i; DBGPR("-->xgbe_close\n"); @@ -1181,15 +1411,20 @@ static int xgbe_close(struct net_device *netdev) /* Issue software reset to device */ hw_if->exit(pdata); - /* Free all the ring data */ + /* Free the ring descriptors and buffers */ desc_if->free_ring_resources(pdata); - /* Release the interrupt */ - if (pdata->irq_number != 0) { - devm_free_irq(pdata->dev, pdata->irq_number, pdata); - pdata->irq_number = 0; + /* Release the interrupts */ + devm_free_irq(pdata->dev, pdata->dev_irq, pdata); + if (pdata->per_channel_irq) { + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) + devm_free_irq(pdata->dev, channel->dma_irq, channel); } + /* Free the channel and ring structures */ + xgbe_free_channels(pdata); + /* Disable the clocks */ clk_disable_unprepare(pdata->ptpclk); clk_disable_unprepare(pdata->sysclk);
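The err_irq unwind above frees the partially requested per-channel IRQs in reverse and leans on unsigned wraparound to terminate: once i-- wraps past zero to UINT_MAX, the i < pdata->channel_count test fails and the loop exits. A minimal stand-alone sketch of the idiom, with a hypothetical release_entry() standing in for devm_free_irq():

#include <stdio.h>

static void release_entry(unsigned int idx)
{
	printf("releasing entry %u\n", idx);
}

int main(void)
{
	unsigned int count = 5;
	unsigned int i = 3;	/* the request for entry 3 failed */

	/* Walk back over entries 2, 1, 0; when i-- wraps past zero to
	 * UINT_MAX, the i < count test goes false and the loop ends.
	 */
	for (i--; i < count; i--)
		release_entry(i);

	return 0;
}

@@ -1210,12 +1445,14 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) struct xgbe_channel *channel; struct xgbe_ring *ring; struct xgbe_packet_data *packet; + struct netdev_queue *txq; unsigned long flags; int ret; DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len); channel = pdata->channel + skb->queue_mapping; + txq = netdev_get_tx_queue(netdev, channel->queue_index); ring = channel->tx_ring; packet = &ring->packet_data; @@ -1234,13 +1471,9 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) xgbe_packet_info(pdata, ring, skb, packet); /* Check that there are enough descriptors available */ - if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) { - DBGPR(" Tx queue stopped, not enough descriptors available\n"); - netif_stop_subqueue(netdev, channel->queue_index); - ring->tx.queue_stopped = 1; - ret = NETDEV_TX_BUSY; + ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count); + if (ret) goto tx_netdev_return; - } ret = xgbe_prep_tso(skb, packet); if (ret) { @@ -1257,13 +1490,21 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) xgbe_prep_tx_tstamp(pdata, skb, packet); + /* Report on the actual number of bytes (to be)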
sent */ + netdev_tx_sent_queue(txq, packet->tx_bytes); + /* Configure required descriptor fields for transmission */ - hw_if->pre_xmit(channel); + hw_if->dev_xmit(channel); #ifdef XGMAC_ENABLE_TX_PKT_DUMP xgbe_print_pkt(netdev, skb, true); #endif + /* Stop the queue in advance if there may not be enough descriptors */ + xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS); + + ret = NETDEV_TX_OK; + tx_netdev_return: spin_unlock_irqrestore(&ring->lock, flags); @@ -1420,14 +1661,20 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, static void xgbe_poll_controller(struct net_device *netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_channel *channel; + unsigned int i; DBGPR("-->xgbe_poll_controller\n"); - disable_irq(pdata->irq_number); - - xgbe_isr(pdata->irq_number, pdata); - - enable_irq(pdata->irq_number); + if (pdata->per_channel_irq) { + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) + xgbe_dma_isr(channel->dma_irq, channel); + } else { + disable_irq(pdata->dev_irq); + xgbe_isr(pdata->dev_irq, pdata); + enable_irq(pdata->dev_irq); + } DBGPR("<--xgbe_poll_controller\n"); } @@ -1465,12 +1712,21 @@ static int xgbe_set_features(struct net_device *netdev, { struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_hw_if *hw_if = &pdata->hw_if; - netdev_features_t rxcsum, rxvlan, rxvlan_filter; + netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter; + int ret = 0; + rxhash = pdata->netdev_features & NETIF_F_RXHASH; rxcsum = pdata->netdev_features & NETIF_F_RXCSUM; rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX; rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER; + if ((features & NETIF_F_RXHASH) && !rxhash) + ret = hw_if->enable_rss(pdata); + else if (!(features & NETIF_F_RXHASH) && rxhash) + ret = hw_if->disable_rss(pdata); + if (ret) + return ret; + if ((features & NETIF_F_RXCSUM) && !rxcsum) hw_if->enable_rx_csum(pdata); else if (!(features & NETIF_F_RXCSUM) && rxcsum) @@ -1524,7 +1780,7 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel) struct xgbe_ring *ring = channel->rx_ring; struct xgbe_ring_data *rdata; - desc_if->realloc_skb(channel); + desc_if->realloc_rx_buffer(channel); /* Update the Rx Tail Pointer Register with address of * the last cleaned entry */ @@ -1533,6 +1789,31 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel) lower_32_bits(rdata->rdesc_dma)); } +static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, + struct xgbe_ring_data *rdata, + unsigned int *len) +{ + struct net_device *netdev = pdata->netdev; + struct sk_buff *skb; + u8 *packet; + unsigned int copy_len; + + skb = netdev_alloc_skb_ip_align(netdev, rdata->rx.hdr.dma_len); + if (!skb) + return NULL; + + packet = page_address(rdata->rx.hdr.pa.pages) + + rdata->rx.hdr.pa.pages_offset; + copy_len = (rdata->rx.hdr_len) ? 
rdata->rx.hdr_len : *len; + copy_len = min(rdata->rx.hdr.dma_len, copy_len); + skb_copy_to_linear_data(skb, packet, copy_len); + skb_put(skb, copy_len); + + *len -= copy_len; + + return skb; +} + static int xgbe_tx_poll(struct xgbe_channel *channel) { struct xgbe_prv_data *pdata = channel->pdata; @@ -1542,8 +1823,10 @@ static int xgbe_tx_poll(struct xgbe_channel *channel) struct xgbe_ring_data *rdata; struct xgbe_ring_desc *rdesc; struct net_device *netdev = pdata->netdev; + struct netdev_queue *txq; unsigned long flags; int processed = 0; + unsigned int tx_packets = 0, tx_bytes = 0; DBGPR("-->xgbe_tx_poll\n"); @@ -1551,36 +1834,53 @@ static int xgbe_tx_poll(struct xgbe_channel *channel) if (!ring) return 0; + txq = netdev_get_tx_queue(netdev, channel->queue_index); + spin_lock_irqsave(&ring->lock, flags); while ((processed < XGBE_TX_DESC_MAX_PROC) && - (ring->dirty < ring->cur)) { + (ring->dirty != ring->cur)) { rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); rdesc = rdata->rdesc; if (!hw_if->tx_complete(rdesc)) break; + /* Make sure descriptor fields are read after reading the OWN + * bit */ + rmb(); + #ifdef XGMAC_ENABLE_TX_DESC_DUMP xgbe_dump_tx_desc(ring, ring->dirty, 1, 0); #endif + if (hw_if->is_last_desc(rdesc)) { + tx_packets += rdata->tx.packets; + tx_bytes += rdata->tx.bytes; + } + /* Free the SKB and reset the descriptor for re-use */ - desc_if->unmap_skb(pdata, rdata); + desc_if->unmap_rdata(pdata, rdata); hw_if->tx_desc_reset(rdata); processed++; ring->dirty++; } + if (!processed) + goto unlock; + + netdev_tx_completed_queue(txq, tx_packets, tx_bytes); + if ((ring->tx.queue_stopped == 1) && (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) { ring->tx.queue_stopped = 0; - netif_wake_subqueue(netdev, channel->queue_index); + netif_tx_wake_queue(txq); } DBGPR("<--xgbe_tx_poll: processed=%d\n", processed); +unlock: spin_unlock_irqrestore(&ring->lock, flags); return processed;
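Two patterns in xgbe_tx_poll above are worth calling out: the ring indices are free-running (cur and dirty only ever increment, so the reclaim test is dirty != cur rather than dirty < cur, which would break once the counters wrap), and completed work is reported to Byte Queue Limits with netdev_tx_completed_queue() so the stack can bound queueing latency. A hedged sketch of the completion side under those assumptions; my_ring, my_desc_done() and my_desc_len() are hypothetical stand-ins for the driver's bookkeeping:

#include <linux/netdevice.h>

/* Hypothetical ring: free-running indices and a power-of-two entry
 * count, so cur - dirty stays correct across index wraparound.
 */
struct my_ring {
	unsigned int cur;	/* next entry handed to the hardware */
	unsigned int dirty;	/* next entry to reclaim */
	unsigned int entries;
};

bool my_desc_done(struct my_ring *ring);			/* hypothetical */
unsigned int my_desc_len(struct my_ring *ring, unsigned int i);	/* hypothetical */

static void my_tx_complete(struct my_ring *ring, struct netdev_queue *txq)
{
	unsigned int pkts = 0, bytes = 0;

	/* dirty chases cur; '!=' (not '<') because both counters wrap */
	while (ring->dirty != ring->cur && my_desc_done(ring)) {
		pkts++;
		bytes += my_desc_len(ring, ring->dirty);
		ring->dirty++;
	}

	if (!pkts)
		return;

	/* BQL: report what left the wire so the stack sizes the queue */
	netdev_tx_completed_queue(txq, pkts, bytes);

	if (netif_tx_queue_stopped(txq) &&
	    ring->entries - (ring->cur - ring->dirty) > 16)
		netif_tx_wake_queue(txq);
}

@@ -1594,6 +1894,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) struct xgbe_ring_data *rdata; struct xgbe_packet_data *packet; struct net_device *netdev = pdata->netdev; + struct napi_struct *napi; struct sk_buff *skb; struct skb_shared_hwtstamps *hwtstamps; unsigned int incomplete, error, context_next, context; @@ -1607,6 +1908,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) if (!ring) return 0; + napi = (pdata->per_channel_irq) ?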
&channel->napi : &pdata->napi; + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); packet = &ring->packet_data; while (packet_count < budget) { @@ -1641,10 +1944,6 @@ read_again: ring->cur++; ring->dirty++; - dma_unmap_single(pdata->dev, rdata->skb_dma, - rdata->skb_dma_len, DMA_FROM_DEVICE); - rdata->skb_dma = 0; - incomplete = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, INCOMPLETE); @@ -1667,33 +1966,40 @@ read_again: } if (!context) { - put_len = rdata->len - len; - if (skb) { - if (pskb_expand_head(skb, 0, put_len, - GFP_ATOMIC)) { - DBGPR("pskb_expand_head error\n"); - if (incomplete) { - error = 1; - goto read_again; - } - - dev_kfree_skb(skb); - goto next_packet; + put_len = rdata->rx.len - len; + len += put_len; + + if (!skb) { + dma_sync_single_for_cpu(pdata->dev, + rdata->rx.hdr.dma, + rdata->rx.hdr.dma_len, + DMA_FROM_DEVICE); + + skb = xgbe_create_skb(pdata, rdata, &put_len); + if (!skb) { + error = 1; + goto skip_data; } - memcpy(skb_tail_pointer(skb), rdata->skb->data, - put_len); - } else { - skb = rdata->skb; - rdata->skb = NULL; } - skb_put(skb, put_len); - len += put_len; + + if (put_len) { + dma_sync_single_for_cpu(pdata->dev, + rdata->rx.buf.dma, + rdata->rx.buf.dma_len, + DMA_FROM_DEVICE); + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + rdata->rx.buf.pa.pages, + rdata->rx.buf.pa.pages_offset, + put_len, rdata->rx.buf.dma_len); + rdata->rx.buf.pa.pages = NULL; + } } +skip_data: if (incomplete || context_next) goto read_again; - /* Stray Context Descriptor? */ if (!skb) goto next_packet; @@ -1733,13 +2039,18 @@ read_again: hwtstamps->hwtstamp = ns_to_ktime(nsec); } + if (XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, RSS_HASH)) + skb_set_hash(skb, packet->rss_hash, + packet->rss_hash_type); + skb->dev = netdev; skb->protocol = eth_type_trans(skb, netdev); skb_record_rx_queue(skb, channel->queue_index); - skb_mark_napi_id(skb, &pdata->napi); + skb_mark_napi_id(skb, napi); netdev->last_rx = jiffies; - napi_gro_receive(&pdata->napi, skb); + napi_gro_receive(napi, skb); next_packet: packet_count++; @@ -1761,7 +2072,35 @@ next_packet: return packet_count; } -static int xgbe_poll(struct napi_struct *napi, int budget) +static int xgbe_one_poll(struct napi_struct *napi, int budget) +{ + struct xgbe_channel *channel = container_of(napi, struct xgbe_channel, + napi); + int processed = 0; + + DBGPR("-->xgbe_one_poll: budget=%d\n", budget); + + /* Cleanup Tx ring first */ + xgbe_tx_poll(channel); + + /* Process Rx ring next */ + processed = xgbe_rx_poll(channel, budget); + + /* If we processed everything, we are done */ + if (processed < budget) { + /* Turn off polling */ + napi_complete(napi); + + /* Enable Tx and Rx interrupts */ + enable_irq(channel->dma_irq); + } + + DBGPR("<--xgbe_one_poll: received = %d\n", processed); + + return processed; +} + +static int xgbe_all_poll(struct napi_struct *napi, int budget) { struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data, napi); @@ -1770,7 +2109,7 @@ static int xgbe_poll(struct napi_struct *napi, int budget) int processed, last_processed; unsigned int i; - DBGPR("-->xgbe_poll: budget=%d\n", budget); + DBGPR("-->xgbe_all_poll: budget=%d\n", budget); processed = 0; ring_budget = budget / pdata->rx_ring_count; @@ -1798,7 +2137,7 @@ static int xgbe_poll(struct napi_struct *napi, int budget) xgbe_enable_rx_tx_ints(pdata); } - DBGPR("<--xgbe_poll: received = %d\n", processed); + DBGPR("<--xgbe_all_poll: received = %d\n", processed); return processed; }
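The receive path above implements header/payload splitting: xgbe_create_skb() copies only the DMA'd header into a freshly allocated linear skb, and any remaining payload stays in its mapped page, attached zero-copy with skb_add_rx_frag(). A condensed, hedged sketch of that pattern; the hdr_*/buf_* parameters are hypothetical stand-ins for the driver's per-descriptor buffer bookkeeping:

#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *rx_build_skb(struct net_device *ndev,
				    struct page *hdr_page, unsigned int hdr_off,
				    unsigned int hdr_len,
				    struct page *buf_page, unsigned int buf_off,
				    unsigned int buf_len, unsigned int truesize)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(ndev, hdr_len);
	if (!skb)
		return NULL;

	/* Header bytes are copied into the skb's linear area */
	skb_copy_to_linear_data(skb, page_address(hdr_page) + hdr_off, hdr_len);
	skb_put(skb, hdr_len);

	/* The payload stays where the device wrote it, hung off the
	 * skb as a page fragment instead of being copied.
	 */
	if (buf_len)
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, buf_page,
				buf_off, buf_len, truesize);

	return skb;
}

@@ -1812,10 +2151,10 @@ void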
xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx, while (count--) { rdata = XGBE_GET_DESC_DATA(ring, idx); rdesc = rdata->rdesc; - DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx, - (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE", - le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1), - le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3)); + pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx, + (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE", + le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1), + le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3)); idx++; } } @@ -1823,9 +2162,9 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx, void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc, unsigned int idx) { - DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx, - le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1), - le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3)); + pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx, + le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1), + le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3)); } void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 49508ec98b72c1c01fdd189fc71e8c7465c77d2c..ebf489351555b19411d055a12c9812c08f73dd1c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -452,9 +452,9 @@ static int xgbe_set_coalesce(struct net_device *netdev, rx_usecs); return -EINVAL; } - if (rx_frames > pdata->channel->rx_ring->rdesc_count) { + if (rx_frames > pdata->rx_desc_count) { netdev_alert(netdev, "rx-frames is limited to %d frames\n", - pdata->channel->rx_ring->rdesc_count); + pdata->rx_desc_count); return -EINVAL; } @@ -462,9 +462,9 @@ static int xgbe_set_coalesce(struct net_device *netdev, tx_frames = ec->tx_max_coalesced_frames; /* Check the bounds of values for Tx */ - if (tx_frames > pdata->channel->tx_ring->rdesc_count) { + if (tx_frames > pdata->tx_desc_count) { netdev_alert(netdev, "tx-frames is limited to %d frames\n", - pdata->channel->tx_ring->rdesc_count); + pdata->tx_desc_count); return -EINVAL; } @@ -481,6 +481,82 @@ static int xgbe_set_coalesce(struct net_device *netdev, return 0; } +static int xgbe_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *rxnfc, u32 *rule_locs) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + switch (rxnfc->cmd) { + case ETHTOOL_GRXRINGS: + rxnfc->data = pdata->rx_ring_count; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static u32 xgbe_get_rxfh_key_size(struct net_device *netdev) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + return sizeof(pdata->rss_key); +} + +static u32 xgbe_get_rxfh_indir_size(struct net_device *netdev) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + return ARRAY_SIZE(pdata->rss_table); +} + +static int xgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + unsigned int i; + + if (indir) { + for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) + indir[i] = XGMAC_GET_BITS(pdata->rss_table[i], + MAC_RSSDR, DMCH); + } + + if (key) + memcpy(key, pdata->rss_key, sizeof(pdata->rss_key)); + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + return 0; +} + +static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + 
struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_hw_if *hw_if = &pdata->hw_if; + unsigned int ret; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (indir) { + ret = hw_if->set_rss_lookup_table(pdata, indir); + if (ret) + return ret; + } + + if (key) { + ret = hw_if->set_rss_hash_key(pdata, key); + if (ret) + return ret; + } + + return 0; +} + static int xgbe_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *ts_info) { @@ -526,6 +602,11 @@ static const struct ethtool_ops xgbe_ethtool_ops = { .get_strings = xgbe_get_strings, .get_ethtool_stats = xgbe_get_ethtool_stats, .get_sset_count = xgbe_get_sset_count, + .get_rxnfc = xgbe_get_rxnfc, + .get_rxfh_key_size = xgbe_get_rxfh_key_size, + .get_rxfh_indir_size = xgbe_get_rxfh_indir_size, + .get_rxfh = xgbe_get_rxfh, + .set_rxfh = xgbe_set_rxfh, .get_ts_info = xgbe_get_ts_info, }; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index f5a8fa03921aafdff9dd8c8274b4bd055ad05e8c..dbd3850b8b0a8f61053026a887e5a90f7a3aef53 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -133,60 +133,6 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(XGBE_DRV_VERSION); MODULE_DESCRIPTION(XGBE_DRV_DESC); -static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata) -{ - struct xgbe_channel *channel_mem, *channel; - struct xgbe_ring *tx_ring, *rx_ring; - unsigned int count, i; - - DBGPR("-->xgbe_alloc_rings\n"); - - count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count); - - channel_mem = devm_kcalloc(pdata->dev, count, - sizeof(struct xgbe_channel), GFP_KERNEL); - if (!channel_mem) - return NULL; - - tx_ring = devm_kcalloc(pdata->dev, pdata->tx_ring_count, - sizeof(struct xgbe_ring), GFP_KERNEL); - if (!tx_ring) - return NULL; - - rx_ring = devm_kcalloc(pdata->dev, pdata->rx_ring_count, - sizeof(struct xgbe_ring), GFP_KERNEL); - if (!rx_ring) - return NULL; - - for (i = 0, channel = channel_mem; i < count; i++, channel++) { - snprintf(channel->name, sizeof(channel->name), "channel-%d", i); - channel->pdata = pdata; - channel->queue_index = i; - channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE + - (DMA_CH_INC * i); - - if (i < pdata->tx_ring_count) { - spin_lock_init(&tx_ring->lock); - channel->tx_ring = tx_ring++; - } - - if (i < pdata->rx_ring_count) { - spin_lock_init(&rx_ring->lock); - channel->rx_ring = rx_ring++; - } - - DBGPR(" %s - queue_index=%u, dma_regs=%p, tx=%p, rx=%p\n", - channel->name, channel->queue_index, channel->dma_regs, - channel->tx_ring, channel->rx_ring); - } - - pdata->channel_count = count; - - DBGPR("<--xgbe_alloc_rings\n"); - - return channel_mem; -} - static void xgbe_default_config(struct xgbe_prv_data *pdata) { DBGPR("-->xgbe_default_config\n"); @@ -224,6 +170,7 @@ static int xgbe_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct resource *res; const u8 *mac_addr; + unsigned int i; int ret; DBGPR("--> xgbe_probe\n"); @@ -244,6 +191,7 @@ static int xgbe_probe(struct platform_device *pdev) spin_lock_init(&pdata->lock); mutex_init(&pdata->xpcs_mutex); + mutex_init(&pdata->rss_mutex); spin_lock_init(&pdata->tstamp_lock); /* Set and validate the number of descriptors for a ring */ @@ -318,12 +266,18 @@ static int xgbe_probe(struct platform_device *pdev) pdata->awcache = XGBE_DMA_SYS_AWCACHE; } + /* Check for per channel interrupt support */ + if (of_property_read_bool(dev->of_node, XGBE_DMA_IRQS)) + 
pdata->per_channel_irq = 1; + ret = platform_get_irq(pdev, 0); if (ret < 0) { - dev_err(dev, "platform_get_irq failed\n"); + dev_err(dev, "platform_get_irq 0 failed\n"); goto err_io; } - netdev->irq = ret; + pdata->dev_irq = ret; + + netdev->irq = pdata->dev_irq; netdev->base_addr = (unsigned long)pdata->xgmac_regs; /* Set all the function pointers */ @@ -383,13 +337,16 @@ static int xgbe_probe(struct platform_device *pdev) goto err_io; } - /* Allocate the rings for the DMA channels */ - pdata->channel = xgbe_alloc_rings(pdata); - if (!pdata->channel) { - dev_err(dev, "ring allocation failed\n"); - ret = -ENOMEM; - goto err_io; - } + /* Initialize RSS hash key and lookup table */ + netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key)); + + for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++) + XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, + i % pdata->rx_ring_count); + + XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1); + XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1); + XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); /* Prepare to register with MDIO */ pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name); @@ -421,6 +378,9 @@ static int xgbe_probe(struct platform_device *pdev) NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER; + if (pdata->hw_feat.rss) + netdev->hw_features |= NETIF_F_RXHASH; + netdev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 789957d43a1379939e2aa9b8d15f267473a271a6..f9ec762ac3f037e780c3f44f70fec2e5bf06577b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -140,11 +140,25 @@ #define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1)) +/* Descriptors required for maximum contiguous TSO/GSO packet */ +#define XGBE_TX_MAX_SPLIT ((GSO_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1) + +/* Maximum possible descriptors needed for an SKB: + * - Maximum number of SKB frags + * - Maximum descriptors for contiguous TSO/GSO packet + * - Possible context descriptor + * - Possible TSO header descriptor + */ +#define XGBE_TX_MAX_DESCS (MAX_SKB_FRAGS + XGBE_TX_MAX_SPLIT + 2) + #define XGBE_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) #define XGBE_RX_BUF_ALIGN 64 +#define XGBE_SKB_ALLOC_SIZE 256 +#define XGBE_SPH_HDSMS_SIZE 2 /* Keep in sync with SKB_ALLOC_SIZE */ #define XGBE_MAX_DMA_CHANNELS 16 #define XGBE_MAX_QUEUES 16 +#define XGBE_DMA_STOP_TIMEOUT 5 /* DMA cache settings - Outer sharable, write-back, write-allocate */ #define XGBE_DMA_OS_AXDOMAIN 0x2 @@ -171,6 +185,7 @@ /* Device-tree clock names */ #define XGBE_DMA_CLOCK "dma_clk" #define XGBE_PTP_CLOCK "ptp_clk" +#define XGBE_DMA_IRQS "amd,per-channel-interrupt" /* Timestamp support - values based on 50MHz PTP clock * 50MHz => 20 nsec @@ -212,9 +227,17 @@ /* Maximum MAC address hash table size (256 bits = 8 bytes) */ #define XGBE_MAC_HASH_TABLE_SIZE 8 +/* Receive Side Scaling */ +#define XGBE_RSS_HASH_KEY_SIZE 40 +#define XGBE_RSS_MAX_TABLE_SIZE 256 +#define XGBE_RSS_LOOKUP_TABLE_TYPE 0 +#define XGBE_RSS_HASH_KEY_TYPE 1 + struct xgbe_prv_data; struct xgbe_packet_data { + struct sk_buff *skb; + unsigned int attributes; unsigned int errors; @@ -230,14 +253,53 @@ struct xgbe_packet_data { unsigned short vlan_ctag; u64 rx_tstamp; + + u32 rss_hash; + enum pkt_hash_types rss_hash_type; + + unsigned int tx_packets; + unsigned int tx_bytes; };
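The probe code above seeds the RSS indirection table round-robin, so entry i steers to ring i % rx_ring_count, and the ethtool get/set_rxfh callbacks earlier expose exactly that table. A toy user-space model of the lookup the hardware performs; the 256-entry size mirrors XGBE_RSS_MAX_TABLE_SIZE and the hash value is a stand-in for the real Toeplitz result:

#include <stdio.h>

#define TABLE_SIZE 256	/* mirrors XGBE_RSS_MAX_TABLE_SIZE */

int main(void)
{
	unsigned int table[TABLE_SIZE];
	unsigned int rx_rings = 4;	/* hypothetical ring count */
	unsigned int hash = 0x9e3779b9;	/* stand-in flow hash */
	unsigned int i;

	/* Round-robin default fill, as in the probe path above */
	for (i = 0; i < TABLE_SIZE; i++)
		table[i] = i % rx_rings;

	/* The NIC indexes the table with the low bits of the 32-bit
	 * flow hash to pick the Rx ring/DMA channel for that flow.
	 */
	printf("flow hash 0x%08x -> ring %u\n", hash, table[hash % TABLE_SIZE]);
	return 0;
}

/* Common Rx and Tx descriptor mapping */ struct xgbe_ring_desc { - unsigned int desc0; -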
unsigned int desc1; - unsigned int desc2; - unsigned int desc3; + __le32 desc0; + __le32 desc1; + __le32 desc2; + __le32 desc3; +}; + +/* Page allocation related values */ +struct xgbe_page_alloc { + struct page *pages; + unsigned int pages_len; + unsigned int pages_offset; + + dma_addr_t pages_dma; +}; + +/* Ring entry buffer data */ +struct xgbe_buffer_data { + struct xgbe_page_alloc pa; + struct xgbe_page_alloc pa_unmap; + + dma_addr_t dma; + unsigned int dma_len; +}; + +/* Tx-related ring data */ +struct xgbe_tx_ring_data { + unsigned int packets; /* BQL packet count */ + unsigned int bytes; /* BQL byte count */ +}; + +/* Rx-related ring data */ +struct xgbe_rx_ring_data { + struct xgbe_buffer_data hdr; /* Header locations */ + struct xgbe_buffer_data buf; /* Payload locations */ + + unsigned short hdr_len; /* Length of received header */ + unsigned short len; /* Length of received packet */ }; /* Structure used to hold information related to the descriptor @@ -251,9 +313,9 @@ struct xgbe_ring_data { struct sk_buff *skb; /* Virtual address of SKB */ dma_addr_t skb_dma; /* DMA address of SKB data */ unsigned int skb_dma_len; /* Length of SKB DMA area */ - unsigned int tso_header; /* TSO header indicator */ - unsigned short len; /* Length of received Rx packet */ + struct xgbe_tx_ring_data tx; /* Tx-related data */ + struct xgbe_rx_ring_data rx; /* Rx-related data */ unsigned int interrupt; /* Interrupt indicator */ @@ -291,6 +353,10 @@ struct xgbe_ring { */ struct xgbe_ring_data *rdata; + /* Page allocation for RX buffers */ + struct xgbe_page_alloc rx_hdr_pa; + struct xgbe_page_alloc rx_buf_pa; + /* Ring index values * cur - Tx: index of descriptor to be used for current transfer * Rx: index of descriptor to check for packet availability @@ -307,6 +373,7 @@ struct xgbe_ring { union { struct { unsigned int queue_stopped; + unsigned int xmit_more; unsigned short cur_mss; unsigned short cur_vlan_ctag; } tx; @@ -331,6 +398,13 @@ struct xgbe_channel { unsigned int queue_index; void __iomem *dma_regs; + /* Per channel interrupt irq number */ + int dma_irq; + char dma_irq_name[IFNAMSIZ + 32]; + + /* Netdev related settings */ + struct napi_struct napi; + unsigned int saved_ier; unsigned int tx_timer_active; @@ -456,7 +530,7 @@ struct xgbe_hw_if { int (*enable_int)(struct xgbe_channel *, enum xgbe_int); int (*disable_int)(struct xgbe_channel *, enum xgbe_int); - void (*pre_xmit)(struct xgbe_channel *); + void (*dev_xmit)(struct xgbe_channel *); int (*dev_read)(struct xgbe_channel *); void (*tx_desc_init)(struct xgbe_channel *); void (*rx_desc_init)(struct xgbe_channel *); @@ -464,6 +538,7 @@ struct xgbe_hw_if { void (*tx_desc_reset)(struct xgbe_ring_data *); int (*is_last_desc)(struct xgbe_ring_desc *); int (*is_context_desc)(struct xgbe_ring_desc *); + void (*tx_start_xmit)(struct xgbe_channel *, struct xgbe_ring *); /* For FLOW ctrl */ int (*config_tx_flow_control)(struct xgbe_prv_data *); @@ -509,14 +584,20 @@ struct xgbe_hw_if { /* For Data Center Bridging config */ void (*config_dcb_tc)(struct xgbe_prv_data *); void (*config_dcb_pfc)(struct xgbe_prv_data *); + + /* For Receive Side Scaling */ + int (*enable_rss)(struct xgbe_prv_data *); + int (*disable_rss)(struct xgbe_prv_data *); + int (*set_rss_hash_key)(struct xgbe_prv_data *, const u8 *); + int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *); }; struct xgbe_desc_if { int (*alloc_ring_resources)(struct xgbe_prv_data *); void (*free_ring_resources)(struct xgbe_prv_data *); int (*map_tx_skb)(struct xgbe_channel *, struct 
sk_buff *); - void (*realloc_skb)(struct xgbe_channel *); - void (*unmap_skb)(struct xgbe_prv_data *, struct xgbe_ring_data *); + void (*realloc_rx_buffer)(struct xgbe_channel *); + void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *); void (*wrapper_tx_desc_init)(struct xgbe_prv_data *); void (*wrapper_rx_desc_init)(struct xgbe_prv_data *); }; @@ -581,7 +662,11 @@ struct xgbe_prv_data { /* XPCS indirect addressing mutex */ struct mutex xpcs_mutex; - int irq_number; + /* RSS addressing mutex */ + struct mutex rss_mutex; + + int dev_irq; + unsigned int per_channel_irq; struct xgbe_hw_if hw_if; struct xgbe_desc_if desc_if; @@ -624,7 +709,7 @@ struct xgbe_prv_data { unsigned int rx_riwt; unsigned int rx_frames; - /* Current MTU */ + /* Current Rx buffer size */ unsigned int rx_buf_size; /* Flow control settings */ @@ -632,6 +717,11 @@ struct xgbe_prv_data { unsigned int tx_pause; unsigned int rx_pause; + /* Receive Side Scaling settings */ + u8 rss_key[XGBE_RSS_HASH_KEY_SIZE]; + u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE]; + u32 rss_options; + /* MDIO settings */ struct module *phy_module; char *mii_bus_id; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 123669696184fde824328fb61c9b2649f1c750c9..83a50280bb7098149624f4bfe341822845e2148f 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -761,10 +761,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) ndev = pdata->ndev; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr"); - if (!res) { - dev_err(dev, "Resource enet_csr not defined\n"); - return -ENODEV; - } pdata->base_addr = devm_ioremap_resource(dev, res); if (IS_ERR(pdata->base_addr)) { dev_err(dev, "Unable to retrieve ENET Port CSR region\n"); @@ -772,10 +768,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr"); - if (!res) { - dev_err(dev, "Resource ring_csr not defined\n"); - return -ENODEV; - } pdata->ring_csr_addr = devm_ioremap_resource(dev, res); if (IS_ERR(pdata->ring_csr_addr)) { dev_err(dev, "Unable to retrieve ENET Ring CSR region\n"); @@ -783,10 +775,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd"); - if (!res) { - dev_err(dev, "Resource ring_cmd not defined\n"); - return -ENODEV; - } pdata->ring_cmd_addr = devm_ioremap_resource(dev, res); if (IS_ERR(pdata->ring_cmd_addr)) { dev_err(dev, "Unable to retrieve ENET Ring command region\n"); diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index c3e260c21734c37ca1641ffbf9441c867d18deb6..888247ad90680891db3591019db926f0c93c3eb4 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -62,7 +62,6 @@ config BCM63XX_ENET config BCMGENET tristate "Broadcom GENET internal MAC support" - depends on OF select MII select PHYLIB select FIXED_PHY if BCMGENET=y diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 416620fa8fac4655f247a3d5e50aad9ad38c7ece..d86d6baf9681f2cb637591f3830440e602a35cd7 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -836,7 +836,7 @@ static int b44_rx(struct b44 *bp, int budget) struct sk_buff *copy_skb; b44_recycle_rx(bp, cons, bp->rx_prod); - copy_skb = 
netdev_alloc_skb_ip_align(bp->dev, len); + copy_skb = napi_alloc_skb(&bp->napi, len); if (copy_skb == NULL) goto drop_it_no_recycle; @@ -2104,6 +2104,7 @@ static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) bp->flags &= ~B44_FLAG_WOL_ENABLE; spin_unlock_irq(&bp->lock); + device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC); return 0; } @@ -2452,6 +2453,7 @@ static int b44_init_one(struct ssb_device *sdev, } } + device_set_wakeup_capable(sdev->dev, true); netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr); return 0; diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 3e8d1a88ed3d7b597298100798e5286449ffafe8..21206d33b638cf3a76e67abeb0520d71a0310b29 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -385,7 +385,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget) if (len < copybreak) { struct sk_buff *nskb; - nskb = netdev_alloc_skb_ip_align(dev, len); + nskb = napi_alloc_skb(&priv->napi, len); if (!nskb) { /* forget packet, just rearm desc */ dev->stats.rx_dropped++; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 531bb7c57531b17fc64b0719690accba8afb6b1e..a91a8c263391b01c63128f1f5de58f439cef66c7 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -274,6 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = { /* RBUF misc statistics */ STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), + STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), + STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed), + STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed), }; #define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) @@ -477,6 +480,7 @@ static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv, RX_BUF_LENGTH, DMA_FROM_DEVICE); ret = dma_mapping_error(kdev, mapping); if (ret) { + priv->mib.rx_dma_failed++; bcm_sysport_free_cb(cb); netif_err(priv, rx_err, ndev, "DMA mapping failure\n"); return ret; @@ -526,6 +530,7 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, unsigned int p_index; u16 len, status; struct bcm_rsb *rsb; + int ret; /* Determine how much we should process since last call */ p_index = rdma_readl(priv, RDMA_PROD_INDEX); @@ -620,7 +625,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, napi_gro_receive(&priv->napi, skb); refill: - bcm_sysport_rx_refill(priv, cb); + ret = bcm_sysport_rx_refill(priv, cb); + if (ret) + priv->mib.alloc_rx_buff_failed++; } return processed; @@ -731,9 +738,11 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget) napi_complete(napi); /* re-enable TX interrupt */ intrl2_1_mask_clear(ring->priv, BIT(ring->index)); + + return 0; } - return 0; + return budget; } static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv) @@ -971,6 +980,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); if (dma_mapping_error(kdev, mapping)) { + priv->mib.tx_dma_failed++; netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n", skb->data, skb_len); ret = NETDEV_TX_OK; @@ -1399,6 +1409,27 @@ static void topctrl_flush(struct bcm_sysport_priv *priv) topctrl_writel(priv, 0, TX_FLUSH_CNTL); }
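The bcm_sysport_tx_poll change above restores the NAPI contract: a poll handler may call napi_complete() and re-enable its interrupt only when it finished below budget, and must return the full budget (not 0) while work remains so it stays on the poll list. A hedged sketch of the canonical shape, with hypothetical my_clean_ring() and my_irq_enable() helpers:

#include <linux/netdevice.h>

int my_clean_ring(struct napi_struct *napi, int budget);	/* hypothetical */
void my_irq_enable(struct napi_struct *napi);			/* hypothetical */

static int my_poll(struct napi_struct *napi, int budget)
{
	int work = my_clean_ring(napi, budget);

	if (work < budget) {
		/* All caught up: leave polling mode and unmask the IRQ */
		napi_complete(napi);
		my_irq_enable(napi);
	}

	/* Returning budget keeps us scheduled for another pass */
	return work;
}

+static int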
bcm_sysport_change_mac(struct net_device *dev, void *p) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + /* interface is disabled, changes to MAC will be reflected on next + * open call + */ + if (!netif_running(dev)) + return 0; + + umac_set_hw_addr(priv, dev->dev_addr); + + return 0; +} + static void bcm_sysport_netif_start(struct net_device *dev) { struct bcm_sysport_priv *priv = netdev_priv(dev); @@ -1618,6 +1649,7 @@ static const struct net_device_ops bcm_sysport_netdev_ops = { .ndo_stop = bcm_sysport_stop, .ndo_set_features = bcm_sysport_set_features, .ndo_set_rx_mode = bcm_sysport_set_rx_mode, + .ndo_set_mac_address = bcm_sysport_change_mac, }; #define REV_FMT "v%2x.%02x" diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index b08dab828101e7800d125d45f6e370169231f8f3..fc19417d82a505dc61c9f25a940f891522763e00 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -557,6 +557,9 @@ struct bcm_sysport_mib { u32 rxchk_other_pkt_disc; u32 rbuf_ovflow_cnt; u32 rbuf_err_cnt; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + u32 tx_dma_failed; }; /* HW maintains a large list of counters */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 40beef5bca88ade51dd5248550c45059b8774476..1d1147c93d5972147a9aa17650eeaadb0dda7066 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1015,7 +1015,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) */ if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && (len <= RX_COPY_THRESH)) { - skb = netdev_alloc_skb_ip_align(bp->dev, len); + skb = napi_alloc_skb(&fp->napi, len); if (skb == NULL) { DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, "ERROR packet dropped because of alloc failure\n"); @@ -1139,7 +1139,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) prefetch(fp->txdata_ptr[cos]->tx_cons_sb); prefetch(&fp->sb_running_index[SM_RX_ID]); - napi_schedule(&bnx2x_fp(bp, fp->index, napi)); + napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); return IRQ_HANDLED; } @@ -2099,7 +2099,7 @@ int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, if (config_hash) { /* RSS keys */ - prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4); + netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4); __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 1edc931b145848b238d2450b5c2468d029644feb..ffe4e003e636db95054df9e1b76b5198a2b5e2b1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -3358,12 +3358,18 @@ static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev) return T_ETH_INDIRECTION_TABLE_SIZE; } -static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key) +static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, + u8 *hfunc) { struct bnx2x *bp = netdev_priv(dev); u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; size_t i; + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + /* Get the current configuration of the RSS indirection table */ bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table); @@ -3383,11 +3389,21 @@ 
static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key) } static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir, - const u8 *key) + const u8 *key, const u8 hfunc) { struct bnx2x *bp = netdev_priv(dev); size_t i; + /* We require at least one supported parameter to be changed and no + * change in any of the unsupported parameters + */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + + if (!indir) + return 0; + for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { /* * The same as in bnx2x_get_rxfh: we can't use a memcpy() diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 549549eaf580f79518295bd4a13234cef8520968..778e4cd325714eb34e91a369726ea1818a0c8ebe 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -8119,10 +8119,11 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, case SFP_EEPROM_CON_TYPE_VAL_LC: case SFP_EEPROM_CON_TYPE_VAL_RJ45: check_limiting_mode = 1; - if ((val[SFP_EEPROM_10G_COMP_CODE_ADDR] & + if (((val[SFP_EEPROM_10G_COMP_CODE_ADDR] & (SFP_EEPROM_10G_COMP_CODE_SR_MASK | SFP_EEPROM_10G_COMP_CODE_LR_MASK | - SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) { + SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) && + (val[SFP_EEPROM_1G_COMP_CODE_ADDR] != 0)) { DP(NETIF_MSG_LINK, "1G SFP module detected\n"); phy->media_type = ETH_PHY_SFP_1G_FIBER; if (phy->req_line_speed != SPEED_1000) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 74fbf9ea7bd878e4ee3f7d1561f4b74afe46ca54..691f0bf09ee15ca972e95f84c257d0dba3d977fb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -1931,7 +1932,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) for_each_cos_in_tx_queue(fp, cos) prefetch(fp->txdata_ptr[cos]->tx_cons_sb); prefetch(&fp->sb_running_index[SM_RX_ID]); - napi_schedule(&bnx2x_fp(bp, fp->index, napi)); + napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); status &= ~mask; } } @@ -3163,6 +3164,8 @@ static void bnx2x_pf_q_prep_general(struct bnx2x *bp, gen_init->mtu = bp->dev->mtu; gen_init->cos = cos; + + gen_init->fp_hsi = ETH_FP_HSI_VERSION; } static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, @@ -12537,7 +12540,7 @@ static int bnx2x_validate_addr(struct net_device *dev) } static int bnx2x_get_phys_port_id(struct net_device *netdev, - struct netdev_phys_port_id *ppid) + struct netdev_phys_item_id *ppid) { struct bnx2x *bp = netdev_priv(netdev); @@ -12550,6 +12553,11 @@ static int bnx2x_get_phys_port_id(struct net_device *netdev, return 0; } +static bool bnx2x_gso_check(struct sk_buff *skb, struct net_device *dev) +{ + return vxlan_gso_check(skb); +} + static const struct net_device_ops bnx2x_netdev_ops = { .ndo_open = bnx2x_open, .ndo_stop = bnx2x_close, @@ -12581,6 +12589,7 @@ static const struct net_device_ops bnx2x_netdev_ops = { #endif .ndo_get_phys_port_id = bnx2x_get_phys_port_id, .ndo_set_vf_link_state = bnx2x_set_vf_link_state, + .ndo_gso_check = bnx2x_gso_check, }; static int bnx2x_set_coherency_mask(struct bnx2x *bp) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 7bc2924a7e24f071d6f39adfed574dff7c2af71a..07cdf9bbffef2ee85ff3d589f33c1a10aea405d5 100644 --- 
a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -4336,7 +4336,7 @@ static void bnx2x_q_fill_init_general_data(struct bnx2x *bp, test_bit(BNX2X_Q_FLG_FCOE, flags) ? LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; - gen_data->fp_hsi_ver = ETH_FP_HSI_VERSION; + gen_data->fp_hsi_ver = params->fp_hsi; DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n", gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index e97275f456c0ae789ab3e628482d73ccf2dd949c..86baecb7c60c41a3cf096ad884efe674fb9221b3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -937,6 +937,8 @@ struct bnx2x_general_setup_params { u8 spcl_id; u16 mtu; u8 cos; + + u8 fp_hsi; }; struct bnx2x_rxq_setup_params { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index c88b20af87dff2e6613215520eeff5920b438b0f..e5aca2de1871350f3e47a788c4360b461c02a039 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -193,6 +193,7 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp, /* Setup-op general parameters */ setup_p->gen_params.spcl_id = vf->sp_cl_id; setup_p->gen_params.stat_id = vfq_stat_id(vf, q); + setup_p->gen_params.fp_hsi = vf->fp_hsi; /* Setup-op pause params: * Nothing to do, the pause thresholds are set by default to 0 which diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 01bafa4ac04589364ef092e2bd5a31231e42a5f7..66ee62a0401a86c9e44db1afdb9ebac0b7ebf49e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -205,6 +205,8 @@ struct bnx2x_virtf { /* slow-path operations */ struct mutex op_mutex; /* one vfop at a time mutex */ enum channel_tlvs op_current; + + u8 fp_hsi; }; #define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index b1d9c44aa56c919732721207d88163cda8080a53..be40eabc5304dad9e1ded4451b65b4dae6a03ae8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -224,6 +224,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire; struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp; struct vfpf_port_phys_id_resp_tlv *phys_port_resp; + struct vfpf_fp_hsi_resp_tlv *fp_hsi_resp; u32 vf_id; bool resources_acquired = false; @@ -237,6 +238,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) req->vfdev_info.vf_id = vf_id; req->vfdev_info.vf_os = 0; + req->vfdev_info.fp_hsi_ver = ETH_FP_HSI_VERSION; req->resc_request.num_rxqs = rx_count; req->resc_request.num_txqs = tx_count; @@ -316,9 +318,14 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) memset(&bp->vf2pf_mbox->resp, 0, sizeof(union pfvf_tlvs)); } else { - /* PF reports error */ - BNX2X_ERR("Failed to get the requested amount of resources: %d. 
Breaking...\n", - bp->acquire_resp.hdr.status); + /* Determine reason of PF failure of acquire process */ + fp_hsi_resp = bnx2x_search_tlv_list(bp, resp, + CHANNEL_TLV_FP_HSI_SUPPORT); + if (fp_hsi_resp && !fp_hsi_resp->is_supported) + BNX2X_ERR("Old hypervisor - doesn't support current fastpath HSI version; Need to downgrade VF driver [or upgrade hypervisor]\n"); + else + BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n", + bp->acquire_resp.hdr.status); rc = -EAGAIN; goto out; } @@ -333,6 +340,25 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) bp->flags |= HAS_PHYS_PORT_ID; } + /* Old Hypevisors might not even support the FP_HSI_SUPPORT TLV. + * If that's the case, we need to make certain required FW was + * supported by such a hypervisor [i.e., v0-v2]. + */ + fp_hsi_resp = bnx2x_search_tlv_list(bp, resp, + CHANNEL_TLV_FP_HSI_SUPPORT); + if (!fp_hsi_resp && (ETH_FP_HSI_VERSION > ETH_FP_HSI_VER_2)) { + BNX2X_ERR("Old hypervisor - need to downgrade VF's driver\n"); + + /* Since acquire succeeded on the PF side, we need to send a + * release message in order to allow future probes. + */ + bnx2x_vfpf_finalize(bp, &req->first_tlv); + bnx2x_vfpf_release(bp); + + rc = -EINVAL; + goto out; + } + /* get HW info */ bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff); bp->link_params.chip_id = bp->common.chip_id; @@ -1125,6 +1151,26 @@ static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp, *offset += sizeof(struct vfpf_port_phys_id_resp_tlv); } +static void bnx2x_vf_mbx_resp_fp_hsi_ver(struct bnx2x *bp, + struct bnx2x_virtf *vf, + void *buffer, + u16 *offset) +{ + struct vfpf_fp_hsi_resp_tlv *fp_hsi; + + bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_FP_HSI_SUPPORT, + sizeof(struct vfpf_fp_hsi_resp_tlv)); + + fp_hsi = (struct vfpf_fp_hsi_resp_tlv *) + (((u8 *)buffer) + *offset); + fp_hsi->is_supported = (vf->fp_hsi > ETH_FP_HSI_VERSION) ? 0 : 1; + + /* Offset should continue representing the offset to the tail + * of TLV data (outside this function scope) + */ + *offset += sizeof(struct vfpf_fp_hsi_resp_tlv); +} + static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_mbx *mbx, int vfop_status) { @@ -1219,6 +1265,12 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, CHANNEL_TLV_PHYS_PORT_ID)) bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length); + /* `New' vfs will want to know if fastpath HSI is supported, since + * if that's not the case they could print into system log the fact + * the driver version must be updated. + */ + bnx2x_vf_mbx_resp_fp_hsi_ver(bp, vf, &mbx->msg->resp, &length); + bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); @@ -1288,6 +1340,23 @@ static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, goto out; } + /* Verify the VF fastpath HSI can be supported by the loaded FW. + * Linux vfs should be oblivious to changes between v0 and v2. 
+ */ + if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire)) + vf->fp_hsi = acquire->vfdev_info.fp_hsi_ver; + else + vf->fp_hsi = max_t(u8, acquire->vfdev_info.fp_hsi_ver, + ETH_FP_HSI_VER_2); + if (vf->fp_hsi > ETH_FP_HSI_VERSION) { + DP(BNX2X_MSG_IOV, + "VF [%d] - Can't support acquire request since VF requests a FW version which is too new [%02x > %02x]\n", + vf->abs_vfid, acquire->vfdev_info.fp_hsi_ver, + ETH_FP_HSI_VERSION); + rc = -EINVAL; + goto out; + } + /* acquire the resources */ rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h index 15670c499a206bc8c7954e95eed1d37fb29f55d1..b86479fc0d2f80adc9a066da4e26ee9b50d80bc9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h @@ -124,7 +124,7 @@ struct vfpf_acquire_tlv { #define VF_OS_UNDEFINED (0 << VF_OS_SHIFT) #define VF_OS_WINDOWS (1 << VF_OS_SHIFT) - u8 padding; + u8 fp_hsi_ver; u8 caps; #define VF_CAP_SUPPORT_EXT_BULLETIN (1 << 0) } vfdev_info; @@ -204,6 +204,12 @@ struct vfpf_port_phys_id_resp_tlv { u8 padding[2]; }; +struct vfpf_fp_hsi_resp_tlv { + struct channel_tlv tl; + u8 is_supported; + u8 padding[3]; +}; + #define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set the VFs queues * stats will be coalesced on * the leading RSS queue @@ -448,6 +454,7 @@ enum channel_tlvs { CHANNEL_TLV_UPDATE_RSS, CHANNEL_TLV_PHYS_PORT_ID, CHANNEL_TLV_UPDATE_TPA, + CHANNEL_TLV_FP_HSI_SUPPORT, CHANNEL_TLV_MAX }; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index da1a2500c91ce43677e7cada4b66196c17af2513..7078bd386fb76b7d473c484646209daa0df8f985 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -42,6 +42,7 @@ #include #include #include +#include #include @@ -613,6 +614,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { UMAC_RBUF_OVFL_CNT), STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), + STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), + STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed), + STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed), }; #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) @@ -711,6 +715,98 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev, } } +static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL; + u32 reg; + + if (enable && !priv->clk_eee_enabled) { + clk_prepare_enable(priv->clk_eee); + priv->clk_eee_enabled = true; + } + + reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL); + if (enable) + reg |= EEE_EN; + else + reg &= ~EEE_EN; + bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL); + + /* Enable EEE and switch to a 27MHz clock automatically */ + reg = __raw_readl(priv->base + off); + if (enable) + reg |= TBUF_EEE_EN | TBUF_PM_EN; + else + reg &= ~(TBUF_EEE_EN | TBUF_PM_EN); + __raw_writel(reg, priv->base + off); + + /* Do the same thing for RBUF */ + reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL); + if (enable) + reg |= RBUF_EEE_EN | RBUF_PM_EN; + else + reg &= ~(RBUF_EEE_EN | RBUF_PM_EN); + bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL); + + if (!enable && priv->clk_eee_enabled) { + 
clk_disable_unprepare(priv->clk_eee); + priv->clk_eee_enabled = false; + } + + priv->eee.eee_enabled = enable; + priv->eee.eee_active = enable; +} + +static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct ethtool_eee *p = &priv->eee; + + if (GENET_IS_V1(priv)) + return -EOPNOTSUPP; + + e->eee_enabled = p->eee_enabled; + e->eee_active = p->eee_active; + e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER); + + return phy_ethtool_get_eee(priv->phydev, e); +} + +static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct ethtool_eee *p = &priv->eee; + int ret = 0; + + if (GENET_IS_V1(priv)) + return -EOPNOTSUPP; + + p->eee_enabled = e->eee_enabled; + + if (!p->eee_enabled) { + bcmgenet_eee_enable_set(dev, false); + } else { + ret = phy_init_eee(priv->phydev, 0); + if (ret) { + netif_err(priv, hw, dev, "EEE initialization failed\n"); + return ret; + } + + bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER); + bcmgenet_eee_enable_set(dev, true); + } + + return phy_ethtool_set_eee(priv->phydev, e); +} + +static int bcmgenet_nway_reset(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + return genphy_restart_aneg(priv->phydev); +} + /* standard ethtool support functions. */ static struct ethtool_ops bcmgenet_ethtool_ops = { .get_strings = bcmgenet_get_strings, @@ -724,6 +820,9 @@ static struct ethtool_ops bcmgenet_ethtool_ops = { .set_msglevel = bcmgenet_set_msglevel, .get_wol = bcmgenet_get_wol, .set_wol = bcmgenet_set_wol, + .get_eee = bcmgenet_get_eee, + .set_eee = bcmgenet_set_eee, + .nway_reset = bcmgenet_nway_reset, }; /* Power down the unimac, based on mode. */ @@ -989,6 +1088,7 @@ static int bcmgenet_xmit_single(struct net_device *dev, mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); ret = dma_mapping_error(kdev, mapping); if (ret) { + priv->mib.tx_dma_failed++; netif_err(priv, tx_err, dev, "Tx DMA map failed\n"); dev_kfree_skb(skb); return ret; @@ -1035,6 +1135,7 @@ static int bcmgenet_xmit_frag(struct net_device *dev, skb_frag_size(frag), DMA_TO_DEVICE); ret = dma_mapping_error(kdev, mapping); if (ret) { + priv->mib.tx_dma_failed++; netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n", __func__); return ret; @@ -1231,6 +1332,7 @@ static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb) priv->rx_buf_len, DMA_FROM_DEVICE); ret = dma_mapping_error(kdev, mapping); if (ret) { + priv->mib.rx_dma_failed++; bcmgenet_free_cb(cb); netif_err(priv, rx_err, priv->dev, "%s DMA map failed\n", __func__); @@ -1397,8 +1499,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, /* refill RX path on the current control block */ refill: err = bcmgenet_rx_refill(priv, cb); - if (err) + if (err) { + priv->mib.alloc_rx_buff_failed++; netif_err(priv, rx_err, dev, "Rx refill failed\n"); + } rxpktprocessed++; priv->rx_read_ptr++; @@ -2400,6 +2504,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) struct bcmgenet_hw_params *params; u32 reg; u8 major; + u16 gphy_rev; if (GENET_IS_V4(priv)) { bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; @@ -2448,8 +2553,29 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) * to pass this information to the PHY driver. The PHY driver expects * to find the PHY major revision in bits 15:8 while the GENET register * stores that information in bits 7:0, account for that. 
+ * + * On newer chips, starting with PHY revision G0, a new scheme is + * deployed similar to the Starfighter 2 switch with GPHY major + * revision in bits 15:8 and patch level in bits 7:0. Major revision 0 + * is reserved, as is the special value 0x01ff; we use a small + * heuristic to check for the new GPHY revision and re-arrange things + * so the GPHY driver is happy. */ - priv->gphy_rev = (reg & 0xffff) << 8; + gphy_rev = reg & 0xffff; + + /* This is the good old scheme, just GPHY major, neither minor nor patch */ + if ((gphy_rev & 0xf0) != 0) + priv->gphy_rev = gphy_rev << 8; + + /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */ + else if ((gphy_rev & 0xff00) != 0) + priv->gphy_rev = gphy_rev; + + /* These values are reserved and require special treatment */ + else if (gphy_rev == 0 || gphy_rev == 0x01ff) { + pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev); + return; + } #ifdef CONFIG_PHYS_ADDR_T_64BIT if (!(params->flags & GENET_HAS_40BITS)) @@ -2483,8 +2609,9 @@ static const struct of_device_id bcmgenet_match[] = { static int bcmgenet_probe(struct platform_device *pdev) { + struct bcmgenet_platform_data *pd = pdev->dev.platform_data; struct device_node *dn = pdev->dev.of_node; - const struct of_device_id *of_id; + const struct of_device_id *of_id = NULL; struct bcmgenet_priv *priv; struct net_device *dev; const void *macaddr; @@ -2498,9 +2625,11 @@ static int bcmgenet_probe(struct platform_device *pdev) return -ENOMEM; } - of_id = of_match_node(bcmgenet_match, dn); - if (!of_id) - return -EINVAL; + if (dn) { + of_id = of_match_node(bcmgenet_match, dn); + if (!of_id) + return -EINVAL; + } priv = netdev_priv(dev); priv->irq0 = platform_get_irq(pdev, 0); @@ -2512,11 +2641,15 @@ static int bcmgenet_probe(struct platform_device *pdev) goto err; } - macaddr = of_get_mac_address(dn); - if (!macaddr) { - dev_err(&pdev->dev, "can't find MAC address\n"); - err = -EINVAL; - goto err; + if (dn) { + macaddr = of_get_mac_address(dn); + if (!macaddr) { + dev_err(&pdev->dev, "can't find MAC address\n"); + err = -EINVAL; + goto err; + } + } else { + macaddr = pd->mac_address; } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -2556,7 +2689,10 @@ static int bcmgenet_probe(struct platform_device *pdev) priv->dev = dev; priv->pdev = pdev; - priv->version = (enum bcmgenet_version)of_id->data; + if (of_id) + priv->version = (enum bcmgenet_version)of_id->data; + else + priv->version = pd->genet_version; priv->clk = devm_clk_get(&priv->pdev->dev, "enet"); if (IS_ERR(priv->clk)) @@ -2577,6 +2713,12 @@ static int bcmgenet_probe(struct platform_device *pdev) if (IS_ERR(priv->clk_wol)) dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n"); + priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee"); + if (IS_ERR(priv->clk_eee)) { + dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n"); + priv->clk_eee = NULL; + } + err = reset_umac(priv); if (err) goto err_clk_disable; @@ -2727,6 +2869,9 @@ static int bcmgenet_resume(struct device *d) phy_resume(priv->phydev); + if (priv->eee.eee_enabled) + bcmgenet_eee_enable_set(dev, true); + bcmgenet_netif_start(dev); return 0; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 31b2da5f9b821342315ddebd958a765e7560cd51..b36ddec0cc0a3c5d1c64f9c2818a4b44464c41f8 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -143,6 +143,9 @@ struct bcmgenet_mib_counters { u32 rbuf_ovflow_cnt; u32 rbuf_err_cnt;
u32 mdf_err_cnt; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + u32 tx_dma_failed; }; #define UMAC_HD_BKP_CTRL 0x004 @@ -182,6 +185,21 @@ struct bcmgenet_mib_counters { #define UMAC_MAC1 0x010 #define UMAC_MAX_FRAME_LEN 0x014 +#define UMAC_EEE_CTRL 0x064 +#define EN_LPI_RX_PAUSE (1 << 0) +#define EN_LPI_TX_PFC (1 << 1) +#define EN_LPI_TX_PAUSE (1 << 2) +#define EEE_EN (1 << 3) +#define RX_FIFO_CHECK (1 << 4) +#define EEE_TX_CLK_DIS (1 << 5) +#define DIS_EEE_10M (1 << 6) +#define LP_IDLE_PREDICTION_MODE (1 << 7) + +#define UMAC_EEE_LPI_TIMER 0x068 +#define UMAC_EEE_WAKE_TIMER 0x06C +#define UMAC_EEE_REF_COUNT 0x070 +#define EEE_REFERENCE_COUNT_MASK 0xffff + #define UMAC_TX_FLUSH 0x334 #define UMAC_MIB_START 0x400 @@ -229,6 +247,10 @@ struct bcmgenet_mib_counters { #define RBUF_RXCHK_EN (1 << 0) #define RBUF_SKIP_FCS (1 << 4) +#define RBUF_ENERGY_CTRL 0x9c +#define RBUF_EEE_EN (1 << 0) +#define RBUF_PM_EN (1 << 1) + #define RBUF_TBUF_SIZE_CTRL 0xb4 #define RBUF_HFB_CTRL_V1 0x38 @@ -244,6 +266,9 @@ struct bcmgenet_mib_counters { #define TBUF_CTRL 0x00 #define TBUF_BP_MC 0x0C +#define TBUF_ENERGY_CTRL 0x14 +#define TBUF_EEE_EN (1 << 0) +#define TBUF_PM_EN (1 << 1) #define TBUF_CTRL_V1 0x80 #define TBUF_BP_MC_V1 0xA0 @@ -548,6 +573,8 @@ struct bcmgenet_priv { struct device_node *phy_dn; struct mii_bus *mii_bus; u16 gphy_rev; + struct clk *clk_eee; + bool clk_eee_enabled; /* PHY device variables */ int old_link; @@ -584,6 +611,8 @@ struct bcmgenet_priv { u32 wolopts; struct bcmgenet_mib_counters mib; + + struct ethtool_eee eee; }; #define GENET_IO_MACRO(name, offset) \ diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 933cd7e7cd33708bf89292a7bb50035165d0e867..446889cc3c6a207ebe4ef8a55ccee6a556e2fae4 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -23,6 +23,7 @@ #include #include #include +#include #include "bcmgenet.h" @@ -312,22 +313,6 @@ static int bcmgenet_mii_probe(struct net_device *dev) u32 phy_flags; int ret; - if (priv->phydev) { - pr_info("PHY already attached\n"); - return 0; - } - - /* In the case of a fixed PHY, the DT node associated - * to the PHY is the Ethernet MAC DT node. - */ - if (!priv->phy_dn && of_phy_is_fixed_link(dn)) { - ret = of_phy_register_fixed_link(dn); - if (ret) - return ret; - - priv->phy_dn = of_node_get(dn); - } - /* Communicate the integrated PHY revision */ phy_flags = priv->gphy_rev; @@ -337,11 +322,39 @@ static int bcmgenet_mii_probe(struct net_device *dev) priv->old_duplex = -1; priv->old_pause = -1; - phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, - phy_flags, priv->phy_interface); - if (!phydev) { - pr_err("could not attach to PHY\n"); - return -ENODEV; + if (dn) { + if (priv->phydev) { + pr_info("PHY already attached\n"); + return 0; + } + + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. 
+ */ + if (!priv->phy_dn && of_phy_is_fixed_link(dn)) { + ret = of_phy_register_fixed_link(dn); + if (ret) + return ret; + + priv->phy_dn = of_node_get(dn); + } + + phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, + phy_flags, priv->phy_interface); + if (!phydev) { + pr_err("could not attach to PHY\n"); + return -ENODEV; + } + } else { + phydev = priv->phydev; + phydev->dev_flags = phy_flags; + + ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup, + priv->phy_interface); + if (ret) { + pr_err("could not attach to PHY\n"); + return -ENODEV; + } } priv->phydev = phydev; @@ -438,6 +451,75 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) return 0; } +static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_platform_data *pd = kdev->platform_data; + struct mii_bus *mdio = priv->mii_bus; + struct phy_device *phydev; + int ret; + + if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + /* + * Internal or external PHY with MDIO access + */ + if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) + mdio->phy_mask = ~(1 << pd->phy_address); + else + mdio->phy_mask = 0; + + ret = mdiobus_register(mdio); + if (ret) { + dev_err(kdev, "failed to register MDIO bus\n"); + return ret; + } + + if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) + phydev = mdio->phy_map[pd->phy_address]; + else + phydev = phy_find_first(mdio); + + if (!phydev) { + dev_err(kdev, "failed to register PHY device\n"); + mdiobus_unregister(mdio); + return -ENODEV; + } + } else { + /* + * MoCA port or no MDIO access. + * Use fixed PHY to represent the link layer. + */ + struct fixed_phy_status fphy_status = { + .link = 1, + .speed = pd->phy_speed, + .duplex = pd->phy_duplex, + .pause = 0, + .asym_pause = 0, + }; + + phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); + if (!phydev || IS_ERR(phydev)) { + dev_err(kdev, "failed to register fixed PHY device\n"); + return -ENODEV; + } + } + + priv->phydev = phydev; + priv->phy_interface = pd->phy_interface; + + return 0; +} + +static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv) +{ + struct device_node *dn = priv->pdev->dev.of_node; + + if (dn) + return bcmgenet_mii_of_init(priv); + else + return bcmgenet_mii_pd_init(priv); +} + int bcmgenet_mii_init(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); @@ -447,7 +529,7 @@ int bcmgenet_mii_init(struct net_device *dev) if (ret) return ret; - ret = bcmgenet_mii_of_init(priv); + ret = bcmgenet_mii_bus_init(priv); if (ret) goto out_free; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 77f8f836cbbe18a75d1ffa58fc61c077414eab5b..bb48a610b72a8db6beb501e990c5d4f1365c9495 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -10541,19 +10541,14 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) udelay(100); if (tg3_flag(tp, ENABLE_RSS)) { + u32 rss_key[10]; + tg3_rss_write_indir_tbl(tp); - /* Setup the "secret" hash key. 
*/ - tw32(MAC_RSS_HASH_KEY_0, 0x5f865437); - tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc); - tw32(MAC_RSS_HASH_KEY_2, 0x50103a45); - tw32(MAC_RSS_HASH_KEY_3, 0x36621985); - tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8); - tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e); - tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556); - tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe); - tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7); - tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481); + netdev_rss_key_fill(rss_key, 10 * sizeof(u32)); + + for (i = 0; i < 10; i++) + tw32(MAC_RSS_HASH_KEY_0 + i * 4, rss_key[i]); } tp->rx_mode = RX_MODE_ENABLE; @@ -12566,22 +12561,38 @@ static u32 tg3_get_rxfh_indir_size(struct net_device *dev) return size; } -static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key) +static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) { struct tg3 *tp = netdev_priv(dev); int i; + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) indir[i] = tp->rss_ind_tbl[i]; return 0; } -static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key) +static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, + const u8 hfunc) { struct tg3 *tp = netdev_priv(dev); size_t i; + /* We require at least one supported parameter to be changed and no + * change in any of the unsupported parameters + */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + + if (!indir) + return 0; + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) tp->rss_ind_tbl[i] = indir[i]; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index c3861de9dc811a9a975e08f19e7906fc45139e29..323721838cf93e7d75e498cd98ab2cf1d78a3233 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -2054,7 +2054,7 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config) BFI_ENET_RSS_IPV4_TCP); rx_config->rss_config.hash_mask = bnad->num_rxp_per_rx - 1; - get_random_bytes(rx_config->rss_config.toeplitz_hash_key, + netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key, sizeof(rx_config->rss_config.toeplitz_hash_key)); } else { rx_config->rss_status = BNA_STATUS_T_DISABLED; diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig index 3564fe9d3f69bab9e5a8536fe551e0b345591121..321d2ad235d9143547efdb9d3100b8d91fd45d68 100644 --- a/drivers/net/ethernet/cadence/Kconfig +++ b/drivers/net/ethernet/cadence/Kconfig @@ -35,8 +35,8 @@ config MACB ---help--- The Cadence MACB ethernet interface is found on many Atmel AT32 and AT91 parts. This driver also supports the Cadence GEM (Gigabit - Ethernet MAC found in some ARM SoC devices). Note: the Gigabit mode - is not yet supported. Say Y to include support for the MACB/GEM chip. + Ethernet MAC found in some ARM SoC devices). Say Y to include + support for the MACB/GEM chip. To compile this driver as a module, choose M here: the module will be called macb.
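[Note: the tg3 and bnad hunks above both convert to netdev_rss_key_fill(), which copies from a single random RSS key the networking core generates once per boot, so all drivers share one per-boot key instead of hardcoding or rolling their own. A minimal sketch of the pattern; struct foo_adapter, foo_wr32() and FOO_RSS_KEY() are hypothetical stand-ins for a driver's own register accessors, not real kernel symbols:

#include <linux/kernel.h>
#include <linux/netdevice.h>

struct foo_adapter;				/* hypothetical driver state */
void foo_wr32(struct foo_adapter *adap, u32 reg, u32 val);

#define FOO_RSS_KEY(i)	(0x100 + (i) * 4)	/* hypothetical key register layout */

static void foo_setup_rss_key(struct foo_adapter *adap)
{
	u32 rss_key[10];	/* 40-byte Toeplitz key, as in the tg3 hunk above */
	int i;

	/* Fill from the per-boot host RSS key instead of hardcoding one */
	netdev_rss_key_fill(rss_key, sizeof(rss_key));

	/* Program the key into the NIC's hash-key registers */
	for (i = 0; i < ARRAY_SIZE(rss_key); i++)
		foo_wr32(adap, FOO_RSS_KEY(i), rss_key[i]);
}

Sharing one host key keeps flow hashing consistent across interfaces while still varying between boots.]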
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 4d9fc0509af68c91cfd7c7e45f5aa3ec96b7bf88..b6bc318b148e34a0e502ab1dd1a8c3fc88583cc3 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -776,7 +776,7 @@ static int gem_rx(struct macb *bp, int budget) netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", skb->len, skb->csum); print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1, - skb->mac_header, 16, true); + skb_mac_header(skb), 16, true); print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1, skb->data, 32, true); #endif diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index 4c58793890030db331ef473d5eeaf90f9f832f16..babe2a915b0050936edcecf86675d43610794a70 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -301,7 +301,7 @@ unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port, struct sched_port *p = &s->p[port]; unsigned int max_avail_segs; - pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed); + pr_debug("%s mtu=%d speed=%d\n", __func__, mtu, speed); if (speed) p->speed = speed; if (mtu) @@ -1025,7 +1025,7 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold"); /** * get_packet - return the next ingress packet buffer - * @pdev: the PCI device that received the packet + * @adapter: the adapter that received the packet * @fl: the SGE free list holding the packet * @len: the actual packet length, excluding any SGE padding * @@ -1037,14 +1037,15 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold"); * threshold and the packet is too big to copy, or (b) the packet should * be copied but there is no memory for the copy. 
*/ -static inline struct sk_buff *get_packet(struct pci_dev *pdev, +static inline struct sk_buff *get_packet(struct adapter *adapter, struct freelQ *fl, unsigned int len) { - struct sk_buff *skb; const struct freelQ_ce *ce = &fl->centries[fl->cidx]; + struct pci_dev *pdev = adapter->pdev; + struct sk_buff *skb; if (len < copybreak) { - skb = netdev_alloc_skb_ip_align(NULL, len); + skb = napi_alloc_skb(&adapter->napi, len); if (!skb) goto use_orig_buf; @@ -1357,7 +1358,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) struct sge_port_stats *st; struct net_device *dev; - skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad); + skb = get_packet(adapter, fl, len - sge->rx_pkt_pad); if (unlikely(!skb)) { sge->stats.rx_drops++; return; diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index 1df65c915b995228d7b8d22173541c7f1d4ee4af..b8528077599730ec48853fc6ab6495a72e04f826 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o +cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 3c481b2607458ad453a975f2842fcce40476c720..a18d33fdb27107e2252bc12177d21b4f0f4aeb99 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -50,13 +50,13 @@ #include "cxgb4_uld.h" #define T4FW_VERSION_MAJOR 0x01 -#define T4FW_VERSION_MINOR 0x0B -#define T4FW_VERSION_MICRO 0x1B +#define T4FW_VERSION_MINOR 0x0C +#define T4FW_VERSION_MICRO 0x19 #define T4FW_VERSION_BUILD 0x00 #define T5FW_VERSION_MAJOR 0x01 -#define T5FW_VERSION_MINOR 0x0B -#define T5FW_VERSION_MICRO 0x1B +#define T5FW_VERSION_MINOR 0x0C +#define T5FW_VERSION_MICRO 0x19 #define T5FW_VERSION_BUILD 0x00 #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) @@ -222,6 +222,12 @@ struct tp_err_stats { u32 ofldCongDefer; }; +struct sge_params { + u32 hps; /* host page size for our PF/VF */ + u32 eq_qpp; /* egress queues/page for our PF/VF */ + u32 iq_qpp; /* ingress queues/page for our PF/VF */ +}; + struct tp_params { unsigned int ntxchan; /* # of Tx channels */ unsigned int tre; /* log2 of core clocks per TP tick */ @@ -285,6 +291,7 @@ enum chip_type { }; struct adapter_params { + struct sge_params sge; struct tp_params tp; struct vpd_params vpd; struct pci_params pci; @@ -318,10 +325,10 @@ struct adapter_params { #include "t4fw_api.h" #define FW_VERSION(chip) ( \ - FW_HDR_FW_VER_MAJOR_GET(chip##FW_VERSION_MAJOR) | \ - FW_HDR_FW_VER_MINOR_GET(chip##FW_VERSION_MINOR) | \ - FW_HDR_FW_VER_MICRO_GET(chip##FW_VERSION_MICRO) | \ - FW_HDR_FW_VER_BUILD_GET(chip##FW_VERSION_BUILD)) + FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \ + FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \ + FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \ + FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD)) #define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf) struct fw_info { @@ -354,7 +361,7 @@ struct link_config { unsigned char link_ok; /* link up?
*/ }; -#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16) +#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16) enum { MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */ @@ -431,7 +438,8 @@ struct sge_fl { /* SGE free-buffer queue state */ struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ __be64 *desc; /* address of HW Rx descriptor ring */ dma_addr_t addr; /* bus address of HW ring start */ - u64 udb; /* BAR2 offset of User Doorbell area */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ }; /* A packet gather list */ @@ -461,7 +469,8 @@ struct sge_rspq { /* state for an SGE response queue */ u16 abs_id; /* absolute SGE id for the response q */ __be64 *desc; /* address of HW response ring */ dma_addr_t phys_addr; /* physical address of the ring */ - u64 udb; /* BAR2 offset of User Doorbell area */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ unsigned int iqe_len; /* entry size */ unsigned int size; /* capacity of response queue */ struct adapter *adap; @@ -519,7 +528,8 @@ struct sge_txq { int db_disabled; unsigned short db_pidx; unsigned short db_pidx_inc; - u64 udb; /* BAR2 offset of User Doorbell area */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ }; struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ @@ -995,6 +1005,15 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, const u8 *fw_data, unsigned int fw_size, struct fw_hdr *card_fw, enum dev_state state, int *reset); int t4_prep_adapter(struct adapter *adapter); + +enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; +int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid); + +int t4_init_sge_params(struct adapter *adapter); int t4_init_tp_params(struct adapter *adap); int t4_filter_field_shift(const struct adapter *adap, int filter_sel); int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); @@ -1085,4 +1104,5 @@ void t4_db_dropped(struct adapter *adapter); int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val); void t4_sge_decode_idma_state(struct adapter *adapter, int state); +void t4_free_mem(void *addr); #endif /* __CXGB4_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index 4fe33606f372755efb2bcef46f28c68b7d763312..a35d1ec6950e48bcf6003e9f9ccf64b9f2d8a431 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -243,7 +243,7 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap, const struct fw_port_cmd *pcmd) { const union fw_port_dcb *fwdcb = &pcmd->u.dcb; - int port = FW_PORT_CMD_PORTID_GET(be32_to_cpu(pcmd->op_to_portid)); + int port = FW_PORT_CMD_PORTID_G(be32_to_cpu(pcmd->op_to_portid)); struct net_device *dev = adap->port[port]; struct port_info *pi = netdev_priv(dev); struct port_dcb_info *dcb = &pi->dcb; @@ -256,12 +256,12 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap, if (dcb_type == FW_PORT_DCB_TYPE_CONTROL) { enum cxgb4_dcb_state_input input = ((pcmd->u.dcb.control.all_syncd_pkd & - FW_PORT_CMD_ALL_SYNCD) + FW_PORT_CMD_ALL_SYNCD_F) ? 
CXGB4_DCB_STATE_FW_ALLSYNCED : CXGB4_DCB_STATE_FW_INCOMPLETE); if (dcb->dcb_version != FW_PORT_DCB_VER_UNKNOWN) { - dcb_running_version = FW_PORT_CMD_DCB_VERSION_GET( + dcb_running_version = FW_PORT_CMD_DCB_VERSION_G( be16_to_cpu( pcmd->u.dcb.control.dcb_version_to_app_state)); if (dcb_running_version == FW_PORT_DCB_VER_CEE1D01 || @@ -519,7 +519,7 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc, INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); if (pi->dcb.state == CXGB4_DCB_STATE_HOST) - pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY); + pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F); err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); if (err != FW_PORT_DCB_CFG_SUCCESS) @@ -583,7 +583,7 @@ static void cxgb4_setpgbwgcfg_tx(struct net_device *dev, int pgid, INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); if (pi->dcb.state == CXGB4_DCB_STATE_HOST) - pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY); + pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F); err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); @@ -623,7 +623,7 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg) INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); if (pi->dcb.state == CXGB4_DCB_STATE_HOST) - pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY); + pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F); pcmd.u.dcb.pfc.type = FW_PORT_DCB_TYPE_PFC; pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen; @@ -842,7 +842,7 @@ static int __cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id, /* write out new app table entry */ INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); if (pi->dcb.state == CXGB4_DCB_STATE_HOST) - pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY); + pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F); pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID; pcmd.u.dcb.app_priority.protocolid = cpu_to_be16(app_id); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h index 2a6aa88984f44f31d34cc55664705ac6b8dc4a90..31ce425616c9cdb112f417cb5c19c89c77a1e14a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h @@ -42,12 +42,12 @@ do { \ memset(&(__pcmd), 0, sizeof(__pcmd)); \ (__pcmd).op_to_portid = \ - cpu_to_be32(FW_CMD_OP(FW_PORT_CMD) | \ - FW_CMD_REQUEST | \ - FW_CMD_##__op | \ - FW_PORT_CMD_PORTID(__port)); \ + cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | \ + FW_CMD_REQUEST_F | \ + FW_CMD_##__op##_F | \ + FW_PORT_CMD_PORTID_V(__port)); \ (__pcmd).action_to_len16 = \ - cpu_to_be32(FW_PORT_CMD_ACTION(__action) | \ + cpu_to_be32(FW_PORT_CMD_ACTION_V(__action) | \ FW_LEN16(pcmd)); \ } while (0) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..c98a350d857e27ac3f3216765e6ec797f689d908 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -0,0 +1,158 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include + +#include "cxgb4.h" +#include "t4_regs.h" +#include "t4fw_api.h" +#include "cxgb4_debugfs.h" +#include "l2t.h" + +static ssize_t mem_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + loff_t pos = *ppos; + loff_t avail = file_inode(file)->i_size; + unsigned int mem = (uintptr_t)file->private_data & 3; + struct adapter *adap = file->private_data - mem; + __be32 *data; + int ret; + + if (pos < 0) + return -EINVAL; + if (pos >= avail) + return 0; + if (count > avail - pos) + count = avail - pos; + + data = t4_alloc_mem(count); + if (!data) + return -ENOMEM; + + spin_lock(&adap->win0_lock); + ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ); + spin_unlock(&adap->win0_lock); + if (ret) { + t4_free_mem(data); + return ret; + } + ret = copy_to_user(buf, data, count); + + t4_free_mem(data); + if (ret) + return -EFAULT; + + *ppos = pos + count; + return count; +} + +static const struct file_operations mem_debugfs_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = mem_read, + .llseek = default_llseek, +}; + +static void add_debugfs_mem(struct adapter *adap, const char *name, + unsigned int idx, unsigned int size_mb) +{ + struct dentry *de; + + de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root, + (void *)adap + idx, &mem_debugfs_fops); + if (de && de->d_inode) + de->d_inode->i_size = size_mb << 20; +} + +/* Add an array of Debug FS files. 
+ */ +void add_debugfs_files(struct adapter *adap, + struct t4_debugfs_entry *files, + unsigned int nfiles) +{ + int i; + + /* debugfs support is best effort */ + for (i = 0; i < nfiles; i++) + debugfs_create_file(files[i].name, files[i].mode, + adap->debugfs_root, + (void *)adap + files[i].data, + files[i].ops); +} + +int t4_setup_debugfs(struct adapter *adap) +{ + int i; + u32 size; + + static struct t4_debugfs_entry t4_debugfs_files[] = { + { "l2t", &t4_l2t_fops, S_IRUSR, 0}, + }; + + add_debugfs_files(adap, + t4_debugfs_files, + ARRAY_SIZE(t4_debugfs_files)); + + i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); + if (i & EDRAM0_ENABLE_F) { + size = t4_read_reg(adap, MA_EDRAM0_BAR_A); + add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM0_SIZE_G(size)); + } + if (i & EDRAM1_ENABLE_F) { + size = t4_read_reg(adap, MA_EDRAM1_BAR_A); + add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM1_SIZE_G(size)); + } + if (is_t4(adap->params.chip)) { + size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A); + if (i & EXT_MEM_ENABLE_F) + add_debugfs_mem(adap, "mc", MEM_MC, + EXT_MEM_SIZE_G(size)); + } else { + if (i & EXT_MEM0_ENABLE_F) { + size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); + add_debugfs_mem(adap, "mc0", MEM_MC0, + EXT_MEM0_SIZE_G(size)); + } + if (i & EXT_MEM1_ENABLE_F) { + size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); + add_debugfs_mem(adap, "mc1", MEM_MC1, + EXT_MEM1_SIZE_G(size)); + } + } + return 0; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..a3d8867efd3d7aff3e59c0cac6c71a7561cd1364 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h @@ -0,0 +1,52 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __CXGB4_DEBUGFS_H +#define __CXGB4_DEBUGFS_H + +#include + +struct t4_debugfs_entry { + const char *name; + const struct file_operations *ops; + mode_t mode; + unsigned char data; +}; + +int t4_setup_debugfs(struct adapter *adap); +void add_debugfs_files(struct adapter *adap, + struct t4_debugfs_entry *files, + unsigned int nfiles); + +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 279873cb6e3ac33b196d4689daeefdb7c1bf87f5..973dbb7938c3f7c8107a5ca2bbb577f6e452e0ff 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -61,6 +61,7 @@ #include #include #include +#include #include #include "cxgb4.h" @@ -68,10 +69,9 @@ #include "t4_msg.h" #include "t4fw_api.h" #include "cxgb4_dcb.h" +#include "cxgb4_debugfs.h" #include "l2t.h" -#include <../drivers/net/bonding/bonding.h> - #ifdef DRV_VERSION #undef DRV_VERSION #endif @@ -141,7 +141,7 @@ static unsigned int pfvfres_pmask(struct adapter *adapter, * Give PF's access to all of the ports. */ if (vf == 0) - return FW_PFVF_CMD_PMASK_MASK; + return FW_PFVF_CMD_PMASK_M; /* * For VFs, we'll assign them access to the ports based purely on the @@ -210,114 +210,25 @@ struct filter_entry { NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) -#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) } - -static const struct pci_device_id cxgb4_pci_tbl[] = { - CH_DEVICE(0xa000, 0), /* PE10K */ - CH_DEVICE(0x4001, -1), - CH_DEVICE(0x4002, -1), - CH_DEVICE(0x4003, -1), - CH_DEVICE(0x4004, -1), - CH_DEVICE(0x4005, -1), - CH_DEVICE(0x4006, -1), - CH_DEVICE(0x4007, -1), - CH_DEVICE(0x4008, -1), - CH_DEVICE(0x4009, -1), - CH_DEVICE(0x400a, -1), - CH_DEVICE(0x400d, -1), - CH_DEVICE(0x400e, -1), - CH_DEVICE(0x4080, -1), - CH_DEVICE(0x4081, -1), - CH_DEVICE(0x4082, -1), - CH_DEVICE(0x4083, -1), - CH_DEVICE(0x4084, -1), - CH_DEVICE(0x4085, -1), - CH_DEVICE(0x4086, -1), - CH_DEVICE(0x4087, -1), - CH_DEVICE(0x4088, -1), - CH_DEVICE(0x4401, 4), - CH_DEVICE(0x4402, 4), - CH_DEVICE(0x4403, 4), - CH_DEVICE(0x4404, 4), - CH_DEVICE(0x4405, 4), - CH_DEVICE(0x4406, 4), - CH_DEVICE(0x4407, 4), - CH_DEVICE(0x4408, 4), - CH_DEVICE(0x4409, 4), - CH_DEVICE(0x440a, 4), - CH_DEVICE(0x440d, 4), - CH_DEVICE(0x440e, 4), - CH_DEVICE(0x4480, 4), - CH_DEVICE(0x4481, 4), - CH_DEVICE(0x4482, 4), - CH_DEVICE(0x4483, 4), - CH_DEVICE(0x4484, 4), - CH_DEVICE(0x4485, 4), - CH_DEVICE(0x4486, 4), - CH_DEVICE(0x4487, 4), - CH_DEVICE(0x4488, 4), - CH_DEVICE(0x5001, 4), - CH_DEVICE(0x5002, 4), - CH_DEVICE(0x5003, 4), - CH_DEVICE(0x5004, 4), - CH_DEVICE(0x5005, 4), - CH_DEVICE(0x5006, 4), - CH_DEVICE(0x5007, 4), - CH_DEVICE(0x5008, 4), - CH_DEVICE(0x5009, 4), - CH_DEVICE(0x500A, 4), - CH_DEVICE(0x500B, 4), - CH_DEVICE(0x500C, 4), - CH_DEVICE(0x500D, 4), - CH_DEVICE(0x500E, 4), - CH_DEVICE(0x500F, 4), - CH_DEVICE(0x5010, 4), - CH_DEVICE(0x5011, 4), - CH_DEVICE(0x5012, 4), - CH_DEVICE(0x5013, 4), - CH_DEVICE(0x5014, 4), - CH_DEVICE(0x5015, 4), - CH_DEVICE(0x5080, 4), - CH_DEVICE(0x5081, 4), - CH_DEVICE(0x5082, 4), - CH_DEVICE(0x5083, 4), - CH_DEVICE(0x5084, 4), - CH_DEVICE(0x5085, 4), - CH_DEVICE(0x5086, 4), - CH_DEVICE(0x5087, 4), - CH_DEVICE(0x5088, 4), - CH_DEVICE(0x5401, 4), - CH_DEVICE(0x5402, 4), - CH_DEVICE(0x5403, 4), - CH_DEVICE(0x5404, 4), - CH_DEVICE(0x5405, 4), - CH_DEVICE(0x5406, 4), - CH_DEVICE(0x5407, 4), - CH_DEVICE(0x5408, 4), - CH_DEVICE(0x5409, 4), - CH_DEVICE(0x540A, 4), - 
CH_DEVICE(0x540B, 4), - CH_DEVICE(0x540C, 4), - CH_DEVICE(0x540D, 4), - CH_DEVICE(0x540E, 4), - CH_DEVICE(0x540F, 4), - CH_DEVICE(0x5410, 4), - CH_DEVICE(0x5411, 4), - CH_DEVICE(0x5412, 4), - CH_DEVICE(0x5413, 4), - CH_DEVICE(0x5414, 4), - CH_DEVICE(0x5415, 4), - CH_DEVICE(0x5480, 4), - CH_DEVICE(0x5481, 4), - CH_DEVICE(0x5482, 4), - CH_DEVICE(0x5483, 4), - CH_DEVICE(0x5484, 4), - CH_DEVICE(0x5485, 4), - CH_DEVICE(0x5486, 4), - CH_DEVICE(0x5487, 4), - CH_DEVICE(0x5488, 4), - { 0, } -}; +/* Macros needed to support the PCI Device ID Table ... + */ +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ + static struct pci_device_id cxgb4_pci_tbl[] = { +#define CH_PCI_DEVICE_ID_FUNCTION 0x4 + +/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is + * called for both. + */ +#define CH_PCI_DEVICE_ID_FUNCTION2 0x0 + +#define CH_PCI_ID_TABLE_ENTRY(devid) \ + {PCI_VDEVICE(CHELSIO, (devid)), 4} + +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \ + { 0, } \ + } + +#include "t4_pci_id_tbl.h" #define FW4_FNAME "cxgb4/t4fw.bin" #define FW5_FNAME "cxgb4/t5fw.bin" @@ -512,9 +423,10 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable) u32 name, value; int err; - name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) | - FW_PARAMS_PARAM_YZ(txq->q.cntxt_id)); + name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | + FW_PARAMS_PARAM_X_V( + FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) | + FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id)); value = enable ? i : 0xffffffff; /* Since we can be called while atomic (from "interrupt @@ -709,7 +621,7 @@ EXPORT_SYMBOL(cxgb4_dcb_enabled); /* Handle a Data Center Bridging update message from the firmware. */ static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd) { - int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid)); + int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid)); struct net_device *dev = adap->port[port]; int old_dcb_enabled = cxgb4_dcb_enabled(dev); int new_dcb_enabled; @@ -832,17 +744,17 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, #ifdef CONFIG_CHELSIO_T4_DCB const struct fw_port_cmd *pcmd = (const void *)p->data; - unsigned int cmd = FW_CMD_OP_GET(ntohl(pcmd->op_to_portid)); + unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid)); unsigned int action = - FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16)); + FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16)); if (cmd == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) { - int port = FW_PORT_CMD_PORTID_GET( + int port = FW_PORT_CMD_PORTID_G( be32_to_cpu(pcmd->op_to_portid)); struct net_device *dev = q->adap->port[port]; int state_input = ((pcmd->u.info.dcbxdis_pkd & - FW_PORT_CMD_DCBXDIS) + FW_PORT_CMD_DCBXDIS_F) ? CXGB4_DCB_INPUT_FW_DISABLED : CXGB4_DCB_INPUT_FW_ENABLED); @@ -1287,7 +1199,7 @@ void *t4_alloc_mem(size_t size) /* * Free memory allocated through alloc_mem(). */ -static void t4_free_mem(void *addr) +void t4_free_mem(void *addr) { if (is_vmalloc_addr(addr)) vfree(addr); @@ -1339,52 +1251,52 @@ static int set_filter_wr(struct adapter *adapter, int fidx) * filter specification structure but for now it's easiest to simply * put this fairly direct code in line ... 
*/ - fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR)); - fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16)); + fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR)); + fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16)); fwr->tid_to_iq = - htonl(V_FW_FILTER_WR_TID(ftid) | - V_FW_FILTER_WR_RQTYPE(f->fs.type) | - V_FW_FILTER_WR_NOREPLY(0) | - V_FW_FILTER_WR_IQ(f->fs.iq)); + htonl(FW_FILTER_WR_TID_V(ftid) | + FW_FILTER_WR_RQTYPE_V(f->fs.type) | + FW_FILTER_WR_NOREPLY_V(0) | + FW_FILTER_WR_IQ_V(f->fs.iq)); fwr->del_filter_to_l2tix = - htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | - V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | - V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | - V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | - V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | - V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | - V_FW_FILTER_WR_DMAC(f->fs.newdmac) | - V_FW_FILTER_WR_SMAC(f->fs.newsmac) | - V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || + htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) | + FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) | + FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) | + FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) | + FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) | + FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) | + FW_FILTER_WR_DMAC_V(f->fs.newdmac) | + FW_FILTER_WR_SMAC_V(f->fs.newsmac) | + FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) | - V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || + FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE || f->fs.newvlan == VLAN_REWRITE) | - V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | - V_FW_FILTER_WR_TXCHAN(f->fs.eport) | - V_FW_FILTER_WR_PRIO(f->fs.prio) | - V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0)); + FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) | + FW_FILTER_WR_TXCHAN_V(f->fs.eport) | + FW_FILTER_WR_PRIO_V(f->fs.prio) | + FW_FILTER_WR_L2TIX_V(f->l2t ? 
f->l2t->idx : 0)); fwr->ethtype = htons(f->fs.val.ethtype); fwr->ethtypem = htons(f->fs.mask.ethtype); fwr->frag_to_ovlan_vldm = - (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | - V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | - V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) | - V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) | - V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) | - V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld)); + (FW_FILTER_WR_FRAG_V(f->fs.val.frag) | + FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) | + FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) | + FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) | + FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) | + FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld)); fwr->smac_sel = 0; fwr->rx_chan_rx_rpl_iq = - htons(V_FW_FILTER_WR_RX_CHAN(0) | - V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id)); + htons(FW_FILTER_WR_RX_CHAN_V(0) | + FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id)); fwr->maci_to_matchtypem = - htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | - V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | - V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | - V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | - V_FW_FILTER_WR_PORT(f->fs.val.iport) | - V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | - V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | - V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); + htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) | + FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) | + FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) | + FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) | + FW_FILTER_WR_PORT_V(f->fs.val.iport) | + FW_FILTER_WR_PORTM_V(f->fs.mask.iport) | + FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) | + FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype)); fwr->ptcl = f->fs.val.proto; fwr->ptclm = f->fs.mask.proto; fwr->ttyp = f->fs.val.tos; @@ -1615,14 +1527,14 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) if (adapter->params.fw_vers) snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u.%u.%u, TP %u.%u.%u.%u", - FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers), - FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers), - FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers), - FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers), - FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers), - FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers), - FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers), - FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers)); + FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers)); } static void get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -2721,9 +2633,10 @@ static int set_rspq_intr_params(struct sge_rspq *q, new_idx = closest_thres(&adap->sge, cnt); if (q->desc && q->pktcnt_idx != new_idx) { /* the queue has already been created, update it */ - v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | - FW_PARAMS_PARAM_YZ(q->cntxt_id); + v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | + FW_PARAMS_PARAM_X_V( + FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | + FW_PARAMS_PARAM_YZ_V(q->cntxt_id); err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v, &new_idx); if (err) @@ -2937,7 +2850,7 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) int 
ret; const struct firmware *fw; struct adapter *adap = netdev2adap(netdev); - unsigned int mbox = FW_PCIE_FW_MASTER_MASK + 1; + unsigned int mbox = PCIE_FW_MASTER_M + 1; ef->data[sizeof(ef->data) - 1] = '\0'; ret = request_firmware(&fw, ef->data, adap->pdev_dev); @@ -3014,21 +2927,35 @@ static u32 get_rss_table_size(struct net_device *dev) return pi->rss_size; } -static int get_rss_table(struct net_device *dev, u32 *p, u8 *key) +static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc) { const struct port_info *pi = netdev_priv(dev); unsigned int n = pi->rss_size; + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!p) + return 0; while (n--) p[n] = pi->rss[n]; return 0; } -static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key) +static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key, + const u8 hfunc) { unsigned int i; struct port_info *pi = netdev_priv(dev); + /* We require at least one supported parameter to be changed and no + * change in any of the unsupported parameters + */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!p) + return 0; + for (i = 0; i < pi->rss_size; i++) pi->rss[i] = p[i]; if (pi->adapter->flags & FULL_INIT_DONE) @@ -3048,45 +2975,45 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, info->data = 0; switch (info->flow_type) { case TCP_V4_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) + if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) + else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST; break; case UDP_V4_FLOW: - if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) && - (v & FW_RSS_VI_CONFIG_CMD_UDPEN)) + if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) && + (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F)) info->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) + else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST; break; case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case IPV4_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) + if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST; break; case TCP_V6_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) + if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) + else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST; break; case UDP_V6_FLOW: - if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) && - (v & FW_RSS_VI_CONFIG_CMD_UDPEN)) + if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) && + (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F)) info->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) + else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST; break; case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case IPV6_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) + if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST; break; } @@ -3131,102 +3058,14 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .flash_device = set_flash, }; -/* - * debugfs support - */ -static ssize_t mem_read(struct file *file, char __user *buf, size_t count, - loff_t *ppos) -{ - loff_t pos = *ppos; - loff_t avail = file_inode(file)->i_size; - 
unsigned int mem = (uintptr_t)file->private_data & 3; - struct adapter *adap = file->private_data - mem; - __be32 *data; - int ret; - - if (pos < 0) - return -EINVAL; - if (pos >= avail) - return 0; - if (count > avail - pos) - count = avail - pos; - - data = t4_alloc_mem(count); - if (!data) - return -ENOMEM; - - spin_lock(&adap->win0_lock); - ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ); - spin_unlock(&adap->win0_lock); - if (ret) { - t4_free_mem(data); - return ret; - } - ret = copy_to_user(buf, data, count); - - t4_free_mem(data); - if (ret) - return -EFAULT; - - *ppos = pos + count; - return count; -} - -static const struct file_operations mem_debugfs_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = mem_read, - .llseek = default_llseek, -}; - -static void add_debugfs_mem(struct adapter *adap, const char *name, - unsigned int idx, unsigned int size_mb) -{ - struct dentry *de; - - de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root, - (void *)adap + idx, &mem_debugfs_fops); - if (de && de->d_inode) - de->d_inode->i_size = size_mb << 20; -} - static int setup_debugfs(struct adapter *adap) { - int i; - u32 size; - if (IS_ERR_OR_NULL(adap->debugfs_root)) return -1; - i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE); - if (i & EDRAM0_ENABLE) { - size = t4_read_reg(adap, MA_EDRAM0_BAR); - add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size)); - } - if (i & EDRAM1_ENABLE) { - size = t4_read_reg(adap, MA_EDRAM1_BAR); - add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size)); - } - if (is_t4(adap->params.chip)) { - size = t4_read_reg(adap, MA_EXT_MEMORY_BAR); - if (i & EXT_MEM_ENABLE) - add_debugfs_mem(adap, "mc", MEM_MC, - EXT_MEM_SIZE_GET(size)); - } else { - if (i & EXT_MEM_ENABLE) { - size = t4_read_reg(adap, MA_EXT_MEMORY_BAR); - add_debugfs_mem(adap, "mc0", MEM_MC0, - EXT_MEM_SIZE_GET(size)); - } - if (i & EXT_MEM1_ENABLE) { - size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR); - add_debugfs_mem(adap, "mc1", MEM_MC1, - EXT_MEM_SIZE_GET(size)); - } - } - if (adap->l2t) - debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap, - &t4_l2t_fops); +#ifdef CONFIG_DEBUG_FS + t4_setup_debugfs(adap); +#endif return 0; } @@ -3508,9 +3347,9 @@ int cxgb4_clip_get(const struct net_device *dev, adap = netdev2adap(dev); memset(&c, 0, sizeof(c)); - c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE); - c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c)); + c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); + c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c)); c.ip_hi = *(__be64 *)(lip->s6_addr); c.ip_lo = *(__be64 *)(lip->s6_addr + 8); return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); @@ -3525,9 +3364,9 @@ int cxgb4_clip_release(const struct net_device *dev, adap = netdev2adap(dev); memset(&c, 0, sizeof(c)); - c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) | - FW_CMD_REQUEST | FW_CMD_READ); - c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c)); + c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F); + c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c)); c.ip_hi = *(__be64 *)(lip->s6_addr); c.ip_lo = *(__be64 *)(lip->s6_addr + 8); return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); @@ -3568,7 +3407,7 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid, req->local_ip = sip; req->peer_ip = htonl(0); chan = rxq_to_chan(&adap->sge, queue); - req->opt0 = 
cpu_to_be64(TX_CHAN(chan)); + req->opt0 = cpu_to_be64(TX_CHAN_V(chan)); req->opt1 = cpu_to_be64(CONN_POLICY_ASK | SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); ret = t4_mgmt_tx(adap, skb); @@ -3611,7 +3450,7 @@ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid, req->peer_ip_hi = cpu_to_be64(0); req->peer_ip_lo = cpu_to_be64(0); chan = rxq_to_chan(&adap->sge, queue); - req->opt0 = cpu_to_be64(TX_CHAN(chan)); + req->opt0 = cpu_to_be64(TX_CHAN_V(chan)); req->opt1 = cpu_to_be64(CONN_POLICY_ASK | SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); ret = t4_mgmt_tx(adap, skb); @@ -3893,7 +3732,7 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) { struct adapter *adap; u32 offset, memtype, memaddr; - u32 edc0_size, edc1_size, mc0_size, mc1_size; + u32 edc0_size, edc1_size, mc0_size, mc1_size, size; u32 edc0_end, edc1_end, mc0_end, mc1_end; int ret; @@ -3907,9 +3746,12 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) * and EDC1. Some cards will have neither MC0 nor MC1, most cards have * MC0, and some have both MC0 and MC1. */ - edc0_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)) << 20; - edc1_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM1_BAR)) << 20; - mc0_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)) << 20; + size = t4_read_reg(adap, MA_EDRAM0_BAR_A); + edc0_size = EDRAM0_SIZE_G(size) << 20; + size = t4_read_reg(adap, MA_EDRAM1_BAR_A); + edc1_size = EDRAM1_SIZE_G(size) << 20; + size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); + mc0_size = EXT_MEM0_SIZE_G(size) << 20; edc0_end = edc0_size; edc1_end = edc0_end + edc1_size; @@ -3929,9 +3771,8 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) /* T4 only has a single memory channel */ goto err; } else { - mc1_size = EXT_MEM_SIZE_GET( - t4_read_reg(adap, - MA_EXT_MEMORY1_BAR)) << 20; + size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); + mc1_size = EXT_MEM1_SIZE_G(size) << 20; mc1_end = mc0_end + mc1_size; if (offset < mc1_end) { memtype = MEM_MC1; @@ -3968,6 +3809,22 @@ u64 cxgb4_read_sge_timestamp(struct net_device *dev) } EXPORT_SYMBOL(cxgb4_read_sge_timestamp); +int cxgb4_bar2_sge_qregs(struct net_device *dev, + unsigned int qid, + enum cxgb4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid) +{ + return cxgb4_t4_bar2_sge_qregs(netdev2adap(dev), + qid, + (qtype == CXGB4_BAR2_QTYPE_EGRESS + ? 
T4_BAR2_QTYPE_EGRESS + : T4_BAR2_QTYPE_INGRESS), + pbar2_qoffset, + pbar2_qid); +} +EXPORT_SYMBOL(cxgb4_bar2_sge_qregs); + static struct pci_driver cxgb4_driver; static void check_neigh_update(struct neighbour *neigh) @@ -4150,31 +4007,18 @@ static void process_db_drop(struct work_struct *work) u32 dropped_db = t4_read_reg(adap, 0x010ac); u16 qid = (dropped_db >> 15) & 0x1ffff; u16 pidx_inc = dropped_db & 0x1fff; - unsigned int s_qpp; - unsigned short udb_density; - unsigned long qpshift; - int page; - u32 udb; - - dev_warn(adap->pdev_dev, - "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n", - dropped_db, qid, - (dropped_db >> 14) & 1, - (dropped_db >> 13) & 1, - pidx_inc); + u64 bar2_qoffset; + unsigned int bar2_qid; + int ret; - drain_db_fifo(adap, 1); - - s_qpp = QUEUESPERPAGEPF1 * adap->fn; - udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap, - SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp); - qpshift = PAGE_SHIFT - ilog2(udb_density); - udb = qid << qpshift; - udb &= PAGE_MASK; - page = udb / PAGE_SIZE; - udb += (qid - (page * udb_density)) * 128; - - writel(PIDX(pidx_inc), adap->bar2 + udb + 8); + ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS, + &bar2_qoffset, &bar2_qid); + if (ret) + dev_err(adap->pdev_dev, "doorbell drop recovery: " + "qid=%d, pidx_inc=%d\n", qid, pidx_inc); + else + writel(PIDX_T5(pidx_inc) | QID(bar2_qid), + adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL); /* Re-enable BAR2 WC */ t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15); @@ -4232,12 +4076,8 @@ static void uld_attach(struct adapter *adap, unsigned int uld) lli.adapter_type = adap->params.chip; lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); lli.cclk_ps = 1000000000 / adap->params.vpd.cclk; - lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( - t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >> - (adap->fn * 4)); - lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( - t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> - (adap->fn * 4)); + lli.udb_density = 1 << adap->params.sge.eq_qpp; + lli.ucq_density = 1 << adap->params.sge.iq_qpp; lli.filt_mode = adap->params.tp.vlan_pri_map; /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ for (i = 0; i < NCHAN; i++) @@ -4399,8 +4239,7 @@ static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa, if (cxgb4_netdev(event_dev)) { switch (event) { case NETDEV_UP: - ret = cxgb4_clip_get(event_dev, - (const struct in6_addr *)ifa->addr.s6_addr); + ret = cxgb4_clip_get(event_dev, &ifa->addr); if (ret < 0) { rcu_read_unlock(); return ret; @@ -4408,8 +4247,7 @@ static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa, ret = NOTIFY_OK; break; case NETDEV_DOWN: - cxgb4_clip_release(event_dev, - (const struct in6_addr *)ifa->addr.s6_addr); + cxgb4_clip_release(event_dev, &ifa->addr); ret = NOTIFY_OK; break; default: @@ -4478,8 +4316,7 @@ static int update_dev_clip(struct net_device *root_dev, struct net_device *dev) read_lock_bh(&idev->lock); list_for_each_entry(ifa, &idev->addr_list, if_list) { - ret = cxgb4_clip_get(dev, - (const struct in6_addr *)ifa->addr.s6_addr); + ret = cxgb4_clip_get(dev, &ifa->addr); if (ret < 0) break; } @@ -4960,14 +4797,14 @@ static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg) */ memset(&ldst_cmd, 0, sizeof(ldst_cmd)); ldst_cmd.op_to_addrspace = - htonl(FW_CMD_OP(FW_LDST_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ | - FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE)); + htonl(FW_CMD_OP_V(FW_LDST_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + 
FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE)); ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd)); - ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1); + ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1); ldst_cmd.u.pcie.ctrl_to_fn = - (FW_LDST_CMD_LC | FW_LDST_CMD_FN(adap->fn)); + (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn)); ldst_cmd.u.pcie.r = reg; ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd), &ldst_cmd); @@ -5054,8 +4891,8 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) /* get device capabilities */ memset(c, 0, sizeof(*c)); - c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_READ); + c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F); c->cfvalid_to_len16 = htonl(FW_LEN16(*c)); ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c); if (ret < 0) @@ -5071,16 +4908,16 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) dev_err(adap->pdev_dev, "virtualization ACLs not supported"); return ret; } - c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE); + c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL); if (ret < 0) return ret; ret = t4_config_glbl_rss(adap, adap->fn, FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, - FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | - FW_RSS_GLB_CONFIG_CMD_TNLALLLKP); + FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F | + FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F); if (ret < 0) return ret; @@ -5241,8 +5078,8 @@ static int adap_init0_config(struct adapter *adapter, int reset) if (cf->size >= FLASH_CFG_MAX_SIZE) ret = -ENOMEM; else { - params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF)); + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF)); ret = t4_query_params(adapter, adapter->mbox, adapter->fn, 0, 1, params, val); if (ret == 0) { @@ -5260,8 +5097,8 @@ static int adap_init0_config(struct adapter *adapter, int reset) size_t size = cf->size & ~0x3; __be32 *data = (__be32 *)cf->data; - mtype = FW_PARAMS_PARAM_Y_GET(val[0]); - maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16; + mtype = FW_PARAMS_PARAM_Y_G(val[0]); + maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16; spin_lock(&adapter->win0_lock); ret = t4_memory_rw(adapter, 0, mtype, maddr, @@ -5298,13 +5135,13 @@ static int adap_init0_config(struct adapter *adapter, int reset) */ memset(&caps_cmd, 0, sizeof(caps_cmd)); caps_cmd.op_to_write = - htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ); + htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); caps_cmd.cfvalid_to_len16 = - htonl(FW_CAPS_CONFIG_CMD_CFVALID | - FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | - FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | + htonl(FW_CAPS_CONFIG_CMD_CFVALID_F | + FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) | + FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) | FW_LEN16(caps_cmd)); ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd); @@ -5318,9 +5155,9 @@ static int adap_init0_config(struct adapter *adapter, int reset) if (ret == -ENOENT) { memset(&caps_cmd, 0, sizeof(caps_cmd)); caps_cmd.op_to_write = - htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ); + htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); ret = t4_wr_mbox(adapter, 
adapter->mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd); @@ -5343,9 +5180,9 @@ static int adap_init0_config(struct adapter *adapter, int reset) * And now tell the firmware to use the configuration we just loaded. */ caps_cmd.op_to_write = - htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE); + htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F); caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), NULL); @@ -5416,8 +5253,8 @@ static int adap_init0_no_config(struct adapter *adapter, int reset) * Get device capabilities and select which we'll be using. */ memset(&caps_cmd, 0, sizeof(caps_cmd)); - caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_READ); + caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F); caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd); @@ -5433,8 +5270,8 @@ static int adap_init0_no_config(struct adapter *adapter, int reset) dev_err(adapter->pdev_dev, "virtualization ACLs not supported"); goto bye; } - caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE); + caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), NULL); if (ret < 0) @@ -5456,10 +5293,10 @@ static int adap_init0_no_config(struct adapter *adapter, int reset) adapter->flags |= RSS_TNLALLLOOKUP; ret = t4_config_glbl_rss(adapter, adapter->mbox, FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, - FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | - FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ | + FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F | + FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F | ((adapter->flags & RSS_TNLALLLOOKUP) ? - FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0)); + FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F : 0)); if (ret < 0) goto bye; @@ -5470,7 +5307,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset) PFRES_NEQ, PFRES_NETHCTRL, PFRES_NIQFLINT, PFRES_NIQ, PFRES_TC, PFRES_NVI, - FW_PFVF_CMD_CMASK_MASK, + FW_PFVF_CMD_CMASK_M, pfvfres_pmask(adapter, adapter->fn, 0), PFRES_NEXACTF, PFRES_R_CAPS, PFRES_WX_CAPS); @@ -5515,7 +5352,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset) VFRES_NEQ, VFRES_NETHCTRL, VFRES_NIQFLINT, VFRES_NIQ, VFRES_TC, VFRES_NVI, - FW_PFVF_CMD_CMASK_MASK, + FW_PFVF_CMD_CMASK_M, pfvfres_pmask( adapter, pf, vf), VFRES_NEXACTF, @@ -5687,14 +5524,8 @@ static int adap_init0(struct adapter *adap) struct fw_caps_config_cmd caps_cmd; int reset = 1; - /* - * Contact FW, advertising Master capability (and potentially forcing - * ourselves as the Master PF if our module parameter force_init is - * set). - */ - ret = t4_fw_hello(adap, adap->mbox, adap->fn, - force_init ? MASTER_MUST : MASTER_MAY, - &state); + /* Contact FW, advertising Master capability */ + ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state); if (ret < 0) { dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", ret); @@ -5702,8 +5533,6 @@ static int adap_init0(struct adapter *adap) } if (ret == adap->mbox) adap->flags |= MASTER_PF; - if (force_init && state == DEV_STATE_INIT) - state = DEV_STATE_UNINIT; /* * If we're the Master PF Driver and the device is uninitialized, @@ -5779,8 +5608,8 @@ static int adap_init0(struct adapter *adap) * and portvec ... 
*/ v = - FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC); + FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec); if (ret < 0) goto bye; @@ -5802,7 +5631,6 @@ static int adap_init0(struct adapter *adap) } else { dev_info(adap->pdev_dev, "Coming up as MASTER: "\ "Initializing adapter\n"); - /* * If the firmware doesn't support Configuration * Files warn user and exit, @@ -5817,8 +5645,9 @@ static int adap_init0(struct adapter *adap) * Find out whether we're dealing with a version of * the firmware which has configuration file support. */ - params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF)); + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V( + FW_PARAMS_PARAM_DEV_CF)); ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, params, val); @@ -5878,14 +5707,14 @@ static int adap_init0(struct adapter *adap) * Grab some of our basic fundamental operating parameters. */ #define FW_PARAM_DEV(param) \ - (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) + (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \ + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param)) #define FW_PARAM_PFVF(param) \ - FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \ - FW_PARAMS_PARAM_Y(0) | \ - FW_PARAMS_PARAM_Z(0) + FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \ + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \ + FW_PARAMS_PARAM_Y_V(0) | \ + FW_PARAMS_PARAM_Z_V(0) params[0] = FW_PARAM_PFVF(EQ_START); params[1] = FW_PARAM_PFVF(L2T_START); @@ -5945,8 +5774,8 @@ static int adap_init0(struct adapter *adap) * to manage. 
*/ memset(&caps_cmd, 0, sizeof(caps_cmd)); - caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_READ); + caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F); caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd); @@ -6092,6 +5921,7 @@ static int adap_init0(struct adapter *adap) t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, adap->params.b_wnd); } + t4_init_sge_params(adap); t4_init_tp_params(adap); adap->flags |= FW_OK; return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 1366ba620c87cdf506e1bd0757d7062cd5bf605e..152b4c4c7809599a0a38112b7b55ece4b7816e3e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -52,10 +52,10 @@ enum { }; #define INIT_TP_WR(w, tid) do { \ - (w)->wr.wr_hi = htonl(FW_WR_OP(FW_TP_WR) | \ - FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \ - (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \ - FW_WR_FLOWID(tid)); \ + (w)->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) | \ + FW_WR_IMMDLEN_V(sizeof(*w) - sizeof(w->wr))); \ + (w)->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*w), 16)) | \ + FW_WR_FLOWID_V(tid)); \ (w)->wr.wr_lo = cpu_to_be64(0); \ } while (0) @@ -65,9 +65,10 @@ enum { } while (0) #define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \ - (w)->wr.wr_hi = htonl(FW_WR_OP(FW_ULPTX_WR) | FW_WR_ATOMIC(atomic)); \ - (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \ - FW_WR_FLOWID(tid)); \ + (w)->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) | \ + FW_WR_ATOMIC_V(atomic)); \ + (w)->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(wrlen, 16)) | \ + FW_WR_FLOWID_V(tid)); \ (w)->wr.wr_lo = cpu_to_be64(0); \ } while (0) @@ -304,4 +305,11 @@ void cxgb4_enable_db_coalescing(struct net_device *dev); int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte); u64 cxgb4_read_sge_timestamp(struct net_device *dev); +enum cxgb4_bar2_qtype { CXGB4_BAR2_QTYPE_EGRESS, CXGB4_BAR2_QTYPE_INGRESS }; +int cxgb4_bar2_sge_qregs(struct net_device *dev, + unsigned int qid, + enum cxgb4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid); + #endif /* !__CXGB4_OFLD_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 96041397ee15e657e23851be888f6e0f317ee46f..a047baa9fd0421289e6976b926bfbc554b38edc3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -435,9 +435,9 @@ u64 cxgb4_select_ntuple(struct net_device *dev, if (tp->vnic_shift >= 0) { u32 viid = cxgb4_port_viid(dev); - u32 vf = FW_VIID_VIN_GET(viid); - u32 pf = FW_VIID_PFN_GET(viid); - u32 vld = FW_VIID_VIVLD_GET(viid); + u32 vf = FW_VIID_VIN_G(viid); + u32 pf = FW_VIID_PFN_G(viid); + u32 vld = FW_VIID_VIVLD_G(viid); ntuple |= (u64)(V_FT_VNID_ID_VF(vf) | V_FT_VNID_ID_PF(pf) | diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 39f2b13e66c731add3b842ef8a8db1fcde906c4b..ebf935a1e352cecd43746aa1c2567a121a404b8e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -527,14 +527,16 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) val |= DBPRIO(1); wmb(); - /* If we're on T4, use the old doorbell mechanism; otherwise - * use the new BAR2 mechanism. 
+ /* If we don't have access to the new User Doorbell (T5+), use + * the old doorbell mechanism; otherwise use the new BAR2 + * mechanism. */ - if (is_t4(adap->params.chip)) { + if (unlikely(q->bar2_addr == NULL)) { t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), val | QID(q->cntxt_id)); } else { - writel(val, adap->bar2 + q->udb + SGE_UDB_KDOORBELL); + writel(val | QID(q->bar2_qid), + q->bar2_addr + SGE_UDB_KDOORBELL); /* This Write memory Barrier will force the write to * the User Doorbell area to be flushed. @@ -576,7 +578,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, __be64 *d = &q->desc[q->pidx]; struct rx_sw_desc *sd = &q->sdesc[q->pidx]; - gfp |= __GFP_NOWARN | __GFP_COLD; + gfp |= __GFP_NOWARN; if (s->fl_pg_order == 0) goto alloc_small_pages; @@ -585,7 +587,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, * Prefer large buffers */ while (n) { - pg = alloc_pages(gfp | __GFP_COMP, s->fl_pg_order); + pg = __dev_alloc_pages(gfp, s->fl_pg_order); if (unlikely(!pg)) { q->large_alloc_failed++; break; /* fall back to single pages */ @@ -615,7 +617,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, alloc_small_pages: while (n--) { - pg = __skb_alloc_page(gfp, NULL); + pg = __dev_alloc_page(gfp); if (unlikely(!pg)) { q->alloc_failed++; break; @@ -816,7 +818,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, sgl->addr0 = cpu_to_be64(addr[1]); } - sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags)); + sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags)); if (likely(--nfrags == 0)) return; /* @@ -850,14 +852,13 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, *end = 0; } -/* This function copies a tx_desc struct to memory mapped BAR2 space(user space - * writes). For coalesced WR SGE, fetches data from the FIFO instead of from - * Host. +/* This function copies a 64-byte coalesced work request to + * memory-mapped BAR2 space. For a coalesced WR, the SGE fetches + * data from the FIFO instead of from the host. */ -static void cxgb_pio_copy(u64 __iomem *dst, struct tx_desc *desc) +static void cxgb_pio_copy(u64 __iomem *dst, u64 *src) { - int count = sizeof(*desc) / sizeof(u64); - u64 *src = (u64 *)desc; + int count = 8; while (count) { writeq(*src, dst); @@ -879,7 +880,10 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) { wmb(); /* write descriptors before telling HW */ - if (is_t4(adap->params.chip)) { + /* If we don't have access to the new User Doorbell (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(q->bar2_addr == NULL)) { u32 val = PIDX(n); unsigned long flags; @@ -905,21 +909,22 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) */ WARN_ON(val & DBPRIO(1)); - /* For T5 and later we use the Write-Combine mapped BAR2 User - * Doorbell mechanism. If we're only writing a single TX - * Descriptor and TX Write Combining hasn't been disabled, we - * can use the Write Combining Gather Buffer; otherwise we use - * the simple doorbell. + /* If we're only writing a single TX Descriptor and we can use + * Inferred QID registers, we can use the Write Combining + * Gather Buffer; otherwise we use the simple doorbell. */ - if (n == 1) { + if (n == 1 && q->bar2_qid == 0) { int index = (q->pidx ? 
(q->pidx - 1) : (q->size - 1)); + u64 *wr = (u64 *)&q->desc[index]; - cxgb_pio_copy(adap->bar2 + q->udb + SGE_UDB_WCDOORBELL, - q->desc + index); + cxgb_pio_copy((u64 __iomem *) + (q->bar2_addr + SGE_UDB_WCDOORBELL), + wr); } else { - writel(val, adap->bar2 + q->udb + SGE_UDB_KDOORBELL); + writel(val | QID(q->bar2_qid), + q->bar2_addr + SGE_UDB_KDOORBELL); } /* This Write Memory Barrier will force the write to the User @@ -1092,10 +1097,10 @@ out_free: dev_kfree_skb_any(skb); goto out_free; } - wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2)); + wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); if (unlikely(credits < ETHTXQ_STOP_THRES)) { eth_txq_stop(q); - wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ; + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; } wr = (void *)&q->q.desc[q->q.pidx]; @@ -1112,8 +1117,8 @@ out_free: dev_kfree_skb_any(skb); int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; len += sizeof(*lso); - wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | - FW_WR_IMMDLEN(len)); + wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | + FW_WR_IMMDLEN_V(len)); lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) | LSO_FIRST_SLICE | LSO_LAST_SLICE | LSO_IPV6(v6) | @@ -1135,8 +1140,8 @@ out_free: dev_kfree_skb_any(skb); q->tx_cso += ssi->gso_segs; } else { len += sizeof(*cpl); - wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | - FW_WR_IMMDLEN(len)); + wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | + FW_WR_IMMDLEN_V(len)); cpl = (void *)(wr + 1); if (skb->ip_summed == CHECKSUM_PARTIAL) { cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS; @@ -1224,7 +1229,7 @@ static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) { reclaim_completed_tx_imm(&q->q); if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { - wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ); + wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); q->q.stops++; q->full = 1; } @@ -1406,7 +1411,7 @@ static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb) { struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data; - wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ); + wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); q->q.stops++; q->full = 1; } @@ -1997,11 +2002,16 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) params = QINTR_TIMER_IDX(7); val = CIDXINC(work_done) | SEINTARM(params); - if (is_t4(q->adap->params.chip)) { + + /* If we don't have access to the new User GTS (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(q->bar2_addr == NULL)) { t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), val | INGRESSQID((u32)q->cntxt_id)); } else { - writel(val, q->adap->bar2 + q->udb + SGE_UDB_GTS); + writel(val | INGRESSQID(q->bar2_qid), + q->bar2_addr + SGE_UDB_GTS); wmb(); } return work_done; @@ -2047,11 +2057,16 @@ static unsigned int process_intrq(struct adapter *adap) } val = CIDXINC(credits) | SEINTARM(q->intr_params); - if (is_t4(adap->params.chip)) { + + /* If we don't have access to the new User GTS (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. 
+ */ + if (unlikely(q->bar2_addr == NULL)) { t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), val | INGRESSQID(q->cntxt_id)); } else { - writel(val, adap->bar2 + q->udb + SGE_UDB_GTS); + writel(val | INGRESSQID(q->bar2_qid), + q->bar2_addr + SGE_UDB_GTS); wmb(); } spin_unlock(&adap->sge.intrq_lock); @@ -2235,48 +2250,32 @@ static void sge_tx_timer_cb(unsigned long data) } /** - * udb_address - return the BAR2 User Doorbell address for a Queue - * @adap: the adapter - * @cntxt_id: the Queue Context ID - * @qpp: Queues Per Page (for all PFs) + * bar2_address - return the BAR2 address for an SGE Queue's Registers + * @adapter: the adapter + * @qid: the SGE Queue ID + * @qtype: the SGE Queue Type (Egress or Ingress) + * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues * - * Returns the BAR2 address of the user Doorbell associated with the - * indicated Queue Context ID. Note that this is only applicable - * for T5 and later. - */ -static u64 udb_address(struct adapter *adap, unsigned int cntxt_id, - unsigned int qpp) -{ - u64 udb; - unsigned int s_qpp; - unsigned short udb_density; - unsigned long qpshift; - int page; - - BUG_ON(is_t4(adap->params.chip)); - - s_qpp = (QUEUESPERPAGEPF0 + - (QUEUESPERPAGEPF1 - QUEUESPERPAGEPF0) * adap->fn); - udb_density = 1 << ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK); - qpshift = PAGE_SHIFT - ilog2(udb_density); - udb = (u64)cntxt_id << qpshift; - udb &= PAGE_MASK; - page = udb / PAGE_SIZE; - udb += (cntxt_id - (page * udb_density)) * SGE_UDB_SIZE; - - return udb; -} + * Returns the BAR2 address for the SGE Queue Registers associated with + * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also + * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE + * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID" + * Registers are supported (e.g. the Write Combining Doorbell Buffer). + */ +static void __iomem *bar2_address(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + unsigned int *pbar2_qid) +{ + u64 bar2_qoffset; + int ret; -static u64 udb_address_eq(struct adapter *adap, unsigned int cntxt_id) -{ - return udb_address(adap, cntxt_id, - t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF)); -} + ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype, + &bar2_qoffset, pbar2_qid); + if (ret) + return NULL; -static u64 udb_address_iq(struct adapter *adap, unsigned int cntxt_id) -{ - return udb_address(adap, cntxt_id, - t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF)); + return adapter->bar2 + bar2_qoffset; } int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, @@ -2297,20 +2296,20 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, return -ENOMEM; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_CMD_EXEC | - FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0)); - c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) | + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_CMD_EXEC_F | + FW_IQ_CMD_PFN_V(adap->fn) | FW_IQ_CMD_VFN_V(0)); + c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F | FW_LEN16(c)); - c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | - FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) | - FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) | - FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? 
intr_idx : + c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) | + FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) | + FW_IQ_CMD_IQANDST_V(intr_idx < 0) | FW_IQ_CMD_IQANUD_V(1) | + FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx : -intr_idx - 1)); - c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) | - FW_IQ_CMD_IQGTSMODE | - FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) | - FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4)); + c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) | + FW_IQ_CMD_IQGTSMODE_F | + FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) | + FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4)); c.iqsize = htons(iq->size); c.iqaddr = cpu_to_be64(iq->phys_addr); @@ -2323,12 +2322,12 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, goto fl_nomem; flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); - c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN(1) | - FW_IQ_CMD_FL0FETCHRO(1) | - FW_IQ_CMD_FL0DATARO(1) | - FW_IQ_CMD_FL0PADEN(1)); - c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) | - FW_IQ_CMD_FL0FBMAX(3)); + c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN_F | + FW_IQ_CMD_FL0FETCHRO_F | + FW_IQ_CMD_FL0DATARO_F | + FW_IQ_CMD_FL0PADEN_F); + c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN_V(2) | + FW_IQ_CMD_FL0FBMAX_V(3)); c.fl0size = htons(flsz); c.fl0addr = cpu_to_be64(fl->addr); } @@ -2344,8 +2343,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, iq->next_intr_params = iq->intr_params; iq->cntxt_id = ntohs(c.iqid); iq->abs_id = ntohs(c.physiqid); - if (!is_t4(adap->params.chip)) - iq->udb = udb_address_iq(adap, iq->cntxt_id); + iq->bar2_addr = bar2_address(adap, + iq->cntxt_id, + T4_BAR2_QTYPE_INGRESS, + &iq->bar2_qid); iq->size--; /* subtract status entry */ iq->netdev = dev; iq->handler = hnd; @@ -2362,11 +2363,13 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; - /* Note, we must initialize the Free List User Doorbell - * address before refilling the Free List! + /* Note, we must initialize the BAR2 Free List User Doorbell + * information before refilling the Free List! 
*/ - if (!is_t4(adap->params.chip)) - fl->udb = udb_address_eq(adap, fl->cntxt_id); + fl->bar2_addr = bar2_address(adap, + fl->cntxt_id, + T4_BAR2_QTYPE_EGRESS, + &fl->bar2_qid); refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); } return 0; @@ -2392,9 +2395,10 @@ err: static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) { q->cntxt_id = id; - if (!is_t4(adap->params.chip)) - q->udb = udb_address_eq(adap, q->cntxt_id); - + q->bar2_addr = bar2_address(adap, + q->cntxt_id, + T4_BAR2_QTYPE_EGRESS, + &q->bar2_qid); q->in_use = 0; q->cidx = q->pidx = 0; q->stops = q->restarts = 0; @@ -2423,21 +2427,22 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, return -ENOMEM; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_CMD_EXEC | - FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0)); - c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC | - FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); - c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE | - FW_EQ_ETH_CMD_VIID(pi->viid)); - c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) | - FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | - FW_EQ_ETH_CMD_FETCHRO(1) | - FW_EQ_ETH_CMD_IQID(iqid)); - c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) | - FW_EQ_ETH_CMD_FBMAX(3) | - FW_EQ_ETH_CMD_CIDXFTHRESH(5) | - FW_EQ_ETH_CMD_EQSIZE(nentries)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_CMD_EXEC_F | + FW_EQ_ETH_CMD_PFN_V(adap->fn) | + FW_EQ_ETH_CMD_VFN_V(0)); + c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F | + FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c)); + c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F | + FW_EQ_ETH_CMD_VIID_V(pi->viid)); + c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(2) | + FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) | + FW_EQ_ETH_CMD_FETCHRO_V(1) | + FW_EQ_ETH_CMD_IQID_V(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN_V(2) | + FW_EQ_ETH_CMD_FBMAX_V(3) | + FW_EQ_ETH_CMD_CIDXFTHRESH_V(5) | + FW_EQ_ETH_CMD_EQSIZE_V(nentries)); c.eqaddr = cpu_to_be64(txq->q.phys_addr); ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); @@ -2451,7 +2456,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, return ret; } - init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd))); + init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); txq->txq = netdevq; txq->tso = txq->tx_cso = txq->vlan_ins = 0; txq->mapping_err = 0; @@ -2476,22 +2481,22 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, if (!txq->q.desc) return -ENOMEM; - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_CMD_EXEC | - FW_EQ_CTRL_CMD_PFN(adap->fn) | - FW_EQ_CTRL_CMD_VFN(0)); - c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC | - FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); - c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_CMD_EXEC_F | + FW_EQ_CTRL_CMD_PFN_V(adap->fn) | + FW_EQ_CTRL_CMD_VFN_V(0)); + c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F | + FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c)); + c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid)); c.physeqid_pkd = htonl(0); - c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) | - FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) | - FW_EQ_CTRL_CMD_FETCHRO | - FW_EQ_CTRL_CMD_IQID(iqid)); - c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) | - FW_EQ_CTRL_CMD_FBMAX(3) | - FW_EQ_CTRL_CMD_CIDXFTHRESH(5) | - 
FW_EQ_CTRL_CMD_EQSIZE(nentries)); + c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(2) | + FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) | + FW_EQ_CTRL_CMD_FETCHRO_F | + FW_EQ_CTRL_CMD_IQID_V(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN_V(2) | + FW_EQ_CTRL_CMD_FBMAX_V(3) | + FW_EQ_CTRL_CMD_CIDXFTHRESH_V(5) | + FW_EQ_CTRL_CMD_EQSIZE_V(nentries)); c.eqaddr = cpu_to_be64(txq->q.phys_addr); ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); @@ -2503,7 +2508,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, return ret; } - init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid))); + init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); txq->adap = adap; skb_queue_head_init(&txq->sendq); tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq); @@ -2530,20 +2535,20 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, return -ENOMEM; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_CMD_EXEC | - FW_EQ_OFLD_CMD_PFN(adap->fn) | - FW_EQ_OFLD_CMD_VFN(0)); - c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC | - FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); - c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) | - FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) | - FW_EQ_OFLD_CMD_FETCHRO(1) | - FW_EQ_OFLD_CMD_IQID(iqid)); - c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) | - FW_EQ_OFLD_CMD_FBMAX(3) | - FW_EQ_OFLD_CMD_CIDXFTHRESH(5) | - FW_EQ_OFLD_CMD_EQSIZE(nentries)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_CMD_EXEC_F | + FW_EQ_OFLD_CMD_PFN_V(adap->fn) | + FW_EQ_OFLD_CMD_VFN_V(0)); + c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F | + FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c)); + c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(2) | + FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) | + FW_EQ_OFLD_CMD_FETCHRO_F | + FW_EQ_OFLD_CMD_IQID_V(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN_V(2) | + FW_EQ_OFLD_CMD_FBMAX_V(3) | + FW_EQ_OFLD_CMD_CIDXFTHRESH_V(5) | + FW_EQ_OFLD_CMD_EQSIZE_V(nentries)); c.eqaddr = cpu_to_be64(txq->q.phys_addr); ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); @@ -2557,7 +2562,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, return ret; } - init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd))); + init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd))); txq->adap = adap; skb_queue_head_init(&txq->sendq); tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 163a2a14948cf8e3f5d78cf63588ee3905baa315..28d04153f9990797068218bc96afd484bb536c0c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -188,9 +188,9 @@ static void t4_report_fw_error(struct adapter *adap) u32 pcie_fw; pcie_fw = t4_read_reg(adap, MA_PCIE_FW); - if (pcie_fw & FW_PCIE_FW_ERR) + if (pcie_fw & PCIE_FW_ERR) dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n", - reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]); + reason[PCIE_FW_EVAL_G(pcie_fw)]); } /* @@ -310,16 +310,17 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, } res = t4_read_reg64(adap, data_reg); - if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) { + if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) { fw_asrt(adap, data_reg); - res = FW_CMD_RETVAL(EIO); - } else if (rpl) + res = 
FW_CMD_RETVAL_V(EIO); + } else if (rpl) { get_mbox_rpl(adap, rpl, size / 8, data_reg); + } - if (FW_CMD_RETVAL_GET((int)res)) + if (FW_CMD_RETVAL_G((int)res)) dump_mbox(adap, mbox, data_reg); t4_write_reg(adap, ctl_reg, 0); - return -FW_CMD_RETVAL_GET((int)res); + return -FW_CMD_RETVAL_G((int)res); } } @@ -483,12 +484,12 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, * MEM_MC0 = 2 -- For T5 * MEM_MC1 = 3 -- For T5 */ - edc_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)); + edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A)); if (mtype != MEM_MC1) memoffset = (mtype * (edc_size * 1024 * 1024)); else { - mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, - MA_EXT_MEMORY_BAR)); + mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap, + MA_EXT_MEMORY1_BAR_A)); memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; } @@ -710,8 +711,8 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) * Ask firmware for the Core Clock since it knows how to translate the * Reference Clock ('V2') VPD field into a Core Clock value ... */ - cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK)); + cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK)); ret = t4_query_params(adapter, adapter->mbox, 0, 0, 1, &cclk_param, &cclk_val); @@ -992,10 +993,10 @@ static int should_install_fs_fw(struct adapter *adap, int card_fw_usable, install: dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, " "installing firmware %u.%u.%u.%u on card.\n", - FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c), - FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason, - FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k), - FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k)); + FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c), + FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason, + FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), + FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); return 1; } @@ -1067,12 +1068,12 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, "driver compiled with %d.%d.%d.%d, " "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n", state, - FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d), - FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d), - FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c), - FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), - FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k), - FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k)); + FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d), + FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d), + FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c), + FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), + FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), + FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); ret = EINVAL; goto bye; } @@ -1131,6 +1132,27 @@ unsigned int t4_flash_cfg_addr(struct adapter *adapter) return FLASH_CFG_START; } +/* Return TRUE if the specified firmware matches the adapter. I.e. T4 + * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead + * and emit an error message for mismatched firmware to save our caller the + * effort ... + */ +static bool t4_fw_matches_chip(const struct adapter *adap, + const struct fw_hdr *hdr) +{ + /* The expression below will return FALSE for any unsupported adapter + * which will keep us "honest" in the future ... 
+ */ + if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) || + (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5)) + return true; + + dev_err(adap->pdev_dev, + "FW image (%d) is not suitable for this adapter (%d)\n", + hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip)); + return false; +} + /** * t4_load_fw - download firmware * @adap: the adapter @@ -1170,6 +1192,8 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) FW_MAX_SIZE); return -EFBIG; } + if (!t4_fw_matches_chip(adap, hdr)) + return -EINVAL; for (csum = 0, i = 0; i < size / sizeof(csum); i++) csum += ntohl(p[i]); @@ -1212,6 +1236,8 @@ out: if (ret) dev_err(adap->pdev_dev, "firmware download failed, error %d\n", ret); + else + ret = t4_get_fw_version(adap, &adap->params.fw_vers); return ret; } @@ -1236,7 +1262,7 @@ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc) { struct fw_port_cmd c; - unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO); + unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO); lc->link_ok = 0; if (lc->requested_fc & PAUSE_RX) @@ -1245,9 +1271,9 @@ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, fc |= FW_PORT_CAP_FC_TX; memset(&c, 0, sizeof(c)); - c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_PORT_CMD_PORTID(port)); - c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | + c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port)); + c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) | FW_LEN16(c)); if (!(lc->supported & FW_PORT_CAP_ANEG)) { @@ -1275,9 +1301,9 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) struct fw_port_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_PORT_CMD_PORTID(port)); - c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | + c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port)); + c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) | FW_LEN16(c)); c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); @@ -1563,7 +1589,7 @@ static void cim_intr_handler(struct adapter *adapter) int fat; - if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR) + if (t4_read_reg(adapter, MA_PCIE_FW) & PCIE_FW_ERR) t4_report_fw_error(adapter); fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE, @@ -2071,9 +2097,9 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, struct fw_rss_ind_tbl_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE | - FW_RSS_IND_TBL_CMD_VIID(viid)); + cmd.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_RSS_IND_TBL_CMD_VIID_V(viid)); cmd.retval_len16 = htonl(FW_LEN16(cmd)); /* each fw_rss_ind_tbl_cmd takes up to 32 entries */ @@ -2090,13 +2116,13 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, while (nq > 0) { unsigned int v; - v = FW_RSS_IND_TBL_CMD_IQ0(*rsp); + v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp); if (++rsp >= rsp_end) rsp = rspq; - v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp); + v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp); if (++rsp >= rsp_end) rsp = rspq; - v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp); + v |= 
FW_RSS_IND_TBL_CMD_IQ2_V(*rsp); if (++rsp >= rsp_end) rsp = rspq; @@ -2126,14 +2152,14 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, struct fw_rss_glb_config_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE); + c.op_to_write = htonl(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); c.retval_len16 = htonl(FW_LEN16(c)); if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { - c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); + c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode)); } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { c.u.basicvirtual.mode_pkd = - htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); + htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode)); c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags); } else return -EINVAL; @@ -2553,18 +2579,18 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid) { memset(wr, 0, sizeof(*wr)); - wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR)); - wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16)); - wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) | - V_FW_FILTER_WR_NOREPLY(qid < 0)); - wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER); + wr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR)); + wr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*wr) / 16)); + wr->tid_to_iq = htonl(FW_FILTER_WR_TID_V(ftid) | + FW_FILTER_WR_NOREPLY_V(qid < 0)); + wr->del_filter_to_l2tix = htonl(FW_FILTER_WR_DEL_FILTER_F); if (qid >= 0) - wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid)); + wr->rx_chan_rx_rpl_iq = htons(FW_FILTER_WR_RX_RPL_IQ_V(qid)); } #define INIT_CMD(var, cmd, rd_wr) do { \ - (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \ - FW_CMD_REQUEST | FW_CMD_##rd_wr); \ + (var).op_to_write = htonl(FW_CMD_OP_V(FW_##cmd##_CMD) | \ + FW_CMD_REQUEST_F | FW_CMD_##rd_wr##_F); \ (var).retval_len16 = htonl(FW_LEN16(var)); \ } while (0) @@ -2574,9 +2600,9 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, struct fw_ldst_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE)); + c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE)); c.cycles_to_len16 = htonl(FW_LEN16(c)); c.u.addrval.addr = htonl(addr); c.u.addrval.val = htonl(val); @@ -2602,11 +2628,11 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, struct fw_ldst_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | - FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); + c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F | + FW_CMD_READ_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO)); c.cycles_to_len16 = htonl(FW_LEN16(c)); - c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | - FW_LDST_CMD_MMD(mmd)); + c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) | + FW_LDST_CMD_MMD_V(mmd)); c.u.mdio.raddr = htons(reg); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); @@ -2632,11 +2658,11 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, struct fw_ldst_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | 
FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); + c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO)); c.cycles_to_len16 = htonl(FW_LEN16(c)); - c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | - FW_LDST_CMD_MMD(mmd)); + c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) | + FW_LDST_CMD_MMD_V(mmd)); c.u.mdio.raddr = htons(reg); c.u.mdio.rval = htons(val); @@ -2773,13 +2799,13 @@ retry: memset(&c, 0, sizeof(c)); INIT_CMD(c, HELLO, WRITE); c.err_to_clearinit = htonl( - FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | - FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | - FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : - FW_HELLO_CMD_MBMASTER_MASK) | - FW_HELLO_CMD_MBASYNCNOT(evt_mbox) | - FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) | - FW_HELLO_CMD_CLEARINIT); + FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) | + FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) | + FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ? mbox : + FW_HELLO_CMD_MBMASTER_M) | + FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) | + FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) | + FW_HELLO_CMD_CLEARINIT_F); /* * Issue the HELLO command to the firmware. If it's not successful @@ -2792,17 +2818,17 @@ retry: if (ret < 0) { if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) goto retry; - if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR) + if (t4_read_reg(adap, MA_PCIE_FW) & PCIE_FW_ERR) t4_report_fw_error(adap); return ret; } v = ntohl(c.err_to_clearinit); - master_mbox = FW_HELLO_CMD_MBMASTER_GET(v); + master_mbox = FW_HELLO_CMD_MBMASTER_G(v); if (state) { - if (v & FW_HELLO_CMD_ERR) + if (v & FW_HELLO_CMD_ERR_F) *state = DEV_STATE_ERR; - else if (v & FW_HELLO_CMD_INIT) + else if (v & FW_HELLO_CMD_INIT_F) *state = DEV_STATE_INIT; else *state = DEV_STATE_UNINIT; @@ -2817,9 +2843,9 @@ retry: * and we wouldn't want to fail pointlessly. (This can happen when an * OS loads lots of different drivers rapidly at the same time). In * this case, the Master PF returned by the firmware will be - * FW_PCIE_FW_MASTER_MASK so the test below will work ... + * PCIE_FW_MASTER_M so the test below will work ... */ - if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 && + if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 && master_mbox != mbox) { int waiting = FW_CMD_HELLO_TIMEOUT; @@ -2843,7 +2869,7 @@ retry: * our retries ... */ pcie_fw = t4_read_reg(adap, MA_PCIE_FW); - if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) { + if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) { if (waiting <= 0) { if (retries-- > 0) goto retry; @@ -2858,9 +2884,9 @@ retry: * report errors preferentially. */ if (state) { - if (pcie_fw & FW_PCIE_FW_ERR) + if (pcie_fw & PCIE_FW_ERR) *state = DEV_STATE_ERR; - else if (pcie_fw & FW_PCIE_FW_INIT) + else if (pcie_fw & PCIE_FW_INIT) *state = DEV_STATE_INIT; } @@ -2869,9 +2895,9 @@ retry: * there's not a valid Master PF, grab its identity * for our caller. */ - if (master_mbox == FW_PCIE_FW_MASTER_MASK && - (pcie_fw & FW_PCIE_FW_MASTER_VLD)) - master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw); + if (master_mbox == PCIE_FW_MASTER_M && + (pcie_fw & PCIE_FW_MASTER_VLD)) + master_mbox = PCIE_FW_MASTER_G(pcie_fw); break; } } @@ -2939,7 +2965,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) * Issues a RESET command to firmware (if desired) with a HALT indication * and then puts the microprocessor into RESET state. 
The RESET command * will only be issued if a legitimate mailbox is provided (mbox <= - * FW_PCIE_FW_MASTER_MASK). + * PCIE_FW_MASTER_M). * * This is generally used in order for the host to safely manipulate the * adapter without fear of conflicting with whatever the firmware might @@ -2954,13 +2980,13 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force) * If a legitimate mailbox is provided, issue a RESET command * with a HALT indication. */ - if (mbox <= FW_PCIE_FW_MASTER_MASK) { + if (mbox <= PCIE_FW_MASTER_M) { struct fw_reset_cmd c; memset(&c, 0, sizeof(c)); INIT_CMD(c, RESET, WRITE); c.val = htonl(PIORST | PIORSTMODE); - c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U)); + c.halt_pkd = htonl(FW_RESET_CMD_HALT_F); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -2979,8 +3005,8 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force) */ if (ret == 0 || force) { t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST); - t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, - FW_PCIE_FW_HALT); + t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F, + PCIE_FW_HALT_F); } /* @@ -3019,7 +3045,7 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) * doing it automatically, we need to clear the PCIE_FW.HALT * bit. */ - t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0); + t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F, 0); /* * If we've been given a valid mailbox, first try to get the @@ -3028,7 +3054,7 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) * valid mailbox or the RESET command failed, fall back to * hitting the chip with a hammer. */ - if (mbox <= FW_PCIE_FW_MASTER_MASK) { + if (mbox <= PCIE_FW_MASTER_M) { t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0); msleep(100); if (t4_fw_reset(adap, mbox, @@ -3043,7 +3069,7 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0); for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { - if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT)) + if (!(t4_read_reg(adap, PCIE_FW) & PCIE_FW_HALT_F)) return 0; msleep(100); ms += 100; @@ -3080,6 +3106,9 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; int reset, ret; + if (!t4_fw_matches_chip(adap, fw_hdr)) + return -EINVAL; + ret = t4_fw_halt(adap, mbox, force); if (ret < 0 && !force) return ret; @@ -3250,9 +3279,9 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, return -EINVAL; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | - FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) | - FW_PARAMS_CMD_VFN(vf)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F | + FW_CMD_READ_F | FW_PARAMS_CMD_PFN_V(pf) | + FW_PARAMS_CMD_VFN_V(vf)); c.retval_len16 = htonl(FW_LEN16(c)); for (i = 0; i < nparams; i++, p += 2) *p = htonl(*params++); @@ -3290,10 +3319,10 @@ int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox, return -EINVAL; memset(&c, 0, sizeof(c)); - c.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE | - FW_PARAMS_CMD_PFN(pf) | - FW_PARAMS_CMD_VFN(vf)); + c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_PARAMS_CMD_PFN_V(pf) | + FW_PARAMS_CMD_VFN_V(vf)); c.retval_len16 = cpu_to_be32(FW_LEN16(c)); while (nparams--) { @@ -3328,9 +3357,9 @@ int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, return 
-EINVAL; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) | - FW_PARAMS_CMD_VFN(vf)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_PARAMS_CMD_PFN_V(pf) | + FW_PARAMS_CMD_VFN_V(vf)); c.retval_len16 = htonl(FW_LEN16(c)); while (nparams--) { *p++ = htonl(*params++); @@ -3370,20 +3399,20 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, struct fw_pfvf_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) | - FW_PFVF_CMD_VFN(vf)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) | + FW_PFVF_CMD_VFN_V(vf)); c.retval_len16 = htonl(FW_LEN16(c)); - c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) | - FW_PFVF_CMD_NIQ(rxq)); - c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) | - FW_PFVF_CMD_PMASK(pmask) | - FW_PFVF_CMD_NEQ(txq)); - c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) | - FW_PFVF_CMD_NEXACTF(nexact)); - c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) | - FW_PFVF_CMD_WX_CAPS(wxcaps) | - FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl)); + c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT_V(rxqi) | + FW_PFVF_CMD_NIQ_V(rxq)); + c.type_to_neq = htonl(FW_PFVF_CMD_CMASK_V(cmask) | + FW_PFVF_CMD_PMASK_V(pmask) | + FW_PFVF_CMD_NEQ_V(txq)); + c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC_V(tc) | FW_PFVF_CMD_NVI_V(vi) | + FW_PFVF_CMD_NEXACTF_V(nexact)); + c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS_V(rcaps) | + FW_PFVF_CMD_WX_CAPS_V(wxcaps) | + FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -3412,11 +3441,11 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, struct fw_vi_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_CMD_EXEC | - FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf)); - c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c)); - c.portid_pkd = FW_VI_CMD_PORTID(port); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_CMD_EXEC_F | + FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf)); + c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC_F | FW_LEN16(c)); + c.portid_pkd = FW_VI_CMD_PORTID_V(port); c.nmac = nmac - 1; ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); @@ -3437,8 +3466,8 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, } } if (rss_size) - *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd)); - return FW_VI_CMD_VIID_GET(ntohs(c.type_viid)); + *rss_size = FW_VI_CMD_RSSSIZE_G(ntohs(c.rsssize_pkd)); + return FW_VI_CMD_VIID_G(ntohs(c.type_viid)); } /** @@ -3465,23 +3494,23 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, if (mtu < 0) mtu = FW_RXMODE_MTU_NO_CHG; if (promisc < 0) - promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK; + promisc = FW_VI_RXMODE_CMD_PROMISCEN_M; if (all_multi < 0) - all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK; + all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M; if (bcast < 0) - bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK; + bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M; if (vlanex < 0) - vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK; + vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M; memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST_F 
| + FW_CMD_WRITE_F | FW_VI_RXMODE_CMD_VIID_V(viid)); c.retval_len16 = htonl(FW_LEN16(c)); - c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) | - FW_VI_RXMODE_CMD_PROMISCEN(promisc) | - FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | - FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | - FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); + c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU_V(mtu) | + FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) | + FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) | + FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) | + FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex)); return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); } @@ -3522,15 +3551,15 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, return -EINVAL; memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) | - FW_VI_MAC_CMD_VIID(viid)); - c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) | - FW_CMD_LEN16((naddr + 2) / 2)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | (free ? FW_CMD_EXEC_F : 0) | + FW_VI_MAC_CMD_VIID_V(viid)); + c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS_V(free) | + FW_CMD_LEN16_V((naddr + 2) / 2)); for (i = 0, p = c.u.exact; i < naddr; i++, p++) { - p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | - FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); + p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC)); memcpy(p->macaddr, addr[i], sizeof(p->macaddr)); } @@ -3539,7 +3568,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, return ret; for (i = 0, p = c.u.exact; i < naddr; i++, p++) { - u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); + u16 index = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx)); if (idx) idx[i] = index >= max_naddr ? 0xffff : index; @@ -3585,17 +3614,17 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, mode = add_smt ? 
FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid)); - c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1)); - p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | - FW_VI_MAC_CMD_SMAC_RESULT(mode) | - FW_VI_MAC_CMD_IDX(idx)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_VI_MAC_CMD_VIID_V(viid)); + c.freemacs_to_len16 = htonl(FW_CMD_LEN16_V(1)); + p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_SMAC_RESULT_V(mode) | + FW_VI_MAC_CMD_IDX_V(idx)); memcpy(p->macaddr, addr, sizeof(p->macaddr)); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret == 0) { - ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); + ret = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx)); if (ret >= max_mac_addr) ret = -ENOMEM; } @@ -3619,11 +3648,11 @@ int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, struct fw_vi_mac_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid)); - c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN | - FW_VI_MAC_CMD_HASHUNIEN(ucast) | - FW_CMD_LEN16(1)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_VI_ENABLE_CMD_VIID_V(viid)); + c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN_F | + FW_VI_MAC_CMD_HASHUNIEN_V(ucast) | + FW_CMD_LEN16_V(1)); c.u.hash.hashvec = cpu_to_be64(vec); return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); } @@ -3646,12 +3675,12 @@ int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, struct fw_vi_enable_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid)); - c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) | - FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c) | - FW_VI_ENABLE_CMD_DCB_INFO(dcb_en)); + c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN_V(rx_en) | + FW_VI_ENABLE_CMD_EEN_V(tx_en) | FW_LEN16(c) | + FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en)); return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); } @@ -3686,9 +3715,9 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, struct fw_vi_enable_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); - c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid)); + c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c)); c.blinkdur = htons(nblinks); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -3713,11 +3742,11 @@ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, struct fw_iq_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) | - FW_IQ_CMD_VFN(vf)); - c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c)); - c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) | + FW_IQ_CMD_VFN_V(vf)); + c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F | FW_LEN16(c)); + c.type_to_iqandstindex = 
htonl(FW_IQ_CMD_TYPE_V(iqtype)); c.iqid = htons(iqid); c.fl0id = htons(fl0id); c.fl1id = htons(fl1id); @@ -3740,11 +3769,11 @@ int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, struct fw_eq_eth_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) | - FW_EQ_ETH_CMD_VFN(vf)); - c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); - c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_EQ_ETH_CMD_PFN_V(pf) | + FW_EQ_ETH_CMD_VFN_V(vf)); + c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c)); + c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID_V(eqid)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -3764,11 +3793,11 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, struct fw_eq_ctrl_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) | - FW_EQ_CTRL_CMD_VFN(vf)); - c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c)); - c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_EQ_CTRL_CMD_PFN_V(pf) | + FW_EQ_CTRL_CMD_VFN_V(vf)); + c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c)); + c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID_V(eqid)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -3788,11 +3817,11 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, struct fw_eq_ofld_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) | - FW_EQ_OFLD_CMD_VFN(vf)); - c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c)); - c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_EQ_OFLD_CMD_PFN_V(pf) | + FW_EQ_OFLD_CMD_VFN_V(vf)); + c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c)); + c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eqid)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -3810,25 +3839,25 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) if (opcode == FW_PORT_CMD) { /* link/module state change message */ int speed = 0, fc = 0; const struct fw_port_cmd *p = (void *)rpl; - int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid)); + int chan = FW_PORT_CMD_PORTID_G(ntohl(p->op_to_portid)); int port = adap->chan_map[chan]; struct port_info *pi = adap2pinfo(adap, port); struct link_config *lc = &pi->link_cfg; u32 stat = ntohl(p->u.info.lstatus_to_modtype); - int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0; - u32 mod = FW_PORT_CMD_MODTYPE_GET(stat); + int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0; + u32 mod = FW_PORT_CMD_MODTYPE_G(stat); - if (stat & FW_PORT_CMD_RXPAUSE) + if (stat & FW_PORT_CMD_RXPAUSE_F) fc |= PAUSE_RX; - if (stat & FW_PORT_CMD_TXPAUSE) + if (stat & FW_PORT_CMD_TXPAUSE_F) fc |= PAUSE_TX; - if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) + if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) speed = 100; - else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) speed = 1000; - else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) speed = 10000; - else if (stat & 
FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G)) + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) speed = 40000; if (link_ok != lc->link_ok || speed != lc->speed || @@ -4001,6 +4030,126 @@ int t4_prep_adapter(struct adapter *adapter) return 0; } +/** + * cxgb4_t4_bar2_sge_qregs - return BAR2 SGE Queue register information + * @adapter: the adapter + * @qid: the Queue ID + * @qtype: the Ingress or Egress type for @qid + * @pbar2_qoffset: BAR2 Queue Offset + * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues + * + * Returns the BAR2 SGE Queue register information associated with the + * indicated Absolute Queue ID. These are passed back in return value + * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queues + * and T4_BAR2_QTYPE_INGRESS for Ingress Queues. + * + * This may return an error which indicates that BAR2 SGE Queue + * registers aren't available. If an error is not returned, then the + * following values are returned: + * + * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers + * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid + * + * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which + * require the "Inferred Queue ID" ability may be used. E.g. the + * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0, + * then these "Inferred Queue ID" registers may not be used. + */ +int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid) +{ + unsigned int page_shift, page_size, qpp_shift, qpp_mask; + u64 bar2_page_offset, bar2_qoffset; + unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred; + + /* T4 doesn't support BAR2 SGE Queue registers. + */ + if (is_t4(adapter->params.chip)) + return -EINVAL; + + /* Get our SGE Page Size parameters. + */ + page_shift = adapter->params.sge.hps + 10; + page_size = 1 << page_shift; + + /* Get the right Queues per Page parameters for our Queue. + */ + qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS + ? adapter->params.sge.eq_qpp + : adapter->params.sge.iq_qpp); + qpp_mask = (1 << qpp_shift) - 1; + + /* Calculate the basics of the BAR2 SGE Queue register area: + * o The BAR2 page the Queue registers will be in. + * o The BAR2 Queue ID. + * o The BAR2 Queue ID Offset into the BAR2 page. + */ + bar2_page_offset = ((qid >> qpp_shift) << page_shift); + bar2_qid = qid & qpp_mask; + bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; + + /* If the BAR2 Queue ID Offset is less than the Page Size, then the + * hardware will infer the Absolute Queue ID simply from the writes to + * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a + * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply + * write to the first BAR2 SGE Queue Area within the BAR2 Page with + * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID + * from the BAR2 Page and BAR2 Queue ID. + * + * One important consequence of this is that some BAR2 SGE registers + * have a "Queue ID" field and we can write the BAR2 SGE Queue ID + * there. But other registers synthesize the SGE Queue ID purely + * from the writes to the registers -- the Write Combined Doorbell + * Buffer is a good example. These BAR2 SGE Registers are only + * available for those BAR2 SGE Register areas where the SGE Absolute + * Queue ID can be inferred from simple writes.
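+ *
+ * As a concrete illustration (assuming a 4KB SGE Page Size, 64 Egress
+ * Queues Per Page, and taking SGE_UDB_SIZE as 128 bytes): for qid 70,
+ * bar2_page_offset = (70 >> 6) << 12 = 0x1000, bar2_qid = 70 & 63 = 6,
+ * and bar2_qid_offset = 6 * 128 = 0x300. Since 0x300 is less than the
+ * 4KB Page Size, the Queue ID is inferred, so we return
+ * *pbar2_qoffset = 0x1300 and *pbar2_qid = 0.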
+ */ + bar2_qoffset = bar2_page_offset; + bar2_qinferred = (bar2_qid_offset < page_size); + if (bar2_qinferred) { + bar2_qoffset += bar2_qid_offset; + bar2_qid = 0; + } + + *pbar2_qoffset = bar2_qoffset; + *pbar2_qid = bar2_qid; + return 0; +} + +/** + * t4_init_sge_params - initialize adap->params.sge + * @adapter: the adapter + * + * Initialize various fields of the adapter's SGE Parameters structure. + */ +int t4_init_sge_params(struct adapter *adapter) +{ + struct sge_params *sge_params = &adapter->params.sge; + u32 hps, qpp; + unsigned int s_hps, s_qpp; + + /* Extract the SGE Page Size for our PF. + */ + hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE); + s_hps = (HOSTPAGESIZEPF0_S + + (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn); + sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M); + + /* Extract the SGE Egress and Ingress Queues Per Page for our PF. + */ + s_qpp = (QUEUESPERPAGEPF0_S + + (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn); + qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF); + sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK); + qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF); + sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK); + + return 0; +} + /** * t4_init_tp_params - initialize adap->params.tp * @adap: the adapter @@ -4121,11 +4270,11 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) while ((adap->params.portvec & (1 << j)) == 0) j++; - c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | - FW_CMD_REQUEST | FW_CMD_READ | - FW_PORT_CMD_PORTID(j)); + c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F | + FW_PORT_CMD_PORTID_V(j)); c.action_to_len16 = htonl( - FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | + FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | FW_LEN16(c)); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret) @@ -4143,13 +4292,13 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) adap->port[i]->dev_port = j; ret = ntohl(c.u.info.lstatus_to_modtype); - p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ? - FW_PORT_CMD_MDIOADDR_GET(ret) : -1; - p->port_type = FW_PORT_CMD_PTYPE_GET(ret); + p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
+ FW_PORT_CMD_MDIOADDR_G(ret) : -1; + p->port_type = FW_PORT_CMD_PTYPE_G(ret); p->mod_type = FW_PORT_MOD_TYPE_NA; - rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_READ | + rvc.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F | FW_RSS_VI_CONFIG_CMD_VIID(p->viid)); rvc.retval_len16 = htonl(FW_LEN16(rvc)); ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index 5f4db2398c71a97885727e9e97f58529a6fe08da..0f89f68948aba2e6f42f87188827ce7eac7bcf7e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -205,16 +205,62 @@ struct work_request_hdr { #define WR_HDR struct work_request_hdr wr /* option 0 fields */ -#define S_MSS_IDX 60 -#define M_MSS_IDX 0xF -#define V_MSS_IDX(x) ((__u64)(x) << S_MSS_IDX) -#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX) +#define TX_CHAN_S 2 +#define TX_CHAN_V(x) ((x) << TX_CHAN_S) + +#define ULP_MODE_S 8 +#define ULP_MODE_V(x) ((x) << ULP_MODE_S) + +#define RCV_BUFSIZ_S 12 +#define RCV_BUFSIZ_M 0x3FFU +#define RCV_BUFSIZ_V(x) ((x) << RCV_BUFSIZ_S) + +#define SMAC_SEL_S 28 +#define SMAC_SEL_V(x) ((__u64)(x) << SMAC_SEL_S) + +#define L2T_IDX_S 36 +#define L2T_IDX_V(x) ((__u64)(x) << L2T_IDX_S) + +#define WND_SCALE_S 50 +#define WND_SCALE_V(x) ((__u64)(x) << WND_SCALE_S) + +#define KEEP_ALIVE_S 54 +#define KEEP_ALIVE_V(x) ((__u64)(x) << KEEP_ALIVE_S) +#define KEEP_ALIVE_F KEEP_ALIVE_V(1ULL) + +#define MSS_IDX_S 60 +#define MSS_IDX_M 0xF +#define MSS_IDX_V(x) ((__u64)(x) << MSS_IDX_S) +#define MSS_IDX_G(x) (((x) >> MSS_IDX_S) & MSS_IDX_M) /* option 2 fields */ -#define S_RSS_QUEUE 0 -#define M_RSS_QUEUE 0x3FF -#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE) -#define G_RSS_QUEUE(x) (((x) >> S_RSS_QUEUE) & M_RSS_QUEUE) +#define RSS_QUEUE_S 0 +#define RSS_QUEUE_M 0x3FF +#define RSS_QUEUE_V(x) ((x) << RSS_QUEUE_S) +#define RSS_QUEUE_G(x) (((x) >> RSS_QUEUE_S) & RSS_QUEUE_M) + +#define RSS_QUEUE_VALID_S 10 +#define RSS_QUEUE_VALID_V(x) ((x) << RSS_QUEUE_VALID_S) +#define RSS_QUEUE_VALID_F RSS_QUEUE_VALID_V(1U) + +#define RX_FC_DISABLE_S 20 +#define RX_FC_DISABLE_V(x) ((x) << RX_FC_DISABLE_S) +#define RX_FC_DISABLE_F RX_FC_DISABLE_V(1U) + +#define RX_FC_VALID_S 22 +#define RX_FC_VALID_V(x) ((x) << RX_FC_VALID_S) +#define RX_FC_VALID_F RX_FC_VALID_V(1U) + +#define RX_CHANNEL_S 26 +#define RX_CHANNEL_V(x) ((x) << RX_CHANNEL_S) + +#define WND_SCALE_EN_S 28 +#define WND_SCALE_EN_V(x) ((x) << WND_SCALE_EN_S) +#define WND_SCALE_EN_F WND_SCALE_EN_V(1U) + +#define T5_OPT_2_VALID_S 31 +#define T5_OPT_2_VALID_V(x) ((x) << T5_OPT_2_VALID_S) +#define T5_OPT_2_VALID_F T5_OPT_2_VALID_V(1U) struct cpl_pass_open_req { WR_HDR; @@ -224,20 +270,11 @@ struct cpl_pass_open_req { __be32 local_ip; __be32 peer_ip; __be64 opt0; -#define TX_CHAN(x) ((x) << 2) #define NO_CONG(x) ((x) << 4) #define DELACK(x) ((x) << 5) -#define ULP_MODE(x) ((x) << 8) -#define RCV_BUFSIZ(x) ((x) << 12) -#define RCV_BUFSIZ_MASK 0x3FFU #define DSCP(x) ((x) << 22) -#define SMAC_SEL(x) ((u64)(x) << 28) -#define L2T_IDX(x) ((u64)(x) << 36) #define TCAM_BYPASS(x) ((u64)(x) << 48) #define NAGLE(x) ((u64)(x) << 49) -#define WND_SCALE(x) ((u64)(x) << 50) -#define KEEP_ALIVE(x) ((u64)(x) << 54) -#define MSS_IDX(x) ((u64)(x) << 60) __be64 opt1; #define SYN_RSS_ENABLE (1 << 0) #define SYN_RSS_QUEUE(x) ((x) << 2) @@ -267,20 +304,13 @@ struct cpl_pass_accept_rpl { WR_HDR; union opcode_tid ot; __be32 
opt2; -#define RSS_QUEUE(x) ((x) << 0) -#define RSS_QUEUE_VALID (1 << 10) #define RX_COALESCE_VALID(x) ((x) << 11) #define RX_COALESCE(x) ((x) << 12) #define PACE(x) ((x) << 16) -#define RX_FC_VALID ((1U) << 19) -#define RX_FC_DISABLE ((1U) << 20) #define TX_QUEUE(x) ((x) << 23) -#define RX_CHANNEL(x) ((x) << 26) #define CCTRL_ECN(x) ((x) << 27) -#define WND_SCALE_EN(x) ((x) << 28) #define TSTAMPS_EN(x) ((x) << 29) #define SACK_EN(x) ((x) << 30) -#define T5_OPT_2_VALID ((1U) << 31) __be64 opt0; }; @@ -305,10 +335,10 @@ struct cpl_act_open_req { __be32 opt2; }; -#define S_FILTER_TUPLE 24 -#define M_FILTER_TUPLE 0xFFFFFFFFFF -#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE) -#define G_FILTER_TUPLE(x) (((x) >> S_FILTER_TUPLE) & M_FILTER_TUPLE) +#define FILTER_TUPLE_S 24 +#define FILTER_TUPLE_M 0xFFFFFFFFFF +#define FILTER_TUPLE_V(x) ((x) << FILTER_TUPLE_S) +#define FILTER_TUPLE_G(x) (((x) >> FILTER_TUPLE_S) & FILTER_TUPLE_M) struct cpl_t5_act_open_req { WR_HDR; union opcode_tid ot; @@ -579,10 +609,16 @@ struct cpl_rx_data_ack { WR_HDR; union opcode_tid ot; __be32 credit_dack; -#define RX_CREDITS(x) ((x) << 0) -#define RX_FORCE_ACK(x) ((x) << 28) }; +/* cpl_rx_data_ack.credit_dack fields */ +#define RX_CREDITS_S 0 +#define RX_CREDITS_V(x) ((x) << RX_CREDITS_S) + +#define RX_FORCE_ACK_S 28 +#define RX_FORCE_ACK_V(x) ((x) << RX_FORCE_ACK_S) +#define RX_FORCE_ACK_F RX_FORCE_ACK_V(1U) + struct cpl_rx_pkt { struct rss_header rsshdr; u8 opcode; @@ -803,6 +839,9 @@ enum { ULP_TX_SC_ISGL = 0x83 }; +#define ULPTX_CMD_S 24 +#define ULPTX_CMD_V(x) ((x) << ULPTX_CMD_S) + struct ulptx_sge_pair { __be32 len[2]; __be64 addr[2]; @@ -810,7 +849,6 @@ struct ulptx_sge_pair { struct ulptx_sgl { __be32 cmd_nsge; -#define ULPTX_CMD(x) ((x) << 24) #define ULPTX_NSGE(x) ((x) << 0) #define ULPTX_MORE (1U << 23) __be32 len0; @@ -821,15 +859,21 @@ struct ulptx_sgl { struct ulp_mem_io { WR_HDR; __be32 cmd; -#define ULP_MEMIO_ORDER(x) ((x) << 23) __be32 len16; /* command length */ __be32 dlen; /* data length in 32-byte units */ -#define ULP_MEMIO_DATA_LEN(x) ((x) << 0) __be32 lock_addr; -#define ULP_MEMIO_ADDR(x) ((x) << 0) #define ULP_MEMIO_LOCK(x) ((x) << 31) }; +/* additional ulp_mem_io.cmd fields */ +#define ULP_MEMIO_ORDER_S 23 +#define ULP_MEMIO_ORDER_V(x) ((x) << ULP_MEMIO_ORDER_S) +#define ULP_MEMIO_ORDER_F ULP_MEMIO_ORDER_V(1U) + +#define T5_ULP_MEMIO_IMM_S 23 +#define T5_ULP_MEMIO_IMM_V(x) ((x) << T5_ULP_MEMIO_IMM_S) +#define T5_ULP_MEMIO_IMM_F T5_ULP_MEMIO_IMM_V(1U) + #define S_T5_ULP_MEMIO_IMM 23 #define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM) #define F_T5_ULP_MEMIO_IMM V_T5_ULP_MEMIO_IMM(1U) @@ -838,4 +882,12 @@ struct ulp_mem_io { #define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER) #define F_T5_ULP_MEMIO_ORDER V_T5_ULP_MEMIO_ORDER(1U) +/* ulp_mem_io.lock_addr fields */ +#define ULP_MEMIO_ADDR_S 0 +#define ULP_MEMIO_ADDR_V(x) ((x) << ULP_MEMIO_ADDR_S) + +/* ulp_mem_io.dlen fields */ +#define ULP_MEMIO_DATA_LEN_S 0 +#define ULP_MEMIO_DATA_LEN_V(x) ((x) << ULP_MEMIO_DATA_LEN_S) + #endif /* __T4_MSG_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h new file mode 100644 index 0000000000000000000000000000000000000000..9e4f95a91fb4004af5d63569a316b0beb2deca93 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -0,0 +1,160 @@ +/* + * This file is part of the Chelsio T4/T5 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __T4_PCI_ID_TBL_H__ +#define __T4_PCI_ID_TBL_H__ + +/* The code can define cpp macros for creating a PCI Device ID Table. This is + * useful because it allows the PCI ID Table to be maintained in a single place. + * + * The macros are: + * + * CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN + * -- Used to start the definition of the PCI ID Table. + * + * CH_PCI_DEVICE_ID_FUNCTION + * -- The PCI Function Number to use in the PCI Device ID Table. "0" + * -- for drivers attaching to PF0-3, "4" for drivers attaching to PF4, + * -- "8" for drivers attaching to SR-IOV Virtual Functions, etc. + * + * CH_PCI_DEVICE_ID_FUNCTION2 [optional] + * -- If defined, create a PCI Device ID Table with both + * -- CH_PCI_DEVICE_ID_FUNCTION and CH_PCI_DEVICE_ID_FUNCTION2 populated. + * + * CH_PCI_ID_TABLE_ENTRY(DeviceID) + * -- Used for the individual PCI Device ID entries. Note that we will + * -- be adding a trailing comma (",") after all of the entries (and + * -- between the pairs of entries if CH_PCI_DEVICE_ID_FUNCTION2 is defined). + * + * CH_PCI_DEVICE_ID_TABLE_DEFINE_END + * -- Used to finish the definition of the PCI ID Table. Note that we + * -- will be adding a trailing semi-colon (";") here. + */ +#ifdef CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN + +#ifndef CH_PCI_DEVICE_ID_FUNCTION +#error CH_PCI_DEVICE_ID_FUNCTION not defined! +#endif +#ifndef CH_PCI_ID_TABLE_ENTRY +#error CH_PCI_ID_TABLE_ENTRY not defined! +#endif +#ifndef CH_PCI_DEVICE_ID_TABLE_DEFINE_END +#error CH_PCI_DEVICE_ID_TABLE_DEFINE_END not defined! +#endif + +/* T4 and later ASICs use a PCI Device ID scheme of 0xVFPP where: + * + * V = "4" for T4; "5" for T5, etc. + * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs + * PP = adapter product designation + * + * We rely on this consistency to create the proper PCI Device IDs + * for the specified CH_PCI_DEVICE_ID_FUNCTION.
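+ *
+ * For example, with CH_PCI_DEVICE_ID_FUNCTION defined as 4,
+ * CH_PCI_ID_TABLE_FENTRY(0x5001) below yields Device ID 0x5401
+ * (0x5001 | (4 << 8)): a T5, PF4, T520-cr device.
+ *
+ * A consumer defines the macros above and then includes this file.
+ * A minimal sketch (the table name and the entry initializer are
+ * illustrative, not mandated by this header):
+ *
+ *	#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+ *		static const struct pci_device_id my_pci_tbl[] = {
+ *	#define CH_PCI_DEVICE_ID_FUNCTION 0x4
+ *	#define CH_PCI_ID_TABLE_ENTRY(devid) \
+ *		{ PCI_VDEVICE(CHELSIO, (devid)), 4 }
+ *	#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
+ *	#include "t4_pci_id_tbl.h"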
+ */ +#ifndef CH_PCI_DEVICE_ID_FUNCTION2 +#define CH_PCI_ID_TABLE_FENTRY(devid) \ + CH_PCI_ID_TABLE_ENTRY((devid) | \ + ((CH_PCI_DEVICE_ID_FUNCTION) << 8)) +#else +#define CH_PCI_ID_TABLE_FENTRY(devid) \ + CH_PCI_ID_TABLE_ENTRY((devid) | \ + ((CH_PCI_DEVICE_ID_FUNCTION) << 8)), \ + CH_PCI_ID_TABLE_ENTRY((devid) | \ + ((CH_PCI_DEVICE_ID_FUNCTION2) << 8)) +#endif + +CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN + /* T4 adapters: + */ + CH_PCI_ID_TABLE_FENTRY(0x4000), /* T440-dbg */ + CH_PCI_ID_TABLE_FENTRY(0x4001), /* T420-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4002), /* T422-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4003), /* T440-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4004), /* T420-bch */ + CH_PCI_ID_TABLE_FENTRY(0x4005), /* T440-bch */ + CH_PCI_ID_TABLE_FENTRY(0x4006), /* T440-ch */ + CH_PCI_ID_TABLE_FENTRY(0x4007), /* T420-so */ + CH_PCI_ID_TABLE_FENTRY(0x4008), /* T420-cx */ + CH_PCI_ID_TABLE_FENTRY(0x4009), /* T420-bt */ + CH_PCI_ID_TABLE_FENTRY(0x400a), /* T404-bt */ + CH_PCI_ID_TABLE_FENTRY(0x400b), /* B420-sr */ + CH_PCI_ID_TABLE_FENTRY(0x400c), /* B404-bt */ + CH_PCI_ID_TABLE_FENTRY(0x400d), /* T480-cr */ + CH_PCI_ID_TABLE_FENTRY(0x400e), /* T440-LP-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4080), /* Custom T480-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4081), /* Custom T440-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4082), /* Custom T420-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4083), /* Custom T420-xaui */ + CH_PCI_ID_TABLE_FENTRY(0x4084), /* Custom T440-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4085), /* Custom T420-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4086), /* Custom T440-bt */ + CH_PCI_ID_TABLE_FENTRY(0x4087), /* Custom T440-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4088), /* Custom T440 2-xaui, 2-xfi */ + + /* T5 adapters: + */ + CH_PCI_ID_TABLE_FENTRY(0x5000), /* T580-dbg */ + CH_PCI_ID_TABLE_FENTRY(0x5001), /* T520-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5002), /* T522-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5003), /* T540-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5004), /* T520-bch */ + CH_PCI_ID_TABLE_FENTRY(0x5005), /* T540-bch */ + CH_PCI_ID_TABLE_FENTRY(0x5006), /* T540-ch */ + CH_PCI_ID_TABLE_FENTRY(0x5007), /* T520-so */ + CH_PCI_ID_TABLE_FENTRY(0x5008), /* T520-cx */ + CH_PCI_ID_TABLE_FENTRY(0x5009), /* T520-bt */ + CH_PCI_ID_TABLE_FENTRY(0x500a), /* T504-bt */ + CH_PCI_ID_TABLE_FENTRY(0x500b), /* B520-sr */ + CH_PCI_ID_TABLE_FENTRY(0x500c), /* B504-bt */ + CH_PCI_ID_TABLE_FENTRY(0x500d), /* T580-cr */ + CH_PCI_ID_TABLE_FENTRY(0x500e), /* T540-LP-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5010), /* T580-LP-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5011), /* T520-LL-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5012), /* T560-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */ + CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */ + CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */ + CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5083), /* Custom T540-LP-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5084), /* Custom T580-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5085), /* Custom 3x T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5086), /* Custom 2x T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */ +CH_PCI_DEVICE_ID_TABLE_DEFINE_END; + +#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */ + +#endif /* __T4_PCI_ID_TBL_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 8d2de1006b084574e78a9095a721cd359b115945..d7bd34ee65bdbcab1acdbdb5826f5248063e2a9f 100644 --- 
a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -155,17 +155,22 @@ #define HOSTPAGESIZEPF2_SHIFT 8 #define HOSTPAGESIZEPF2(x) ((x) << HOSTPAGESIZEPF2_SHIFT) -#define HOSTPAGESIZEPF1_MASK 0x0000000fU -#define HOSTPAGESIZEPF1_SHIFT 4 -#define HOSTPAGESIZEPF1(x) ((x) << HOSTPAGESIZEPF1_SHIFT) +#define HOSTPAGESIZEPF1_M 0x0000000fU +#define HOSTPAGESIZEPF1_S 4 +#define HOSTPAGESIZEPF1(x) ((x) << HOSTPAGESIZEPF1_S) -#define HOSTPAGESIZEPF0_MASK 0x0000000fU -#define HOSTPAGESIZEPF0_SHIFT 0 -#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT) +#define HOSTPAGESIZEPF0_M 0x0000000fU +#define HOSTPAGESIZEPF0_S 0 +#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_S) #define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010 -#define QUEUESPERPAGEPF0_MASK 0x0000000fU -#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK) +#define SGE_EGRESS_QUEUES_PER_PAGE_VF_A 0x1014 + +#define QUEUESPERPAGEPF1_S 4 + +#define QUEUESPERPAGEPF0_S 0 +#define QUEUESPERPAGEPF0_MASK 0x0000000fU +#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK) #define QUEUESPERPAGEPF0 0 #define QUEUESPERPAGEPF1 4 @@ -323,6 +328,7 @@ #define SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc #define SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8 #define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 +#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8 #define S_HP_INT_THRESH 28 #define M_HP_INT_THRESH 0xfU @@ -511,21 +517,62 @@ #define MC_BIST_STATUS_RDATA 0x7688 -#define MA_EDRAM0_BAR 0x77c0 -#define MA_EDRAM1_BAR 0x77c4 -#define EDRAM_SIZE_MASK 0xfffU -#define EDRAM_SIZE_GET(x) ((x) & EDRAM_SIZE_MASK) +#define MA_EDRAM0_BAR_A 0x77c0 + +#define EDRAM0_SIZE_S 0 +#define EDRAM0_SIZE_M 0xfffU +#define EDRAM0_SIZE_V(x) ((x) << EDRAM0_SIZE_S) +#define EDRAM0_SIZE_G(x) (((x) >> EDRAM0_SIZE_S) & EDRAM0_SIZE_M) + +#define MA_EDRAM1_BAR_A 0x77c4 + +#define EDRAM1_SIZE_S 0 +#define EDRAM1_SIZE_M 0xfffU +#define EDRAM1_SIZE_V(x) ((x) << EDRAM1_SIZE_S) +#define EDRAM1_SIZE_G(x) (((x) >> EDRAM1_SIZE_S) & EDRAM1_SIZE_M) + +#define MA_EXT_MEMORY_BAR_A 0x77c8 + +#define EXT_MEM_SIZE_S 0 +#define EXT_MEM_SIZE_M 0xfffU +#define EXT_MEM_SIZE_V(x) ((x) << EXT_MEM_SIZE_S) +#define EXT_MEM_SIZE_G(x) (((x) >> EXT_MEM_SIZE_S) & EXT_MEM_SIZE_M) + +#define MA_EXT_MEMORY1_BAR_A 0x7808 + +#define EXT_MEM1_SIZE_S 0 +#define EXT_MEM1_SIZE_M 0xfffU +#define EXT_MEM1_SIZE_V(x) ((x) << EXT_MEM1_SIZE_S) +#define EXT_MEM1_SIZE_G(x) (((x) >> EXT_MEM1_SIZE_S) & EXT_MEM1_SIZE_M) + +#define MA_EXT_MEMORY0_BAR_A 0x77c8 + +#define EXT_MEM0_SIZE_S 0 +#define EXT_MEM0_SIZE_M 0xfffU +#define EXT_MEM0_SIZE_V(x) ((x) << EXT_MEM0_SIZE_S) +#define EXT_MEM0_SIZE_G(x) (((x) >> EXT_MEM0_SIZE_S) & EXT_MEM0_SIZE_M) + +#define MA_TARGET_MEM_ENABLE_A 0x77d8 + +#define EXT_MEM_ENABLE_S 2 +#define EXT_MEM_ENABLE_V(x) ((x) << EXT_MEM_ENABLE_S) +#define EXT_MEM_ENABLE_F EXT_MEM_ENABLE_V(1U) + +#define EDRAM1_ENABLE_S 1 +#define EDRAM1_ENABLE_V(x) ((x) << EDRAM1_ENABLE_S) +#define EDRAM1_ENABLE_F EDRAM1_ENABLE_V(1U) + +#define EDRAM0_ENABLE_S 0 +#define EDRAM0_ENABLE_V(x) ((x) << EDRAM0_ENABLE_S) +#define EDRAM0_ENABLE_F EDRAM0_ENABLE_V(1U) -#define MA_EXT_MEMORY_BAR 0x77c8 -#define EXT_MEM_SIZE_MASK 0x00000fffU -#define EXT_MEM_SIZE_SHIFT 0 -#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT) +#define EXT_MEM1_ENABLE_S 4 +#define EXT_MEM1_ENABLE_V(x) ((x) << EXT_MEM1_ENABLE_S) +#define EXT_MEM1_ENABLE_F EXT_MEM1_ENABLE_V(1U) -#define MA_TARGET_MEM_ENABLE 0x77d8 -#define EXT_MEM1_ENABLE 0x00000010U -#define EXT_MEM_ENABLE 0x00000004U -#define 
EDRAM1_ENABLE 0x00000002U -#define EDRAM0_ENABLE 0x00000001U +#define EXT_MEM0_ENABLE_S 2 +#define EXT_MEM0_ENABLE_V(x) ((x) << EXT_MEM0_ENABLE_S) +#define EXT_MEM0_ENABLE_F EXT_MEM0_ENABLE_V(1U) #define MA_INT_CAUSE 0x77e0 #define MEM_PERR_INT_CAUSE 0x00000002U @@ -542,7 +589,6 @@ #define MA_PARITY_ERROR_STATUS 0x77f4 #define MA_PARITY_ERROR_STATUS2 0x7804 -#define MA_EXT_MEMORY1_BAR 0x7808 #define EDC_0_BASE_ADDR 0x7900 #define EDC_BIST_CMD 0x7904 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 3409756a85b95586f7640033dc06bb274c4db139..beaf80a6214b9175d36b0c32d15ed3eccb866dd2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -109,18 +109,49 @@ struct fw_wr_hdr { __be32 lo; }; -#define FW_WR_OP(x) ((x) << 24) -#define FW_WR_OP_GET(x) (((x) >> 24) & 0xff) -#define FW_WR_ATOMIC(x) ((x) << 23) -#define FW_WR_FLUSH(x) ((x) << 22) -#define FW_WR_COMPL(x) ((x) << 21) -#define FW_WR_IMMDLEN_MASK 0xff -#define FW_WR_IMMDLEN(x) ((x) << 0) - -#define FW_WR_EQUIQ (1U << 31) -#define FW_WR_EQUEQ (1U << 30) -#define FW_WR_FLOWID(x) ((x) << 8) -#define FW_WR_LEN16(x) ((x) << 0) +/* work request opcode (hi) */ +#define FW_WR_OP_S 24 +#define FW_WR_OP_M 0xff +#define FW_WR_OP_V(x) ((x) << FW_WR_OP_S) +#define FW_WR_OP_G(x) (((x) >> FW_WR_OP_S) & FW_WR_OP_M) + +/* atomic flag (hi) - firmware encapsulates CPLs in CPL_BARRIER */ +#define FW_WR_ATOMIC_S 23 +#define FW_WR_ATOMIC_V(x) ((x) << FW_WR_ATOMIC_S) + +/* flush flag (hi) - firmware flushes flushable work requests buffered + * in the flow context. + */ +#define FW_WR_FLUSH_S 22 +#define FW_WR_FLUSH_V(x) ((x) << FW_WR_FLUSH_S) + +/* completion flag (hi) - firmware generates a cpl_fw6_ack */ +#define FW_WR_COMPL_S 21 +#define FW_WR_COMPL_V(x) ((x) << FW_WR_COMPL_S) +#define FW_WR_COMPL_F FW_WR_COMPL_V(1U) + +/* work request immediate data length (hi) */ +#define FW_WR_IMMDLEN_S 0 +#define FW_WR_IMMDLEN_M 0xff +#define FW_WR_IMMDLEN_V(x) ((x) << FW_WR_IMMDLEN_S) + +/* egress queue status update to associated ingress queue entry (lo) */ +#define FW_WR_EQUIQ_S 31 +#define FW_WR_EQUIQ_V(x) ((x) << FW_WR_EQUIQ_S) +#define FW_WR_EQUIQ_F FW_WR_EQUIQ_V(1U) + +/* egress queue status update to egress queue status entry (lo) */ +#define FW_WR_EQUEQ_S 30 +#define FW_WR_EQUEQ_V(x) ((x) << FW_WR_EQUEQ_S) +#define FW_WR_EQUEQ_F FW_WR_EQUEQ_V(1U) + +/* flow context identifier (lo) */ +#define FW_WR_FLOWID_S 8 +#define FW_WR_FLOWID_V(x) ((x) << FW_WR_FLOWID_S) + +/* length in units of 16 bytes (lo) */ +#define FW_WR_LEN16_S 0 +#define FW_WR_LEN16_V(x) ((x) << FW_WR_LEN16_S) #define HW_TPL_FR_MT_PR_IV_P_FC 0X32B #define HW_TPL_FR_MT_PR_OV_P_FC 0X327 @@ -166,239 +197,239 @@ struct fw_filter_wr { __u8 sma[6]; }; -#define S_FW_FILTER_WR_TID 12 -#define M_FW_FILTER_WR_TID 0xfffff -#define V_FW_FILTER_WR_TID(x) ((x) << S_FW_FILTER_WR_TID) -#define G_FW_FILTER_WR_TID(x) \ - (((x) >> S_FW_FILTER_WR_TID) & M_FW_FILTER_WR_TID) - -#define S_FW_FILTER_WR_RQTYPE 11 -#define M_FW_FILTER_WR_RQTYPE 0x1 -#define V_FW_FILTER_WR_RQTYPE(x) ((x) << S_FW_FILTER_WR_RQTYPE) -#define G_FW_FILTER_WR_RQTYPE(x) \ - (((x) >> S_FW_FILTER_WR_RQTYPE) & M_FW_FILTER_WR_RQTYPE) -#define F_FW_FILTER_WR_RQTYPE V_FW_FILTER_WR_RQTYPE(1U) - -#define S_FW_FILTER_WR_NOREPLY 10 -#define M_FW_FILTER_WR_NOREPLY 0x1 -#define V_FW_FILTER_WR_NOREPLY(x) ((x) << S_FW_FILTER_WR_NOREPLY) -#define G_FW_FILTER_WR_NOREPLY(x) \ - (((x) >> S_FW_FILTER_WR_NOREPLY) & M_FW_FILTER_WR_NOREPLY) -#define
F_FW_FILTER_WR_NOREPLY V_FW_FILTER_WR_NOREPLY(1U) - -#define S_FW_FILTER_WR_IQ 0 -#define M_FW_FILTER_WR_IQ 0x3ff -#define V_FW_FILTER_WR_IQ(x) ((x) << S_FW_FILTER_WR_IQ) -#define G_FW_FILTER_WR_IQ(x) \ - (((x) >> S_FW_FILTER_WR_IQ) & M_FW_FILTER_WR_IQ) - -#define S_FW_FILTER_WR_DEL_FILTER 31 -#define M_FW_FILTER_WR_DEL_FILTER 0x1 -#define V_FW_FILTER_WR_DEL_FILTER(x) ((x) << S_FW_FILTER_WR_DEL_FILTER) -#define G_FW_FILTER_WR_DEL_FILTER(x) \ - (((x) >> S_FW_FILTER_WR_DEL_FILTER) & M_FW_FILTER_WR_DEL_FILTER) -#define F_FW_FILTER_WR_DEL_FILTER V_FW_FILTER_WR_DEL_FILTER(1U) - -#define S_FW_FILTER_WR_RPTTID 25 -#define M_FW_FILTER_WR_RPTTID 0x1 -#define V_FW_FILTER_WR_RPTTID(x) ((x) << S_FW_FILTER_WR_RPTTID) -#define G_FW_FILTER_WR_RPTTID(x) \ - (((x) >> S_FW_FILTER_WR_RPTTID) & M_FW_FILTER_WR_RPTTID) -#define F_FW_FILTER_WR_RPTTID V_FW_FILTER_WR_RPTTID(1U) - -#define S_FW_FILTER_WR_DROP 24 -#define M_FW_FILTER_WR_DROP 0x1 -#define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP) -#define G_FW_FILTER_WR_DROP(x) \ - (((x) >> S_FW_FILTER_WR_DROP) & M_FW_FILTER_WR_DROP) -#define F_FW_FILTER_WR_DROP V_FW_FILTER_WR_DROP(1U) - -#define S_FW_FILTER_WR_DIRSTEER 23 -#define M_FW_FILTER_WR_DIRSTEER 0x1 -#define V_FW_FILTER_WR_DIRSTEER(x) ((x) << S_FW_FILTER_WR_DIRSTEER) -#define G_FW_FILTER_WR_DIRSTEER(x) \ - (((x) >> S_FW_FILTER_WR_DIRSTEER) & M_FW_FILTER_WR_DIRSTEER) -#define F_FW_FILTER_WR_DIRSTEER V_FW_FILTER_WR_DIRSTEER(1U) - -#define S_FW_FILTER_WR_MASKHASH 22 -#define M_FW_FILTER_WR_MASKHASH 0x1 -#define V_FW_FILTER_WR_MASKHASH(x) ((x) << S_FW_FILTER_WR_MASKHASH) -#define G_FW_FILTER_WR_MASKHASH(x) \ - (((x) >> S_FW_FILTER_WR_MASKHASH) & M_FW_FILTER_WR_MASKHASH) -#define F_FW_FILTER_WR_MASKHASH V_FW_FILTER_WR_MASKHASH(1U) - -#define S_FW_FILTER_WR_DIRSTEERHASH 21 -#define M_FW_FILTER_WR_DIRSTEERHASH 0x1 -#define V_FW_FILTER_WR_DIRSTEERHASH(x) ((x) << S_FW_FILTER_WR_DIRSTEERHASH) -#define G_FW_FILTER_WR_DIRSTEERHASH(x) \ - (((x) >> S_FW_FILTER_WR_DIRSTEERHASH) & M_FW_FILTER_WR_DIRSTEERHASH) -#define F_FW_FILTER_WR_DIRSTEERHASH V_FW_FILTER_WR_DIRSTEERHASH(1U) - -#define S_FW_FILTER_WR_LPBK 20 -#define M_FW_FILTER_WR_LPBK 0x1 -#define V_FW_FILTER_WR_LPBK(x) ((x) << S_FW_FILTER_WR_LPBK) -#define G_FW_FILTER_WR_LPBK(x) \ - (((x) >> S_FW_FILTER_WR_LPBK) & M_FW_FILTER_WR_LPBK) -#define F_FW_FILTER_WR_LPBK V_FW_FILTER_WR_LPBK(1U) - -#define S_FW_FILTER_WR_DMAC 19 -#define M_FW_FILTER_WR_DMAC 0x1 -#define V_FW_FILTER_WR_DMAC(x) ((x) << S_FW_FILTER_WR_DMAC) -#define G_FW_FILTER_WR_DMAC(x) \ - (((x) >> S_FW_FILTER_WR_DMAC) & M_FW_FILTER_WR_DMAC) -#define F_FW_FILTER_WR_DMAC V_FW_FILTER_WR_DMAC(1U) - -#define S_FW_FILTER_WR_SMAC 18 -#define M_FW_FILTER_WR_SMAC 0x1 -#define V_FW_FILTER_WR_SMAC(x) ((x) << S_FW_FILTER_WR_SMAC) -#define G_FW_FILTER_WR_SMAC(x) \ - (((x) >> S_FW_FILTER_WR_SMAC) & M_FW_FILTER_WR_SMAC) -#define F_FW_FILTER_WR_SMAC V_FW_FILTER_WR_SMAC(1U) - -#define S_FW_FILTER_WR_INSVLAN 17 -#define M_FW_FILTER_WR_INSVLAN 0x1 -#define V_FW_FILTER_WR_INSVLAN(x) ((x) << S_FW_FILTER_WR_INSVLAN) -#define G_FW_FILTER_WR_INSVLAN(x) \ - (((x) >> S_FW_FILTER_WR_INSVLAN) & M_FW_FILTER_WR_INSVLAN) -#define F_FW_FILTER_WR_INSVLAN V_FW_FILTER_WR_INSVLAN(1U) - -#define S_FW_FILTER_WR_RMVLAN 16 -#define M_FW_FILTER_WR_RMVLAN 0x1 -#define V_FW_FILTER_WR_RMVLAN(x) ((x) << S_FW_FILTER_WR_RMVLAN) -#define G_FW_FILTER_WR_RMVLAN(x) \ - (((x) >> S_FW_FILTER_WR_RMVLAN) & M_FW_FILTER_WR_RMVLAN) -#define F_FW_FILTER_WR_RMVLAN V_FW_FILTER_WR_RMVLAN(1U) - -#define S_FW_FILTER_WR_HITCNTS 15 -#define M_FW_FILTER_WR_HITCNTS 
0x1 -#define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS) -#define G_FW_FILTER_WR_HITCNTS(x) \ - (((x) >> S_FW_FILTER_WR_HITCNTS) & M_FW_FILTER_WR_HITCNTS) -#define F_FW_FILTER_WR_HITCNTS V_FW_FILTER_WR_HITCNTS(1U) - -#define S_FW_FILTER_WR_TXCHAN 13 -#define M_FW_FILTER_WR_TXCHAN 0x3 -#define V_FW_FILTER_WR_TXCHAN(x) ((x) << S_FW_FILTER_WR_TXCHAN) -#define G_FW_FILTER_WR_TXCHAN(x) \ - (((x) >> S_FW_FILTER_WR_TXCHAN) & M_FW_FILTER_WR_TXCHAN) - -#define S_FW_FILTER_WR_PRIO 12 -#define M_FW_FILTER_WR_PRIO 0x1 -#define V_FW_FILTER_WR_PRIO(x) ((x) << S_FW_FILTER_WR_PRIO) -#define G_FW_FILTER_WR_PRIO(x) \ - (((x) >> S_FW_FILTER_WR_PRIO) & M_FW_FILTER_WR_PRIO) -#define F_FW_FILTER_WR_PRIO V_FW_FILTER_WR_PRIO(1U) - -#define S_FW_FILTER_WR_L2TIX 0 -#define M_FW_FILTER_WR_L2TIX 0xfff -#define V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX) -#define G_FW_FILTER_WR_L2TIX(x) \ - (((x) >> S_FW_FILTER_WR_L2TIX) & M_FW_FILTER_WR_L2TIX) - -#define S_FW_FILTER_WR_FRAG 7 -#define M_FW_FILTER_WR_FRAG 0x1 -#define V_FW_FILTER_WR_FRAG(x) ((x) << S_FW_FILTER_WR_FRAG) -#define G_FW_FILTER_WR_FRAG(x) \ - (((x) >> S_FW_FILTER_WR_FRAG) & M_FW_FILTER_WR_FRAG) -#define F_FW_FILTER_WR_FRAG V_FW_FILTER_WR_FRAG(1U) - -#define S_FW_FILTER_WR_FRAGM 6 -#define M_FW_FILTER_WR_FRAGM 0x1 -#define V_FW_FILTER_WR_FRAGM(x) ((x) << S_FW_FILTER_WR_FRAGM) -#define G_FW_FILTER_WR_FRAGM(x) \ - (((x) >> S_FW_FILTER_WR_FRAGM) & M_FW_FILTER_WR_FRAGM) -#define F_FW_FILTER_WR_FRAGM V_FW_FILTER_WR_FRAGM(1U) - -#define S_FW_FILTER_WR_IVLAN_VLD 5 -#define M_FW_FILTER_WR_IVLAN_VLD 0x1 -#define V_FW_FILTER_WR_IVLAN_VLD(x) ((x) << S_FW_FILTER_WR_IVLAN_VLD) -#define G_FW_FILTER_WR_IVLAN_VLD(x) \ - (((x) >> S_FW_FILTER_WR_IVLAN_VLD) & M_FW_FILTER_WR_IVLAN_VLD) -#define F_FW_FILTER_WR_IVLAN_VLD V_FW_FILTER_WR_IVLAN_VLD(1U) - -#define S_FW_FILTER_WR_OVLAN_VLD 4 -#define M_FW_FILTER_WR_OVLAN_VLD 0x1 -#define V_FW_FILTER_WR_OVLAN_VLD(x) ((x) << S_FW_FILTER_WR_OVLAN_VLD) -#define G_FW_FILTER_WR_OVLAN_VLD(x) \ - (((x) >> S_FW_FILTER_WR_OVLAN_VLD) & M_FW_FILTER_WR_OVLAN_VLD) -#define F_FW_FILTER_WR_OVLAN_VLD V_FW_FILTER_WR_OVLAN_VLD(1U) - -#define S_FW_FILTER_WR_IVLAN_VLDM 3 -#define M_FW_FILTER_WR_IVLAN_VLDM 0x1 -#define V_FW_FILTER_WR_IVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_IVLAN_VLDM) -#define G_FW_FILTER_WR_IVLAN_VLDM(x) \ - (((x) >> S_FW_FILTER_WR_IVLAN_VLDM) & M_FW_FILTER_WR_IVLAN_VLDM) -#define F_FW_FILTER_WR_IVLAN_VLDM V_FW_FILTER_WR_IVLAN_VLDM(1U) - -#define S_FW_FILTER_WR_OVLAN_VLDM 2 -#define M_FW_FILTER_WR_OVLAN_VLDM 0x1 -#define V_FW_FILTER_WR_OVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_OVLAN_VLDM) -#define G_FW_FILTER_WR_OVLAN_VLDM(x) \ - (((x) >> S_FW_FILTER_WR_OVLAN_VLDM) & M_FW_FILTER_WR_OVLAN_VLDM) -#define F_FW_FILTER_WR_OVLAN_VLDM V_FW_FILTER_WR_OVLAN_VLDM(1U) - -#define S_FW_FILTER_WR_RX_CHAN 15 -#define M_FW_FILTER_WR_RX_CHAN 0x1 -#define V_FW_FILTER_WR_RX_CHAN(x) ((x) << S_FW_FILTER_WR_RX_CHAN) -#define G_FW_FILTER_WR_RX_CHAN(x) \ - (((x) >> S_FW_FILTER_WR_RX_CHAN) & M_FW_FILTER_WR_RX_CHAN) -#define F_FW_FILTER_WR_RX_CHAN V_FW_FILTER_WR_RX_CHAN(1U) - -#define S_FW_FILTER_WR_RX_RPL_IQ 0 -#define M_FW_FILTER_WR_RX_RPL_IQ 0x3ff -#define V_FW_FILTER_WR_RX_RPL_IQ(x) ((x) << S_FW_FILTER_WR_RX_RPL_IQ) -#define G_FW_FILTER_WR_RX_RPL_IQ(x) \ - (((x) >> S_FW_FILTER_WR_RX_RPL_IQ) & M_FW_FILTER_WR_RX_RPL_IQ) - -#define S_FW_FILTER_WR_MACI 23 -#define M_FW_FILTER_WR_MACI 0x1ff -#define V_FW_FILTER_WR_MACI(x) ((x) << S_FW_FILTER_WR_MACI) -#define G_FW_FILTER_WR_MACI(x) \ - (((x) >> S_FW_FILTER_WR_MACI) & M_FW_FILTER_WR_MACI) - -#define 
S_FW_FILTER_WR_MACIM 14 -#define M_FW_FILTER_WR_MACIM 0x1ff -#define V_FW_FILTER_WR_MACIM(x) ((x) << S_FW_FILTER_WR_MACIM) -#define G_FW_FILTER_WR_MACIM(x) \ - (((x) >> S_FW_FILTER_WR_MACIM) & M_FW_FILTER_WR_MACIM) - -#define S_FW_FILTER_WR_FCOE 13 -#define M_FW_FILTER_WR_FCOE 0x1 -#define V_FW_FILTER_WR_FCOE(x) ((x) << S_FW_FILTER_WR_FCOE) -#define G_FW_FILTER_WR_FCOE(x) \ - (((x) >> S_FW_FILTER_WR_FCOE) & M_FW_FILTER_WR_FCOE) -#define F_FW_FILTER_WR_FCOE V_FW_FILTER_WR_FCOE(1U) - -#define S_FW_FILTER_WR_FCOEM 12 -#define M_FW_FILTER_WR_FCOEM 0x1 -#define V_FW_FILTER_WR_FCOEM(x) ((x) << S_FW_FILTER_WR_FCOEM) -#define G_FW_FILTER_WR_FCOEM(x) \ - (((x) >> S_FW_FILTER_WR_FCOEM) & M_FW_FILTER_WR_FCOEM) -#define F_FW_FILTER_WR_FCOEM V_FW_FILTER_WR_FCOEM(1U) - -#define S_FW_FILTER_WR_PORT 9 -#define M_FW_FILTER_WR_PORT 0x7 -#define V_FW_FILTER_WR_PORT(x) ((x) << S_FW_FILTER_WR_PORT) -#define G_FW_FILTER_WR_PORT(x) \ - (((x) >> S_FW_FILTER_WR_PORT) & M_FW_FILTER_WR_PORT) - -#define S_FW_FILTER_WR_PORTM 6 -#define M_FW_FILTER_WR_PORTM 0x7 -#define V_FW_FILTER_WR_PORTM(x) ((x) << S_FW_FILTER_WR_PORTM) -#define G_FW_FILTER_WR_PORTM(x) \ - (((x) >> S_FW_FILTER_WR_PORTM) & M_FW_FILTER_WR_PORTM) - -#define S_FW_FILTER_WR_MATCHTYPE 3 -#define M_FW_FILTER_WR_MATCHTYPE 0x7 -#define V_FW_FILTER_WR_MATCHTYPE(x) ((x) << S_FW_FILTER_WR_MATCHTYPE) -#define G_FW_FILTER_WR_MATCHTYPE(x) \ - (((x) >> S_FW_FILTER_WR_MATCHTYPE) & M_FW_FILTER_WR_MATCHTYPE) - -#define S_FW_FILTER_WR_MATCHTYPEM 0 -#define M_FW_FILTER_WR_MATCHTYPEM 0x7 -#define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM) -#define G_FW_FILTER_WR_MATCHTYPEM(x) \ - (((x) >> S_FW_FILTER_WR_MATCHTYPEM) & M_FW_FILTER_WR_MATCHTYPEM) +#define FW_FILTER_WR_TID_S 12 +#define FW_FILTER_WR_TID_M 0xfffff +#define FW_FILTER_WR_TID_V(x) ((x) << FW_FILTER_WR_TID_S) +#define FW_FILTER_WR_TID_G(x) \ + (((x) >> FW_FILTER_WR_TID_S) & FW_FILTER_WR_TID_M) + +#define FW_FILTER_WR_RQTYPE_S 11 +#define FW_FILTER_WR_RQTYPE_M 0x1 +#define FW_FILTER_WR_RQTYPE_V(x) ((x) << FW_FILTER_WR_RQTYPE_S) +#define FW_FILTER_WR_RQTYPE_G(x) \ + (((x) >> FW_FILTER_WR_RQTYPE_S) & FW_FILTER_WR_RQTYPE_M) +#define FW_FILTER_WR_RQTYPE_F FW_FILTER_WR_RQTYPE_V(1U) + +#define FW_FILTER_WR_NOREPLY_S 10 +#define FW_FILTER_WR_NOREPLY_M 0x1 +#define FW_FILTER_WR_NOREPLY_V(x) ((x) << FW_FILTER_WR_NOREPLY_S) +#define FW_FILTER_WR_NOREPLY_G(x) \ + (((x) >> FW_FILTER_WR_NOREPLY_S) & FW_FILTER_WR_NOREPLY_M) +#define FW_FILTER_WR_NOREPLY_F FW_FILTER_WR_NOREPLY_V(1U) + +#define FW_FILTER_WR_IQ_S 0 +#define FW_FILTER_WR_IQ_M 0x3ff +#define FW_FILTER_WR_IQ_V(x) ((x) << FW_FILTER_WR_IQ_S) +#define FW_FILTER_WR_IQ_G(x) \ + (((x) >> FW_FILTER_WR_IQ_S) & FW_FILTER_WR_IQ_M) + +#define FW_FILTER_WR_DEL_FILTER_S 31 +#define FW_FILTER_WR_DEL_FILTER_M 0x1 +#define FW_FILTER_WR_DEL_FILTER_V(x) ((x) << FW_FILTER_WR_DEL_FILTER_S) +#define FW_FILTER_WR_DEL_FILTER_G(x) \ + (((x) >> FW_FILTER_WR_DEL_FILTER_S) & FW_FILTER_WR_DEL_FILTER_M) +#define FW_FILTER_WR_DEL_FILTER_F FW_FILTER_WR_DEL_FILTER_V(1U) + +#define FW_FILTER_WR_RPTTID_S 25 +#define FW_FILTER_WR_RPTTID_M 0x1 +#define FW_FILTER_WR_RPTTID_V(x) ((x) << FW_FILTER_WR_RPTTID_S) +#define FW_FILTER_WR_RPTTID_G(x) \ + (((x) >> FW_FILTER_WR_RPTTID_S) & FW_FILTER_WR_RPTTID_M) +#define FW_FILTER_WR_RPTTID_F FW_FILTER_WR_RPTTID_V(1U) + +#define FW_FILTER_WR_DROP_S 24 +#define FW_FILTER_WR_DROP_M 0x1 +#define FW_FILTER_WR_DROP_V(x) ((x) << FW_FILTER_WR_DROP_S) +#define FW_FILTER_WR_DROP_G(x) \ + (((x) >> FW_FILTER_WR_DROP_S) & FW_FILTER_WR_DROP_M) +#define 
FW_FILTER_WR_DROP_F FW_FILTER_WR_DROP_V(1U) + +#define FW_FILTER_WR_DIRSTEER_S 23 +#define FW_FILTER_WR_DIRSTEER_M 0x1 +#define FW_FILTER_WR_DIRSTEER_V(x) ((x) << FW_FILTER_WR_DIRSTEER_S) +#define FW_FILTER_WR_DIRSTEER_G(x) \ + (((x) >> FW_FILTER_WR_DIRSTEER_S) & FW_FILTER_WR_DIRSTEER_M) +#define FW_FILTER_WR_DIRSTEER_F FW_FILTER_WR_DIRSTEER_V(1U) + +#define FW_FILTER_WR_MASKHASH_S 22 +#define FW_FILTER_WR_MASKHASH_M 0x1 +#define FW_FILTER_WR_MASKHASH_V(x) ((x) << FW_FILTER_WR_MASKHASH_S) +#define FW_FILTER_WR_MASKHASH_G(x) \ + (((x) >> FW_FILTER_WR_MASKHASH_S) & FW_FILTER_WR_MASKHASH_M) +#define FW_FILTER_WR_MASKHASH_F FW_FILTER_WR_MASKHASH_V(1U) + +#define FW_FILTER_WR_DIRSTEERHASH_S 21 +#define FW_FILTER_WR_DIRSTEERHASH_M 0x1 +#define FW_FILTER_WR_DIRSTEERHASH_V(x) ((x) << FW_FILTER_WR_DIRSTEERHASH_S) +#define FW_FILTER_WR_DIRSTEERHASH_G(x) \ + (((x) >> FW_FILTER_WR_DIRSTEERHASH_S) & FW_FILTER_WR_DIRSTEERHASH_M) +#define FW_FILTER_WR_DIRSTEERHASH_F FW_FILTER_WR_DIRSTEERHASH_V(1U) + +#define FW_FILTER_WR_LPBK_S 20 +#define FW_FILTER_WR_LPBK_M 0x1 +#define FW_FILTER_WR_LPBK_V(x) ((x) << FW_FILTER_WR_LPBK_S) +#define FW_FILTER_WR_LPBK_G(x) \ + (((x) >> FW_FILTER_WR_LPBK_S) & FW_FILTER_WR_LPBK_M) +#define FW_FILTER_WR_LPBK_F FW_FILTER_WR_LPBK_V(1U) + +#define FW_FILTER_WR_DMAC_S 19 +#define FW_FILTER_WR_DMAC_M 0x1 +#define FW_FILTER_WR_DMAC_V(x) ((x) << FW_FILTER_WR_DMAC_S) +#define FW_FILTER_WR_DMAC_G(x) \ + (((x) >> FW_FILTER_WR_DMAC_S) & FW_FILTER_WR_DMAC_M) +#define FW_FILTER_WR_DMAC_F FW_FILTER_WR_DMAC_V(1U) + +#define FW_FILTER_WR_SMAC_S 18 +#define FW_FILTER_WR_SMAC_M 0x1 +#define FW_FILTER_WR_SMAC_V(x) ((x) << FW_FILTER_WR_SMAC_S) +#define FW_FILTER_WR_SMAC_G(x) \ + (((x) >> FW_FILTER_WR_SMAC_S) & FW_FILTER_WR_SMAC_M) +#define FW_FILTER_WR_SMAC_F FW_FILTER_WR_SMAC_V(1U) + +#define FW_FILTER_WR_INSVLAN_S 17 +#define FW_FILTER_WR_INSVLAN_M 0x1 +#define FW_FILTER_WR_INSVLAN_V(x) ((x) << FW_FILTER_WR_INSVLAN_S) +#define FW_FILTER_WR_INSVLAN_G(x) \ + (((x) >> FW_FILTER_WR_INSVLAN_S) & FW_FILTER_WR_INSVLAN_M) +#define FW_FILTER_WR_INSVLAN_F FW_FILTER_WR_INSVLAN_V(1U) + +#define FW_FILTER_WR_RMVLAN_S 16 +#define FW_FILTER_WR_RMVLAN_M 0x1 +#define FW_FILTER_WR_RMVLAN_V(x) ((x) << FW_FILTER_WR_RMVLAN_S) +#define FW_FILTER_WR_RMVLAN_G(x) \ + (((x) >> FW_FILTER_WR_RMVLAN_S) & FW_FILTER_WR_RMVLAN_M) +#define FW_FILTER_WR_RMVLAN_F FW_FILTER_WR_RMVLAN_V(1U) + +#define FW_FILTER_WR_HITCNTS_S 15 +#define FW_FILTER_WR_HITCNTS_M 0x1 +#define FW_FILTER_WR_HITCNTS_V(x) ((x) << FW_FILTER_WR_HITCNTS_S) +#define FW_FILTER_WR_HITCNTS_G(x) \ + (((x) >> FW_FILTER_WR_HITCNTS_S) & FW_FILTER_WR_HITCNTS_M) +#define FW_FILTER_WR_HITCNTS_F FW_FILTER_WR_HITCNTS_V(1U) + +#define FW_FILTER_WR_TXCHAN_S 13 +#define FW_FILTER_WR_TXCHAN_M 0x3 +#define FW_FILTER_WR_TXCHAN_V(x) ((x) << FW_FILTER_WR_TXCHAN_S) +#define FW_FILTER_WR_TXCHAN_G(x) \ + (((x) >> FW_FILTER_WR_TXCHAN_S) & FW_FILTER_WR_TXCHAN_M) + +#define FW_FILTER_WR_PRIO_S 12 +#define FW_FILTER_WR_PRIO_M 0x1 +#define FW_FILTER_WR_PRIO_V(x) ((x) << FW_FILTER_WR_PRIO_S) +#define FW_FILTER_WR_PRIO_G(x) \ + (((x) >> FW_FILTER_WR_PRIO_S) & FW_FILTER_WR_PRIO_M) +#define FW_FILTER_WR_PRIO_F FW_FILTER_WR_PRIO_V(1U) + +#define FW_FILTER_WR_L2TIX_S 0 +#define FW_FILTER_WR_L2TIX_M 0xfff +#define FW_FILTER_WR_L2TIX_V(x) ((x) << FW_FILTER_WR_L2TIX_S) +#define FW_FILTER_WR_L2TIX_G(x) \ + (((x) >> FW_FILTER_WR_L2TIX_S) & FW_FILTER_WR_L2TIX_M) + +#define FW_FILTER_WR_FRAG_S 7 +#define FW_FILTER_WR_FRAG_M 0x1 +#define FW_FILTER_WR_FRAG_V(x) ((x) << FW_FILTER_WR_FRAG_S) +#define 
FW_FILTER_WR_FRAG_G(x) \ + (((x) >> FW_FILTER_WR_FRAG_S) & FW_FILTER_WR_FRAG_M) +#define FW_FILTER_WR_FRAG_F FW_FILTER_WR_FRAG_V(1U) + +#define FW_FILTER_WR_FRAGM_S 6 +#define FW_FILTER_WR_FRAGM_M 0x1 +#define FW_FILTER_WR_FRAGM_V(x) ((x) << FW_FILTER_WR_FRAGM_S) +#define FW_FILTER_WR_FRAGM_G(x) \ + (((x) >> FW_FILTER_WR_FRAGM_S) & FW_FILTER_WR_FRAGM_M) +#define FW_FILTER_WR_FRAGM_F FW_FILTER_WR_FRAGM_V(1U) + +#define FW_FILTER_WR_IVLAN_VLD_S 5 +#define FW_FILTER_WR_IVLAN_VLD_M 0x1 +#define FW_FILTER_WR_IVLAN_VLD_V(x) ((x) << FW_FILTER_WR_IVLAN_VLD_S) +#define FW_FILTER_WR_IVLAN_VLD_G(x) \ + (((x) >> FW_FILTER_WR_IVLAN_VLD_S) & FW_FILTER_WR_IVLAN_VLD_M) +#define FW_FILTER_WR_IVLAN_VLD_F FW_FILTER_WR_IVLAN_VLD_V(1U) + +#define FW_FILTER_WR_OVLAN_VLD_S 4 +#define FW_FILTER_WR_OVLAN_VLD_M 0x1 +#define FW_FILTER_WR_OVLAN_VLD_V(x) ((x) << FW_FILTER_WR_OVLAN_VLD_S) +#define FW_FILTER_WR_OVLAN_VLD_G(x) \ + (((x) >> FW_FILTER_WR_OVLAN_VLD_S) & FW_FILTER_WR_OVLAN_VLD_M) +#define FW_FILTER_WR_OVLAN_VLD_F FW_FILTER_WR_OVLAN_VLD_V(1U) + +#define FW_FILTER_WR_IVLAN_VLDM_S 3 +#define FW_FILTER_WR_IVLAN_VLDM_M 0x1 +#define FW_FILTER_WR_IVLAN_VLDM_V(x) ((x) << FW_FILTER_WR_IVLAN_VLDM_S) +#define FW_FILTER_WR_IVLAN_VLDM_G(x) \ + (((x) >> FW_FILTER_WR_IVLAN_VLDM_S) & FW_FILTER_WR_IVLAN_VLDM_M) +#define FW_FILTER_WR_IVLAN_VLDM_F FW_FILTER_WR_IVLAN_VLDM_V(1U) + +#define FW_FILTER_WR_OVLAN_VLDM_S 2 +#define FW_FILTER_WR_OVLAN_VLDM_M 0x1 +#define FW_FILTER_WR_OVLAN_VLDM_V(x) ((x) << FW_FILTER_WR_OVLAN_VLDM_S) +#define FW_FILTER_WR_OVLAN_VLDM_G(x) \ + (((x) >> FW_FILTER_WR_OVLAN_VLDM_S) & FW_FILTER_WR_OVLAN_VLDM_M) +#define FW_FILTER_WR_OVLAN_VLDM_F FW_FILTER_WR_OVLAN_VLDM_V(1U) + +#define FW_FILTER_WR_RX_CHAN_S 15 +#define FW_FILTER_WR_RX_CHAN_M 0x1 +#define FW_FILTER_WR_RX_CHAN_V(x) ((x) << FW_FILTER_WR_RX_CHAN_S) +#define FW_FILTER_WR_RX_CHAN_G(x) \ + (((x) >> FW_FILTER_WR_RX_CHAN_S) & FW_FILTER_WR_RX_CHAN_M) +#define FW_FILTER_WR_RX_CHAN_F FW_FILTER_WR_RX_CHAN_V(1U) + +#define FW_FILTER_WR_RX_RPL_IQ_S 0 +#define FW_FILTER_WR_RX_RPL_IQ_M 0x3ff +#define FW_FILTER_WR_RX_RPL_IQ_V(x) ((x) << FW_FILTER_WR_RX_RPL_IQ_S) +#define FW_FILTER_WR_RX_RPL_IQ_G(x) \ + (((x) >> FW_FILTER_WR_RX_RPL_IQ_S) & FW_FILTER_WR_RX_RPL_IQ_M) + +#define FW_FILTER_WR_MACI_S 23 +#define FW_FILTER_WR_MACI_M 0x1ff +#define FW_FILTER_WR_MACI_V(x) ((x) << FW_FILTER_WR_MACI_S) +#define FW_FILTER_WR_MACI_G(x) \ + (((x) >> FW_FILTER_WR_MACI_S) & FW_FILTER_WR_MACI_M) + +#define FW_FILTER_WR_MACIM_S 14 +#define FW_FILTER_WR_MACIM_M 0x1ff +#define FW_FILTER_WR_MACIM_V(x) ((x) << FW_FILTER_WR_MACIM_S) +#define FW_FILTER_WR_MACIM_G(x) \ + (((x) >> FW_FILTER_WR_MACIM_S) & FW_FILTER_WR_MACIM_M) + +#define FW_FILTER_WR_FCOE_S 13 +#define FW_FILTER_WR_FCOE_M 0x1 +#define FW_FILTER_WR_FCOE_V(x) ((x) << FW_FILTER_WR_FCOE_S) +#define FW_FILTER_WR_FCOE_G(x) \ + (((x) >> FW_FILTER_WR_FCOE_S) & FW_FILTER_WR_FCOE_M) +#define FW_FILTER_WR_FCOE_F FW_FILTER_WR_FCOE_V(1U) + +#define FW_FILTER_WR_FCOEM_S 12 +#define FW_FILTER_WR_FCOEM_M 0x1 +#define FW_FILTER_WR_FCOEM_V(x) ((x) << FW_FILTER_WR_FCOEM_S) +#define FW_FILTER_WR_FCOEM_G(x) \ + (((x) >> FW_FILTER_WR_FCOEM_S) & FW_FILTER_WR_FCOEM_M) +#define FW_FILTER_WR_FCOEM_F FW_FILTER_WR_FCOEM_V(1U) + +#define FW_FILTER_WR_PORT_S 9 +#define FW_FILTER_WR_PORT_M 0x7 +#define FW_FILTER_WR_PORT_V(x) ((x) << FW_FILTER_WR_PORT_S) +#define FW_FILTER_WR_PORT_G(x) \ + (((x) >> FW_FILTER_WR_PORT_S) & FW_FILTER_WR_PORT_M) + +#define FW_FILTER_WR_PORTM_S 6 +#define FW_FILTER_WR_PORTM_M 0x7 +#define FW_FILTER_WR_PORTM_V(x) ((x) << 
FW_FILTER_WR_PORTM_S) +#define FW_FILTER_WR_PORTM_G(x) \ + (((x) >> FW_FILTER_WR_PORTM_S) & FW_FILTER_WR_PORTM_M) + +#define FW_FILTER_WR_MATCHTYPE_S 3 +#define FW_FILTER_WR_MATCHTYPE_M 0x7 +#define FW_FILTER_WR_MATCHTYPE_V(x) ((x) << FW_FILTER_WR_MATCHTYPE_S) +#define FW_FILTER_WR_MATCHTYPE_G(x) \ + (((x) >> FW_FILTER_WR_MATCHTYPE_S) & FW_FILTER_WR_MATCHTYPE_M) + +#define FW_FILTER_WR_MATCHTYPEM_S 0 +#define FW_FILTER_WR_MATCHTYPEM_M 0x7 +#define FW_FILTER_WR_MATCHTYPEM_V(x) ((x) << FW_FILTER_WR_MATCHTYPEM_S) +#define FW_FILTER_WR_MATCHTYPEM_G(x) \ + (((x) >> FW_FILTER_WR_MATCHTYPEM_S) & FW_FILTER_WR_MATCHTYPEM_M) struct fw_ulptx_wr { __be32 op_to_compl; @@ -460,65 +491,65 @@ struct fw_ofld_connection_wr { } tcb; }; -#define S_FW_OFLD_CONNECTION_WR_VERSION 31 -#define M_FW_OFLD_CONNECTION_WR_VERSION 0x1 -#define V_FW_OFLD_CONNECTION_WR_VERSION(x) \ - ((x) << S_FW_OFLD_CONNECTION_WR_VERSION) -#define G_FW_OFLD_CONNECTION_WR_VERSION(x) \ - (((x) >> S_FW_OFLD_CONNECTION_WR_VERSION) & \ - M_FW_OFLD_CONNECTION_WR_VERSION) -#define F_FW_OFLD_CONNECTION_WR_VERSION \ - V_FW_OFLD_CONNECTION_WR_VERSION(1U) - -#define S_FW_OFLD_CONNECTION_WR_CPL 30 -#define M_FW_OFLD_CONNECTION_WR_CPL 0x1 -#define V_FW_OFLD_CONNECTION_WR_CPL(x) ((x) << S_FW_OFLD_CONNECTION_WR_CPL) -#define G_FW_OFLD_CONNECTION_WR_CPL(x) \ - (((x) >> S_FW_OFLD_CONNECTION_WR_CPL) & M_FW_OFLD_CONNECTION_WR_CPL) -#define F_FW_OFLD_CONNECTION_WR_CPL V_FW_OFLD_CONNECTION_WR_CPL(1U) - -#define S_FW_OFLD_CONNECTION_WR_T_STATE 28 -#define M_FW_OFLD_CONNECTION_WR_T_STATE 0xf -#define V_FW_OFLD_CONNECTION_WR_T_STATE(x) \ - ((x) << S_FW_OFLD_CONNECTION_WR_T_STATE) -#define G_FW_OFLD_CONNECTION_WR_T_STATE(x) \ - (((x) >> S_FW_OFLD_CONNECTION_WR_T_STATE) & \ - M_FW_OFLD_CONNECTION_WR_T_STATE) - -#define S_FW_OFLD_CONNECTION_WR_RCV_SCALE 24 -#define M_FW_OFLD_CONNECTION_WR_RCV_SCALE 0xf -#define V_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \ - ((x) << S_FW_OFLD_CONNECTION_WR_RCV_SCALE) -#define G_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \ - (((x) >> S_FW_OFLD_CONNECTION_WR_RCV_SCALE) & \ - M_FW_OFLD_CONNECTION_WR_RCV_SCALE) - -#define S_FW_OFLD_CONNECTION_WR_ASTID 0 -#define M_FW_OFLD_CONNECTION_WR_ASTID 0xffffff -#define V_FW_OFLD_CONNECTION_WR_ASTID(x) \ - ((x) << S_FW_OFLD_CONNECTION_WR_ASTID) -#define G_FW_OFLD_CONNECTION_WR_ASTID(x) \ - (((x) >> S_FW_OFLD_CONNECTION_WR_ASTID) & M_FW_OFLD_CONNECTION_WR_ASTID) - -#define S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 15 -#define M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 0x1 -#define V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \ - ((x) << S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) -#define G_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \ - (((x) >> S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) & \ - M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) -#define F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK \ - V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(1U) - -#define S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 14 -#define M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 0x1 -#define V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \ - ((x) << S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) -#define G_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \ - (((x) >> S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) & \ - M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) -#define F_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL \ - V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(1U) +#define FW_OFLD_CONNECTION_WR_VERSION_S 31 +#define FW_OFLD_CONNECTION_WR_VERSION_M 0x1 +#define FW_OFLD_CONNECTION_WR_VERSION_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_VERSION_S) +#define FW_OFLD_CONNECTION_WR_VERSION_G(x) \ + (((x) >> 
FW_OFLD_CONNECTION_WR_VERSION_S) & \ + FW_OFLD_CONNECTION_WR_VERSION_M) +#define FW_OFLD_CONNECTION_WR_VERSION_F \ + FW_OFLD_CONNECTION_WR_VERSION_V(1U) + +#define FW_OFLD_CONNECTION_WR_CPL_S 30 +#define FW_OFLD_CONNECTION_WR_CPL_M 0x1 +#define FW_OFLD_CONNECTION_WR_CPL_V(x) ((x) << FW_OFLD_CONNECTION_WR_CPL_S) +#define FW_OFLD_CONNECTION_WR_CPL_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_CPL_S) & FW_OFLD_CONNECTION_WR_CPL_M) +#define FW_OFLD_CONNECTION_WR_CPL_F FW_OFLD_CONNECTION_WR_CPL_V(1U) + +#define FW_OFLD_CONNECTION_WR_T_STATE_S 28 +#define FW_OFLD_CONNECTION_WR_T_STATE_M 0xf +#define FW_OFLD_CONNECTION_WR_T_STATE_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_T_STATE_S) +#define FW_OFLD_CONNECTION_WR_T_STATE_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_T_STATE_S) & \ + FW_OFLD_CONNECTION_WR_T_STATE_M) + +#define FW_OFLD_CONNECTION_WR_RCV_SCALE_S 24 +#define FW_OFLD_CONNECTION_WR_RCV_SCALE_M 0xf +#define FW_OFLD_CONNECTION_WR_RCV_SCALE_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_RCV_SCALE_S) +#define FW_OFLD_CONNECTION_WR_RCV_SCALE_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_RCV_SCALE_S) & \ + FW_OFLD_CONNECTION_WR_RCV_SCALE_M) + +#define FW_OFLD_CONNECTION_WR_ASTID_S 0 +#define FW_OFLD_CONNECTION_WR_ASTID_M 0xffffff +#define FW_OFLD_CONNECTION_WR_ASTID_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_ASTID_S) +#define FW_OFLD_CONNECTION_WR_ASTID_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_ASTID_S) & FW_OFLD_CONNECTION_WR_ASTID_M) + +#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_S 15 +#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_M 0x1 +#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_CPLRXDATAACK_S) +#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_CPLRXDATAACK_S) & \ + FW_OFLD_CONNECTION_WR_CPLRXDATAACK_M) +#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F \ + FW_OFLD_CONNECTION_WR_CPLRXDATAACK_V(1U) + +#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_S 14 +#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_M 0x1 +#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_S) +#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_S) & \ + FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_M) +#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_F \ + FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_V(1U) enum fw_flowc_mnem { FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */ @@ -539,33 +570,56 @@ struct fw_flowc_mnemval { struct fw_flowc_wr { __be32 op_to_nparams; -#define FW_FLOWC_WR_NPARAMS(x) ((x) << 0) __be32 flowid_len16; struct fw_flowc_mnemval mnemval[0]; }; +#define FW_FLOWC_WR_NPARAMS_S 0 +#define FW_FLOWC_WR_NPARAMS_V(x) ((x) << FW_FLOWC_WR_NPARAMS_S) + struct fw_ofld_tx_data_wr { __be32 op_to_immdlen; __be32 flowid_len16; __be32 plen; __be32 tunnel_to_proxy; -#define FW_OFLD_TX_DATA_WR_TUNNEL(x) ((x) << 19) -#define FW_OFLD_TX_DATA_WR_SAVE(x) ((x) << 18) -#define FW_OFLD_TX_DATA_WR_FLUSH(x) ((x) << 17) -#define FW_OFLD_TX_DATA_WR_URGENT(x) ((x) << 16) -#define FW_OFLD_TX_DATA_WR_MORE(x) ((x) << 15) -#define FW_OFLD_TX_DATA_WR_SHOVE(x) ((x) << 14) -#define FW_OFLD_TX_DATA_WR_ULPMODE(x) ((x) << 10) -#define FW_OFLD_TX_DATA_WR_ULPSUBMODE(x) ((x) << 6) }; +#define FW_OFLD_TX_DATA_WR_TUNNEL_S 19 +#define FW_OFLD_TX_DATA_WR_TUNNEL_V(x) ((x) << FW_OFLD_TX_DATA_WR_TUNNEL_S) + +#define FW_OFLD_TX_DATA_WR_SAVE_S 18 +#define FW_OFLD_TX_DATA_WR_SAVE_V(x) ((x) << FW_OFLD_TX_DATA_WR_SAVE_S) + +#define FW_OFLD_TX_DATA_WR_FLUSH_S 17 +#define FW_OFLD_TX_DATA_WR_FLUSH_V(x) ((x) << FW_OFLD_TX_DATA_WR_FLUSH_S) 
+#define FW_OFLD_TX_DATA_WR_FLUSH_F FW_OFLD_TX_DATA_WR_FLUSH_V(1U) + +#define FW_OFLD_TX_DATA_WR_URGENT_S 16 +#define FW_OFLD_TX_DATA_WR_URGENT_V(x) ((x) << FW_OFLD_TX_DATA_WR_URGENT_S) + +#define FW_OFLD_TX_DATA_WR_MORE_S 15 +#define FW_OFLD_TX_DATA_WR_MORE_V(x) ((x) << FW_OFLD_TX_DATA_WR_MORE_S) + +#define FW_OFLD_TX_DATA_WR_SHOVE_S 14 +#define FW_OFLD_TX_DATA_WR_SHOVE_V(x) ((x) << FW_OFLD_TX_DATA_WR_SHOVE_S) +#define FW_OFLD_TX_DATA_WR_SHOVE_F FW_OFLD_TX_DATA_WR_SHOVE_V(1U) + +#define FW_OFLD_TX_DATA_WR_ULPMODE_S 10 +#define FW_OFLD_TX_DATA_WR_ULPMODE_V(x) ((x) << FW_OFLD_TX_DATA_WR_ULPMODE_S) + +#define FW_OFLD_TX_DATA_WR_ULPSUBMODE_S 6 +#define FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(x) \ + ((x) << FW_OFLD_TX_DATA_WR_ULPSUBMODE_S) + struct fw_cmd_wr { __be32 op_dma; -#define FW_CMD_WR_DMA (1U << 17) __be32 len16_pkd; __be64 cookie_daddr; }; +#define FW_CMD_WR_DMA_S 17 +#define FW_CMD_WR_DMA_V(x) ((x) << FW_CMD_WR_DMA_S) + struct fw_eth_tx_pkt_vm_wr { __be32 op_immdlen; __be32 equiq_to_len16; @@ -641,18 +695,39 @@ struct fw_cmd_hdr { __be32 lo; }; -#define FW_CMD_OP(x) ((x) << 24) -#define FW_CMD_OP_GET(x) (((x) >> 24) & 0xff) -#define FW_CMD_REQUEST (1U << 23) -#define FW_CMD_REQUEST_GET(x) (((x) >> 23) & 0x1) -#define FW_CMD_READ (1U << 22) -#define FW_CMD_WRITE (1U << 21) -#define FW_CMD_EXEC (1U << 20) -#define FW_CMD_RAMASK(x) ((x) << 20) -#define FW_CMD_RETVAL(x) ((x) << 8) -#define FW_CMD_RETVAL_GET(x) (((x) >> 8) & 0xff) -#define FW_CMD_LEN16(x) ((x) << 0) -#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16) +#define FW_CMD_OP_S 24 +#define FW_CMD_OP_M 0xff +#define FW_CMD_OP_V(x) ((x) << FW_CMD_OP_S) +#define FW_CMD_OP_G(x) (((x) >> FW_CMD_OP_S) & FW_CMD_OP_M) + +#define FW_CMD_REQUEST_S 23 +#define FW_CMD_REQUEST_V(x) ((x) << FW_CMD_REQUEST_S) +#define FW_CMD_REQUEST_F FW_CMD_REQUEST_V(1U) + +#define FW_CMD_READ_S 22 +#define FW_CMD_READ_V(x) ((x) << FW_CMD_READ_S) +#define FW_CMD_READ_F FW_CMD_READ_V(1U) + +#define FW_CMD_WRITE_S 21 +#define FW_CMD_WRITE_V(x) ((x) << FW_CMD_WRITE_S) +#define FW_CMD_WRITE_F FW_CMD_WRITE_V(1U) + +#define FW_CMD_EXEC_S 20 +#define FW_CMD_EXEC_V(x) ((x) << FW_CMD_EXEC_S) +#define FW_CMD_EXEC_F FW_CMD_EXEC_V(1U) + +#define FW_CMD_RAMASK_S 20 +#define FW_CMD_RAMASK_V(x) ((x) << FW_CMD_RAMASK_S) + +#define FW_CMD_RETVAL_S 8 +#define FW_CMD_RETVAL_M 0xff +#define FW_CMD_RETVAL_V(x) ((x) << FW_CMD_RETVAL_S) +#define FW_CMD_RETVAL_G(x) (((x) >> FW_CMD_RETVAL_S) & FW_CMD_RETVAL_M) + +#define FW_CMD_LEN16_S 0 +#define FW_CMD_LEN16_V(x) ((x) << FW_CMD_LEN16_S) + +#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16) enum fw_ldst_addrspc { FW_LDST_ADDRSPC_FIRMWARE = 0x0001, @@ -685,7 +760,8 @@ enum fw_ldst_func_mod_index { struct fw_ldst_cmd { __be32 op_to_addrspace; -#define FW_LDST_CMD_ADDRSPACE(x) ((x) << 0) +#define FW_LDST_CMD_ADDRSPACE_S 0 +#define FW_LDST_CMD_ADDRSPACE_V(x) ((x) << FW_LDST_CMD_ADDRSPACE_S) __be32 cycles_to_len16; union fw_ldst { struct fw_ldst_addrval { @@ -741,15 +817,33 @@ struct fw_ldst_cmd { } u; }; -#define FW_LDST_CMD_MSG(x) ((x) << 31) -#define FW_LDST_CMD_PADDR(x) ((x) << 8) -#define FW_LDST_CMD_MMD(x) ((x) << 0) -#define FW_LDST_CMD_FID(x) ((x) << 15) -#define FW_LDST_CMD_CTL(x) ((x) << 0) -#define FW_LDST_CMD_RPLCPF(x) ((x) << 0) -#define FW_LDST_CMD_LC (1U << 4) -#define FW_LDST_CMD_NACCESS(x) ((x) << 0) -#define FW_LDST_CMD_FN(x) ((x) << 0) +#define FW_LDST_CMD_MSG_S 31 +#define FW_LDST_CMD_MSG_V(x) ((x) << FW_LDST_CMD_MSG_S) + +#define FW_LDST_CMD_PADDR_S 8 +#define FW_LDST_CMD_PADDR_V(x) ((x) << 
FW_LDST_CMD_PADDR_S) + +#define FW_LDST_CMD_MMD_S 0 +#define FW_LDST_CMD_MMD_V(x) ((x) << FW_LDST_CMD_MMD_S) + +#define FW_LDST_CMD_FID_S 15 +#define FW_LDST_CMD_FID_V(x) ((x) << FW_LDST_CMD_FID_S) + +#define FW_LDST_CMD_CTL_S 0 +#define FW_LDST_CMD_CTL_V(x) ((x) << FW_LDST_CMD_CTL_S) + +#define FW_LDST_CMD_RPLCPF_S 0 +#define FW_LDST_CMD_RPLCPF_V(x) ((x) << FW_LDST_CMD_RPLCPF_S) + +#define FW_LDST_CMD_LC_S 4 +#define FW_LDST_CMD_LC_V(x) ((x) << FW_LDST_CMD_LC_S) +#define FW_LDST_CMD_LC_F FW_LDST_CMD_LC_V(1U) + +#define FW_LDST_CMD_FN_S 0 +#define FW_LDST_CMD_FN_V(x) ((x) << FW_LDST_CMD_FN_S) + +#define FW_LDST_CMD_NACCESS_S 0 +#define FW_LDST_CMD_NACCESS_V(x) ((x) << FW_LDST_CMD_NACCESS_S) struct fw_reset_cmd { __be32 op_to_write; @@ -758,11 +852,12 @@ struct fw_reset_cmd { __be32 halt_pkd; }; -#define FW_RESET_CMD_HALT_SHIFT 31 -#define FW_RESET_CMD_HALT_MASK 0x1 -#define FW_RESET_CMD_HALT(x) ((x) << FW_RESET_CMD_HALT_SHIFT) -#define FW_RESET_CMD_HALT_GET(x) \ - (((x) >> FW_RESET_CMD_HALT_SHIFT) & FW_RESET_CMD_HALT_MASK) +#define FW_RESET_CMD_HALT_S 31 +#define FW_RESET_CMD_HALT_M 0x1 +#define FW_RESET_CMD_HALT_V(x) ((x) << FW_RESET_CMD_HALT_S) +#define FW_RESET_CMD_HALT_G(x) \ + (((x) >> FW_RESET_CMD_HALT_S) & FW_RESET_CMD_HALT_M) +#define FW_RESET_CMD_HALT_F FW_RESET_CMD_HALT_V(1U) enum fw_hellow_cmd { fw_hello_cmd_stage_os = 0x0 @@ -772,22 +867,42 @@ struct fw_hello_cmd { __be32 op_to_write; __be32 retval_len16; __be32 err_to_clearinit; -#define FW_HELLO_CMD_ERR (1U << 31) -#define FW_HELLO_CMD_INIT (1U << 30) -#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29) -#define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28) -#define FW_HELLO_CMD_MBMASTER_MASK 0xfU -#define FW_HELLO_CMD_MBMASTER_SHIFT 24 -#define FW_HELLO_CMD_MBMASTER(x) ((x) << FW_HELLO_CMD_MBMASTER_SHIFT) -#define FW_HELLO_CMD_MBMASTER_GET(x) \ - (((x) >> FW_HELLO_CMD_MBMASTER_SHIFT) & FW_HELLO_CMD_MBMASTER_MASK) -#define FW_HELLO_CMD_MBASYNCNOTINT(x) ((x) << 23) -#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20) -#define FW_HELLO_CMD_STAGE(x) ((x) << 17) -#define FW_HELLO_CMD_CLEARINIT (1U << 16) __be32 fwrev; }; +#define FW_HELLO_CMD_ERR_S 31 +#define FW_HELLO_CMD_ERR_V(x) ((x) << FW_HELLO_CMD_ERR_S) +#define FW_HELLO_CMD_ERR_F FW_HELLO_CMD_ERR_V(1U) + +#define FW_HELLO_CMD_INIT_S 30 +#define FW_HELLO_CMD_INIT_V(x) ((x) << FW_HELLO_CMD_INIT_S) +#define FW_HELLO_CMD_INIT_F FW_HELLO_CMD_INIT_V(1U) + +#define FW_HELLO_CMD_MASTERDIS_S 29 +#define FW_HELLO_CMD_MASTERDIS_V(x) ((x) << FW_HELLO_CMD_MASTERDIS_S) + +#define FW_HELLO_CMD_MASTERFORCE_S 28 +#define FW_HELLO_CMD_MASTERFORCE_V(x) ((x) << FW_HELLO_CMD_MASTERFORCE_S) + +#define FW_HELLO_CMD_MBMASTER_S 24 +#define FW_HELLO_CMD_MBMASTER_M 0xfU +#define FW_HELLO_CMD_MBMASTER_V(x) ((x) << FW_HELLO_CMD_MBMASTER_S) +#define FW_HELLO_CMD_MBMASTER_G(x) \ + (((x) >> FW_HELLO_CMD_MBMASTER_S) & FW_HELLO_CMD_MBMASTER_M) + +#define FW_HELLO_CMD_MBASYNCNOTINT_S 23 +#define FW_HELLO_CMD_MBASYNCNOTINT_V(x) ((x) << FW_HELLO_CMD_MBASYNCNOTINT_S) + +#define FW_HELLO_CMD_MBASYNCNOT_S 20 +#define FW_HELLO_CMD_MBASYNCNOT_V(x) ((x) << FW_HELLO_CMD_MBASYNCNOT_S) + +#define FW_HELLO_CMD_STAGE_S 17 +#define FW_HELLO_CMD_STAGE_V(x) ((x) << FW_HELLO_CMD_STAGE_S) + +#define FW_HELLO_CMD_CLEARINIT_S 16 +#define FW_HELLO_CMD_CLEARINIT_V(x) ((x) << FW_HELLO_CMD_CLEARINIT_S) +#define FW_HELLO_CMD_CLEARINIT_F FW_HELLO_CMD_CLEARINIT_V(1U) + struct fw_bye_cmd { __be32 op_to_write; __be32 retval_len16; @@ -898,9 +1013,17 @@ struct fw_caps_config_cmd { __be32 finicsum; }; -#define FW_CAPS_CONFIG_CMD_CFVALID (1U << 27) 
-#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) ((x) << 24) -#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) ((x) << 16) +#define FW_CAPS_CONFIG_CMD_CFVALID_S 27 +#define FW_CAPS_CONFIG_CMD_CFVALID_V(x) ((x) << FW_CAPS_CONFIG_CMD_CFVALID_S) +#define FW_CAPS_CONFIG_CMD_CFVALID_F FW_CAPS_CONFIG_CMD_CFVALID_V(1U) + +#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF_S 24 +#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(x) \ + ((x) << FW_CAPS_CONFIG_CMD_MEMTYPE_CF_S) + +#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_S 16 +#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(x) \ + ((x) << FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_S) /* * params command mnemonics @@ -996,20 +1119,29 @@ enum fw_params_param_dmaq { FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13, }; -#define FW_PARAMS_MNEM(x) ((x) << 24) -#define FW_PARAMS_PARAM_X(x) ((x) << 16) -#define FW_PARAMS_PARAM_Y_SHIFT 8 -#define FW_PARAMS_PARAM_Y_MASK 0xffU -#define FW_PARAMS_PARAM_Y(x) ((x) << FW_PARAMS_PARAM_Y_SHIFT) -#define FW_PARAMS_PARAM_Y_GET(x) (((x) >> FW_PARAMS_PARAM_Y_SHIFT) &\ - FW_PARAMS_PARAM_Y_MASK) -#define FW_PARAMS_PARAM_Z_SHIFT 0 -#define FW_PARAMS_PARAM_Z_MASK 0xffu -#define FW_PARAMS_PARAM_Z(x) ((x) << FW_PARAMS_PARAM_Z_SHIFT) -#define FW_PARAMS_PARAM_Z_GET(x) (((x) >> FW_PARAMS_PARAM_Z_SHIFT) &\ - FW_PARAMS_PARAM_Z_MASK) -#define FW_PARAMS_PARAM_XYZ(x) ((x) << 0) -#define FW_PARAMS_PARAM_YZ(x) ((x) << 0) +#define FW_PARAMS_MNEM_S 24 +#define FW_PARAMS_MNEM_V(x) ((x) << FW_PARAMS_MNEM_S) + +#define FW_PARAMS_PARAM_X_S 16 +#define FW_PARAMS_PARAM_X_V(x) ((x) << FW_PARAMS_PARAM_X_S) + +#define FW_PARAMS_PARAM_Y_S 8 +#define FW_PARAMS_PARAM_Y_M 0xffU +#define FW_PARAMS_PARAM_Y_V(x) ((x) << FW_PARAMS_PARAM_Y_S) +#define FW_PARAMS_PARAM_Y_G(x) (((x) >> FW_PARAMS_PARAM_Y_S) &\ + FW_PARAMS_PARAM_Y_M) + +#define FW_PARAMS_PARAM_Z_S 0 +#define FW_PARAMS_PARAM_Z_M 0xffu +#define FW_PARAMS_PARAM_Z_V(x) ((x) << FW_PARAMS_PARAM_Z_S) +#define FW_PARAMS_PARAM_Z_G(x) (((x) >> FW_PARAMS_PARAM_Z_S) &\ + FW_PARAMS_PARAM_Z_M) + +#define FW_PARAMS_PARAM_XYZ_S 0 +#define FW_PARAMS_PARAM_XYZ_V(x) ((x) << FW_PARAMS_PARAM_XYZ_S) + +#define FW_PARAMS_PARAM_YZ_S 0 +#define FW_PARAMS_PARAM_YZ_V(x) ((x) << FW_PARAMS_PARAM_YZ_S) struct fw_params_cmd { __be32 op_to_vfn; @@ -1020,8 +1152,11 @@ struct fw_params_cmd { } param[7]; }; -#define FW_PARAMS_CMD_PFN(x) ((x) << 8) -#define FW_PARAMS_CMD_VFN(x) ((x) << 0) +#define FW_PARAMS_CMD_PFN_S 8 +#define FW_PARAMS_CMD_PFN_V(x) ((x) << FW_PARAMS_CMD_PFN_S) + +#define FW_PARAMS_CMD_VFN_S 0 +#define FW_PARAMS_CMD_VFN_V(x) ((x) << FW_PARAMS_CMD_VFN_S) struct fw_pfvf_cmd { __be32 op_to_vfn; @@ -1035,46 +1170,82 @@ struct fw_pfvf_cmd { __be32 r4; }; -#define FW_PFVF_CMD_PFN(x) ((x) << 8) -#define FW_PFVF_CMD_VFN(x) ((x) << 0) - -#define FW_PFVF_CMD_NIQFLINT(x) ((x) << 20) -#define FW_PFVF_CMD_NIQFLINT_GET(x) (((x) >> 20) & 0xfff) - -#define FW_PFVF_CMD_NIQ(x) ((x) << 0) -#define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff) - -#define FW_PFVF_CMD_TYPE (1 << 31) -#define FW_PFVF_CMD_TYPE_GET(x) (((x) >> 31) & 0x1) - -#define FW_PFVF_CMD_CMASK(x) ((x) << 24) -#define FW_PFVF_CMD_CMASK_MASK 0xf -#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & FW_PFVF_CMD_CMASK_MASK) - -#define FW_PFVF_CMD_PMASK(x) ((x) << 20) -#define FW_PFVF_CMD_PMASK_MASK 0xf -#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & FW_PFVF_CMD_PMASK_MASK) - -#define FW_PFVF_CMD_NEQ(x) ((x) << 0) -#define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff) - -#define FW_PFVF_CMD_TC(x) ((x) << 24) -#define FW_PFVF_CMD_TC_GET(x) (((x) >> 24) & 0xff) - -#define FW_PFVF_CMD_NVI(x) ((x) << 16) -#define 
FW_PFVF_CMD_NVI_GET(x) (((x) >> 16) & 0xff) - -#define FW_PFVF_CMD_NEXACTF(x) ((x) << 0) -#define FW_PFVF_CMD_NEXACTF_GET(x) (((x) >> 0) & 0xffff) - -#define FW_PFVF_CMD_R_CAPS(x) ((x) << 24) -#define FW_PFVF_CMD_R_CAPS_GET(x) (((x) >> 24) & 0xff) - -#define FW_PFVF_CMD_WX_CAPS(x) ((x) << 16) -#define FW_PFVF_CMD_WX_CAPS_GET(x) (((x) >> 16) & 0xff) - -#define FW_PFVF_CMD_NETHCTRL(x) ((x) << 0) -#define FW_PFVF_CMD_NETHCTRL_GET(x) (((x) >> 0) & 0xffff) +#define FW_PFVF_CMD_PFN_S 8 +#define FW_PFVF_CMD_PFN_V(x) ((x) << FW_PFVF_CMD_PFN_S) + +#define FW_PFVF_CMD_VFN_S 0 +#define FW_PFVF_CMD_VFN_V(x) ((x) << FW_PFVF_CMD_VFN_S) + +#define FW_PFVF_CMD_NIQFLINT_S 20 +#define FW_PFVF_CMD_NIQFLINT_M 0xfff +#define FW_PFVF_CMD_NIQFLINT_V(x) ((x) << FW_PFVF_CMD_NIQFLINT_S) +#define FW_PFVF_CMD_NIQFLINT_G(x) \ + (((x) >> FW_PFVF_CMD_NIQFLINT_S) & FW_PFVF_CMD_NIQFLINT_M) + +#define FW_PFVF_CMD_NIQ_S 0 +#define FW_PFVF_CMD_NIQ_M 0xfffff +#define FW_PFVF_CMD_NIQ_V(x) ((x) << FW_PFVF_CMD_NIQ_S) +#define FW_PFVF_CMD_NIQ_G(x) \ + (((x) >> FW_PFVF_CMD_NIQ_S) & FW_PFVF_CMD_NIQ_M) + +#define FW_PFVF_CMD_TYPE_S 31 +#define FW_PFVF_CMD_TYPE_M 0x1 +#define FW_PFVF_CMD_TYPE_V(x) ((x) << FW_PFVF_CMD_TYPE_S) +#define FW_PFVF_CMD_TYPE_G(x) \ + (((x) >> FW_PFVF_CMD_TYPE_S) & FW_PFVF_CMD_TYPE_M) +#define FW_PFVF_CMD_TYPE_F FW_PFVF_CMD_TYPE_V(1U) + +#define FW_PFVF_CMD_CMASK_S 24 +#define FW_PFVF_CMD_CMASK_M 0xf +#define FW_PFVF_CMD_CMASK_V(x) ((x) << FW_PFVF_CMD_CMASK_S) +#define FW_PFVF_CMD_CMASK_G(x) \ + (((x) >> FW_PFVF_CMD_CMASK_S) & FW_PFVF_CMD_CMASK_M) + +#define FW_PFVF_CMD_PMASK_S 20 +#define FW_PFVF_CMD_PMASK_M 0xf +#define FW_PFVF_CMD_PMASK_V(x) ((x) << FW_PFVF_CMD_PMASK_S) +#define FW_PFVF_CMD_PMASK_G(x) \ + (((x) >> FW_PFVF_CMD_PMASK_S) & FW_PFVF_CMD_PMASK_M) + +#define FW_PFVF_CMD_NEQ_S 0 +#define FW_PFVF_CMD_NEQ_M 0xfffff +#define FW_PFVF_CMD_NEQ_V(x) ((x) << FW_PFVF_CMD_NEQ_S) +#define FW_PFVF_CMD_NEQ_G(x) \ + (((x) >> FW_PFVF_CMD_NEQ_S) & FW_PFVF_CMD_NEQ_M) + +#define FW_PFVF_CMD_TC_S 24 +#define FW_PFVF_CMD_TC_M 0xff +#define FW_PFVF_CMD_TC_V(x) ((x) << FW_PFVF_CMD_TC_S) +#define FW_PFVF_CMD_TC_G(x) (((x) >> FW_PFVF_CMD_TC_S) & FW_PFVF_CMD_TC_M) + +#define FW_PFVF_CMD_NVI_S 16 +#define FW_PFVF_CMD_NVI_M 0xff +#define FW_PFVF_CMD_NVI_V(x) ((x) << FW_PFVF_CMD_NVI_S) +#define FW_PFVF_CMD_NVI_G(x) (((x) >> FW_PFVF_CMD_NVI_S) & FW_PFVF_CMD_NVI_M) + +#define FW_PFVF_CMD_NEXACTF_S 0 +#define FW_PFVF_CMD_NEXACTF_M 0xffff +#define FW_PFVF_CMD_NEXACTF_V(x) ((x) << FW_PFVF_CMD_NEXACTF_S) +#define FW_PFVF_CMD_NEXACTF_G(x) \ + (((x) >> FW_PFVF_CMD_NEXACTF_S) & FW_PFVF_CMD_NEXACTF_M) + +#define FW_PFVF_CMD_R_CAPS_S 24 +#define FW_PFVF_CMD_R_CAPS_M 0xff +#define FW_PFVF_CMD_R_CAPS_V(x) ((x) << FW_PFVF_CMD_R_CAPS_S) +#define FW_PFVF_CMD_R_CAPS_G(x) \ + (((x) >> FW_PFVF_CMD_R_CAPS_S) & FW_PFVF_CMD_R_CAPS_M) + +#define FW_PFVF_CMD_WX_CAPS_S 16 +#define FW_PFVF_CMD_WX_CAPS_M 0xff +#define FW_PFVF_CMD_WX_CAPS_V(x) ((x) << FW_PFVF_CMD_WX_CAPS_S) +#define FW_PFVF_CMD_WX_CAPS_G(x) \ + (((x) >> FW_PFVF_CMD_WX_CAPS_S) & FW_PFVF_CMD_WX_CAPS_M) + +#define FW_PFVF_CMD_NETHCTRL_S 0 +#define FW_PFVF_CMD_NETHCTRL_M 0xffff +#define FW_PFVF_CMD_NETHCTRL_V(x) ((x) << FW_PFVF_CMD_NETHCTRL_S) +#define FW_PFVF_CMD_NETHCTRL_G(x) \ + (((x) >> FW_PFVF_CMD_NETHCTRL_S) & FW_PFVF_CMD_NETHCTRL_M) enum fw_iq_type { FW_IQ_TYPE_FL_INT_CAP, @@ -1102,85 +1273,239 @@ struct fw_iq_cmd { __be64 fl1addr; }; -#define FW_IQ_CMD_PFN(x) ((x) << 8) -#define FW_IQ_CMD_VFN(x) ((x) << 0) - -#define FW_IQ_CMD_ALLOC (1U << 31) -#define FW_IQ_CMD_FREE (1U << 30) 
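
The FW_PARAMS_* group converted above is how the drivers name a firmware parameter: a mnemonic in bits 31:24, an X selector in bits 23:16, and Y/Z (or a combined YZ index) in the low bits, exactly the composition the cxgb4vf_main.c hunk later in this patch performs with FW_PARAMS_MNEM_V, FW_PARAMS_PARAM_X_V and FW_PARAMS_PARAM_YZ_V. A minimal userspace sketch follows; the macro definitions are reproduced from the patch so it builds standalone, and the mnemonic, X and YZ values are made up for the example.

/* Composing and decoding a FW_PARAMS selector word with the new macros.
 * The numeric values below are fabricated for the demonstration.
 */
#include <stdio.h>
#include <stdint.h>

#define FW_PARAMS_MNEM_S	24
#define FW_PARAMS_MNEM_V(x)	((x) << FW_PARAMS_MNEM_S)

#define FW_PARAMS_PARAM_X_S	16
#define FW_PARAMS_PARAM_X_V(x)	((x) << FW_PARAMS_PARAM_X_S)

#define FW_PARAMS_PARAM_Y_S	8
#define FW_PARAMS_PARAM_Y_M	0xffU
#define FW_PARAMS_PARAM_Y_G(x)	(((x) >> FW_PARAMS_PARAM_Y_S) & \
				 FW_PARAMS_PARAM_Y_M)

#define FW_PARAMS_PARAM_YZ_S	0
#define FW_PARAMS_PARAM_YZ_V(x)	((x) << FW_PARAMS_PARAM_YZ_S)

int main(void)
{
	uint32_t param = FW_PARAMS_MNEM_V(0x2) |	/* mnemonic (made up) */
			 FW_PARAMS_PARAM_X_V(0x0a) |	/* X selector (made up) */
			 FW_PARAMS_PARAM_YZ_V(0x123);	/* e.g. a queue index */

	printf("param = 0x%08x\n", (unsigned)param);	/* 0x020a0123 */
	printf("Y     = 0x%02x\n", (unsigned)FW_PARAMS_PARAM_Y_G(param)); /* 0x01 */
	return 0;
}
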
-#define FW_IQ_CMD_MODIFY (1U << 29) -#define FW_IQ_CMD_IQSTART(x) ((x) << 28) -#define FW_IQ_CMD_IQSTOP(x) ((x) << 27) - -#define FW_IQ_CMD_TYPE(x) ((x) << 29) -#define FW_IQ_CMD_IQASYNCH(x) ((x) << 28) -#define FW_IQ_CMD_VIID(x) ((x) << 16) -#define FW_IQ_CMD_IQANDST(x) ((x) << 15) -#define FW_IQ_CMD_IQANUS(x) ((x) << 14) -#define FW_IQ_CMD_IQANUD(x) ((x) << 12) -#define FW_IQ_CMD_IQANDSTINDEX(x) ((x) << 0) - -#define FW_IQ_CMD_IQDROPRSS (1U << 15) -#define FW_IQ_CMD_IQGTSMODE (1U << 14) -#define FW_IQ_CMD_IQPCIECH(x) ((x) << 12) -#define FW_IQ_CMD_IQDCAEN(x) ((x) << 11) -#define FW_IQ_CMD_IQDCACPU(x) ((x) << 6) -#define FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << 4) -#define FW_IQ_CMD_IQO (1U << 3) -#define FW_IQ_CMD_IQCPRIO(x) ((x) << 2) -#define FW_IQ_CMD_IQESIZE(x) ((x) << 0) - -#define FW_IQ_CMD_IQNS(x) ((x) << 31) -#define FW_IQ_CMD_IQRO(x) ((x) << 30) -#define FW_IQ_CMD_IQFLINTIQHSEN(x) ((x) << 28) -#define FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << 27) -#define FW_IQ_CMD_IQFLINTISCSIC(x) ((x) << 26) -#define FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << 20) -#define FW_IQ_CMD_FL0CACHELOCK(x) ((x) << 15) -#define FW_IQ_CMD_FL0DBP(x) ((x) << 14) -#define FW_IQ_CMD_FL0DATANS(x) ((x) << 13) -#define FW_IQ_CMD_FL0DATARO(x) ((x) << 12) -#define FW_IQ_CMD_FL0CONGCIF(x) ((x) << 11) -#define FW_IQ_CMD_FL0ONCHIP(x) ((x) << 10) -#define FW_IQ_CMD_FL0STATUSPGNS(x) ((x) << 9) -#define FW_IQ_CMD_FL0STATUSPGRO(x) ((x) << 8) -#define FW_IQ_CMD_FL0FETCHNS(x) ((x) << 7) -#define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6) -#define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4) -#define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3) -#define FW_IQ_CMD_FL0PADEN(x) ((x) << 2) -#define FW_IQ_CMD_FL0PACKEN(x) ((x) << 1) -#define FW_IQ_CMD_FL0CONGEN (1U << 0) - -#define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15) -#define FW_IQ_CMD_FL0DCACPU(x) ((x) << 10) -#define FW_IQ_CMD_FL0FBMIN(x) ((x) << 7) -#define FW_IQ_CMD_FL0FBMAX(x) ((x) << 4) -#define FW_IQ_CMD_FL0CIDXFTHRESHO (1U << 3) -#define FW_IQ_CMD_FL0CIDXFTHRESH(x) ((x) << 0) - -#define FW_IQ_CMD_FL1CNGCHMAP(x) ((x) << 20) -#define FW_IQ_CMD_FL1CACHELOCK(x) ((x) << 15) -#define FW_IQ_CMD_FL1DBP(x) ((x) << 14) -#define FW_IQ_CMD_FL1DATANS(x) ((x) << 13) -#define FW_IQ_CMD_FL1DATARO(x) ((x) << 12) -#define FW_IQ_CMD_FL1CONGCIF(x) ((x) << 11) -#define FW_IQ_CMD_FL1ONCHIP(x) ((x) << 10) -#define FW_IQ_CMD_FL1STATUSPGNS(x) ((x) << 9) -#define FW_IQ_CMD_FL1STATUSPGRO(x) ((x) << 8) -#define FW_IQ_CMD_FL1FETCHNS(x) ((x) << 7) -#define FW_IQ_CMD_FL1FETCHRO(x) ((x) << 6) -#define FW_IQ_CMD_FL1HOSTFCMODE(x) ((x) << 4) -#define FW_IQ_CMD_FL1CPRIO(x) ((x) << 3) -#define FW_IQ_CMD_FL1PADEN (1U << 2) -#define FW_IQ_CMD_FL1PACKEN (1U << 1) -#define FW_IQ_CMD_FL1CONGEN (1U << 0) - -#define FW_IQ_CMD_FL1DCAEN(x) ((x) << 15) -#define FW_IQ_CMD_FL1DCACPU(x) ((x) << 10) -#define FW_IQ_CMD_FL1FBMIN(x) ((x) << 7) -#define FW_IQ_CMD_FL1FBMAX(x) ((x) << 4) -#define FW_IQ_CMD_FL1CIDXFTHRESHO (1U << 3) -#define FW_IQ_CMD_FL1CIDXFTHRESH(x) ((x) << 0) +#define FW_IQ_CMD_PFN_S 8 +#define FW_IQ_CMD_PFN_V(x) ((x) << FW_IQ_CMD_PFN_S) + +#define FW_IQ_CMD_VFN_S 0 +#define FW_IQ_CMD_VFN_V(x) ((x) << FW_IQ_CMD_VFN_S) + +#define FW_IQ_CMD_ALLOC_S 31 +#define FW_IQ_CMD_ALLOC_V(x) ((x) << FW_IQ_CMD_ALLOC_S) +#define FW_IQ_CMD_ALLOC_F FW_IQ_CMD_ALLOC_V(1U) + +#define FW_IQ_CMD_FREE_S 30 +#define FW_IQ_CMD_FREE_V(x) ((x) << FW_IQ_CMD_FREE_S) +#define FW_IQ_CMD_FREE_F FW_IQ_CMD_FREE_V(1U) + +#define FW_IQ_CMD_MODIFY_S 29 +#define FW_IQ_CMD_MODIFY_V(x) ((x) << FW_IQ_CMD_MODIFY_S) +#define FW_IQ_CMD_MODIFY_F FW_IQ_CMD_MODIFY_V(1U) + +#define FW_IQ_CMD_IQSTART_S 
28 +#define FW_IQ_CMD_IQSTART_V(x) ((x) << FW_IQ_CMD_IQSTART_S) +#define FW_IQ_CMD_IQSTART_F FW_IQ_CMD_IQSTART_V(1U) + +#define FW_IQ_CMD_IQSTOP_S 27 +#define FW_IQ_CMD_IQSTOP_V(x) ((x) << FW_IQ_CMD_IQSTOP_S) +#define FW_IQ_CMD_IQSTOP_F FW_IQ_CMD_IQSTOP_V(1U) + +#define FW_IQ_CMD_TYPE_S 29 +#define FW_IQ_CMD_TYPE_V(x) ((x) << FW_IQ_CMD_TYPE_S) + +#define FW_IQ_CMD_IQASYNCH_S 28 +#define FW_IQ_CMD_IQASYNCH_V(x) ((x) << FW_IQ_CMD_IQASYNCH_S) + +#define FW_IQ_CMD_VIID_S 16 +#define FW_IQ_CMD_VIID_V(x) ((x) << FW_IQ_CMD_VIID_S) + +#define FW_IQ_CMD_IQANDST_S 15 +#define FW_IQ_CMD_IQANDST_V(x) ((x) << FW_IQ_CMD_IQANDST_S) + +#define FW_IQ_CMD_IQANUS_S 14 +#define FW_IQ_CMD_IQANUS_V(x) ((x) << FW_IQ_CMD_IQANUS_S) + +#define FW_IQ_CMD_IQANUD_S 12 +#define FW_IQ_CMD_IQANUD_V(x) ((x) << FW_IQ_CMD_IQANUD_S) + +#define FW_IQ_CMD_IQANDSTINDEX_S 0 +#define FW_IQ_CMD_IQANDSTINDEX_V(x) ((x) << FW_IQ_CMD_IQANDSTINDEX_S) + +#define FW_IQ_CMD_IQDROPRSS_S 15 +#define FW_IQ_CMD_IQDROPRSS_V(x) ((x) << FW_IQ_CMD_IQDROPRSS_S) +#define FW_IQ_CMD_IQDROPRSS_F FW_IQ_CMD_IQDROPRSS_V(1U) + +#define FW_IQ_CMD_IQGTSMODE_S 14 +#define FW_IQ_CMD_IQGTSMODE_V(x) ((x) << FW_IQ_CMD_IQGTSMODE_S) +#define FW_IQ_CMD_IQGTSMODE_F FW_IQ_CMD_IQGTSMODE_V(1U) + +#define FW_IQ_CMD_IQPCIECH_S 12 +#define FW_IQ_CMD_IQPCIECH_V(x) ((x) << FW_IQ_CMD_IQPCIECH_S) + +#define FW_IQ_CMD_IQDCAEN_S 11 +#define FW_IQ_CMD_IQDCAEN_V(x) ((x) << FW_IQ_CMD_IQDCAEN_S) + +#define FW_IQ_CMD_IQDCACPU_S 6 +#define FW_IQ_CMD_IQDCACPU_V(x) ((x) << FW_IQ_CMD_IQDCACPU_S) + +#define FW_IQ_CMD_IQINTCNTTHRESH_S 4 +#define FW_IQ_CMD_IQINTCNTTHRESH_V(x) ((x) << FW_IQ_CMD_IQINTCNTTHRESH_S) + +#define FW_IQ_CMD_IQO_S 3 +#define FW_IQ_CMD_IQO_V(x) ((x) << FW_IQ_CMD_IQO_S) +#define FW_IQ_CMD_IQO_F FW_IQ_CMD_IQO_V(1U) + +#define FW_IQ_CMD_IQCPRIO_S 2 +#define FW_IQ_CMD_IQCPRIO_V(x) ((x) << FW_IQ_CMD_IQCPRIO_S) + +#define FW_IQ_CMD_IQESIZE_S 0 +#define FW_IQ_CMD_IQESIZE_V(x) ((x) << FW_IQ_CMD_IQESIZE_S) + +#define FW_IQ_CMD_IQNS_S 31 +#define FW_IQ_CMD_IQNS_V(x) ((x) << FW_IQ_CMD_IQNS_S) + +#define FW_IQ_CMD_IQRO_S 30 +#define FW_IQ_CMD_IQRO_V(x) ((x) << FW_IQ_CMD_IQRO_S) + +#define FW_IQ_CMD_IQFLINTIQHSEN_S 28 +#define FW_IQ_CMD_IQFLINTIQHSEN_V(x) ((x) << FW_IQ_CMD_IQFLINTIQHSEN_S) + +#define FW_IQ_CMD_IQFLINTCONGEN_S 27 +#define FW_IQ_CMD_IQFLINTCONGEN_V(x) ((x) << FW_IQ_CMD_IQFLINTCONGEN_S) + +#define FW_IQ_CMD_IQFLINTISCSIC_S 26 +#define FW_IQ_CMD_IQFLINTISCSIC_V(x) ((x) << FW_IQ_CMD_IQFLINTISCSIC_S) + +#define FW_IQ_CMD_FL0CNGCHMAP_S 20 +#define FW_IQ_CMD_FL0CNGCHMAP_V(x) ((x) << FW_IQ_CMD_FL0CNGCHMAP_S) + +#define FW_IQ_CMD_FL0CACHELOCK_S 15 +#define FW_IQ_CMD_FL0CACHELOCK_V(x) ((x) << FW_IQ_CMD_FL0CACHELOCK_S) + +#define FW_IQ_CMD_FL0DBP_S 14 +#define FW_IQ_CMD_FL0DBP_V(x) ((x) << FW_IQ_CMD_FL0DBP_S) + +#define FW_IQ_CMD_FL0DATANS_S 13 +#define FW_IQ_CMD_FL0DATANS_V(x) ((x) << FW_IQ_CMD_FL0DATANS_S) + +#define FW_IQ_CMD_FL0DATARO_S 12 +#define FW_IQ_CMD_FL0DATARO_V(x) ((x) << FW_IQ_CMD_FL0DATARO_S) +#define FW_IQ_CMD_FL0DATARO_F FW_IQ_CMD_FL0DATARO_V(1U) + +#define FW_IQ_CMD_FL0CONGCIF_S 11 +#define FW_IQ_CMD_FL0CONGCIF_V(x) ((x) << FW_IQ_CMD_FL0CONGCIF_S) + +#define FW_IQ_CMD_FL0ONCHIP_S 10 +#define FW_IQ_CMD_FL0ONCHIP_V(x) ((x) << FW_IQ_CMD_FL0ONCHIP_S) + +#define FW_IQ_CMD_FL0STATUSPGNS_S 9 +#define FW_IQ_CMD_FL0STATUSPGNS_V(x) ((x) << FW_IQ_CMD_FL0STATUSPGNS_S) + +#define FW_IQ_CMD_FL0STATUSPGRO_S 8 +#define FW_IQ_CMD_FL0STATUSPGRO_V(x) ((x) << FW_IQ_CMD_FL0STATUSPGRO_S) + +#define FW_IQ_CMD_FL0FETCHNS_S 7 +#define FW_IQ_CMD_FL0FETCHNS_V(x) ((x) << 
FW_IQ_CMD_FL0FETCHNS_S) + +#define FW_IQ_CMD_FL0FETCHRO_S 6 +#define FW_IQ_CMD_FL0FETCHRO_V(x) ((x) << FW_IQ_CMD_FL0FETCHRO_S) +#define FW_IQ_CMD_FL0FETCHRO_F FW_IQ_CMD_FL0FETCHRO_V(1U) + +#define FW_IQ_CMD_FL0HOSTFCMODE_S 4 +#define FW_IQ_CMD_FL0HOSTFCMODE_V(x) ((x) << FW_IQ_CMD_FL0HOSTFCMODE_S) + +#define FW_IQ_CMD_FL0CPRIO_S 3 +#define FW_IQ_CMD_FL0CPRIO_V(x) ((x) << FW_IQ_CMD_FL0CPRIO_S) + +#define FW_IQ_CMD_FL0PADEN_S 2 +#define FW_IQ_CMD_FL0PADEN_V(x) ((x) << FW_IQ_CMD_FL0PADEN_S) +#define FW_IQ_CMD_FL0PADEN_F FW_IQ_CMD_FL0PADEN_V(1U) + +#define FW_IQ_CMD_FL0PACKEN_S 1 +#define FW_IQ_CMD_FL0PACKEN_V(x) ((x) << FW_IQ_CMD_FL0PACKEN_S) +#define FW_IQ_CMD_FL0PACKEN_F FW_IQ_CMD_FL0PACKEN_V(1U) + +#define FW_IQ_CMD_FL0CONGEN_S 0 +#define FW_IQ_CMD_FL0CONGEN_V(x) ((x) << FW_IQ_CMD_FL0CONGEN_S) +#define FW_IQ_CMD_FL0CONGEN_F FW_IQ_CMD_FL0CONGEN_V(1U) + +#define FW_IQ_CMD_FL0DCAEN_S 15 +#define FW_IQ_CMD_FL0DCAEN_V(x) ((x) << FW_IQ_CMD_FL0DCAEN_S) + +#define FW_IQ_CMD_FL0DCACPU_S 10 +#define FW_IQ_CMD_FL0DCACPU_V(x) ((x) << FW_IQ_CMD_FL0DCACPU_S) + +#define FW_IQ_CMD_FL0FBMIN_S 7 +#define FW_IQ_CMD_FL0FBMIN_V(x) ((x) << FW_IQ_CMD_FL0FBMIN_S) + +#define FW_IQ_CMD_FL0FBMAX_S 4 +#define FW_IQ_CMD_FL0FBMAX_V(x) ((x) << FW_IQ_CMD_FL0FBMAX_S) + +#define FW_IQ_CMD_FL0CIDXFTHRESHO_S 3 +#define FW_IQ_CMD_FL0CIDXFTHRESHO_V(x) ((x) << FW_IQ_CMD_FL0CIDXFTHRESHO_S) +#define FW_IQ_CMD_FL0CIDXFTHRESHO_F FW_IQ_CMD_FL0CIDXFTHRESHO_V(1U) + +#define FW_IQ_CMD_FL0CIDXFTHRESH_S 0 +#define FW_IQ_CMD_FL0CIDXFTHRESH_V(x) ((x) << FW_IQ_CMD_FL0CIDXFTHRESH_S) + +#define FW_IQ_CMD_FL1CNGCHMAP_S 20 +#define FW_IQ_CMD_FL1CNGCHMAP_V(x) ((x) << FW_IQ_CMD_FL1CNGCHMAP_S) + +#define FW_IQ_CMD_FL1CACHELOCK_S 15 +#define FW_IQ_CMD_FL1CACHELOCK_V(x) ((x) << FW_IQ_CMD_FL1CACHELOCK_S) + +#define FW_IQ_CMD_FL1DBP_S 14 +#define FW_IQ_CMD_FL1DBP_V(x) ((x) << FW_IQ_CMD_FL1DBP_S) + +#define FW_IQ_CMD_FL1DATANS_S 13 +#define FW_IQ_CMD_FL1DATANS_V(x) ((x) << FW_IQ_CMD_FL1DATANS_S) + +#define FW_IQ_CMD_FL1DATARO_S 12 +#define FW_IQ_CMD_FL1DATARO_V(x) ((x) << FW_IQ_CMD_FL1DATARO_S) + +#define FW_IQ_CMD_FL1CONGCIF_S 11 +#define FW_IQ_CMD_FL1CONGCIF_V(x) ((x) << FW_IQ_CMD_FL1CONGCIF_S) + +#define FW_IQ_CMD_FL1ONCHIP_S 10 +#define FW_IQ_CMD_FL1ONCHIP_V(x) ((x) << FW_IQ_CMD_FL1ONCHIP_S) + +#define FW_IQ_CMD_FL1STATUSPGNS_S 9 +#define FW_IQ_CMD_FL1STATUSPGNS_V(x) ((x) << FW_IQ_CMD_FL1STATUSPGNS_S) + +#define FW_IQ_CMD_FL1STATUSPGRO_S 8 +#define FW_IQ_CMD_FL1STATUSPGRO_V(x) ((x) << FW_IQ_CMD_FL1STATUSPGRO_S) + +#define FW_IQ_CMD_FL1FETCHNS_S 7 +#define FW_IQ_CMD_FL1FETCHNS_V(x) ((x) << FW_IQ_CMD_FL1FETCHNS_S) + +#define FW_IQ_CMD_FL1FETCHRO_S 6 +#define FW_IQ_CMD_FL1FETCHRO_V(x) ((x) << FW_IQ_CMD_FL1FETCHRO_S) + +#define FW_IQ_CMD_FL1HOSTFCMODE_S 4 +#define FW_IQ_CMD_FL1HOSTFCMODE_V(x) ((x) << FW_IQ_CMD_FL1HOSTFCMODE_S) + +#define FW_IQ_CMD_FL1CPRIO_S 3 +#define FW_IQ_CMD_FL1CPRIO_V(x) ((x) << FW_IQ_CMD_FL1CPRIO_S) + +#define FW_IQ_CMD_FL1PADEN_S 2 +#define FW_IQ_CMD_FL1PADEN_V(x) ((x) << FW_IQ_CMD_FL1PADEN_S) +#define FW_IQ_CMD_FL1PADEN_F FW_IQ_CMD_FL1PADEN_V(1U) + +#define FW_IQ_CMD_FL1PACKEN_S 1 +#define FW_IQ_CMD_FL1PACKEN_V(x) ((x) << FW_IQ_CMD_FL1PACKEN_S) +#define FW_IQ_CMD_FL1PACKEN_F FW_IQ_CMD_FL1PACKEN_V(1U) + +#define FW_IQ_CMD_FL1CONGEN_S 0 +#define FW_IQ_CMD_FL1CONGEN_V(x) ((x) << FW_IQ_CMD_FL1CONGEN_S) +#define FW_IQ_CMD_FL1CONGEN_F FW_IQ_CMD_FL1CONGEN_V(1U) + +#define FW_IQ_CMD_FL1DCAEN_S 15 +#define FW_IQ_CMD_FL1DCAEN_V(x) ((x) << FW_IQ_CMD_FL1DCAEN_S) + +#define FW_IQ_CMD_FL1DCACPU_S 10 +#define FW_IQ_CMD_FL1DCACPU_V(x) ((x) << 
FW_IQ_CMD_FL1DCACPU_S) + +#define FW_IQ_CMD_FL1FBMIN_S 7 +#define FW_IQ_CMD_FL1FBMIN_V(x) ((x) << FW_IQ_CMD_FL1FBMIN_S) + +#define FW_IQ_CMD_FL1FBMAX_S 4 +#define FW_IQ_CMD_FL1FBMAX_V(x) ((x) << FW_IQ_CMD_FL1FBMAX_S) + +#define FW_IQ_CMD_FL1CIDXFTHRESHO_S 3 +#define FW_IQ_CMD_FL1CIDXFTHRESHO_V(x) ((x) << FW_IQ_CMD_FL1CIDXFTHRESHO_S) +#define FW_IQ_CMD_FL1CIDXFTHRESHO_F FW_IQ_CMD_FL1CIDXFTHRESHO_V(1U) + +#define FW_IQ_CMD_FL1CIDXFTHRESH_S 0 +#define FW_IQ_CMD_FL1CIDXFTHRESH_V(x) ((x) << FW_IQ_CMD_FL1CIDXFTHRESH_S) struct fw_eq_eth_cmd { __be32 op_to_vfn; @@ -1195,40 +1520,102 @@ struct fw_eq_eth_cmd { __be64 r9; }; -#define FW_EQ_ETH_CMD_PFN(x) ((x) << 8) -#define FW_EQ_ETH_CMD_VFN(x) ((x) << 0) -#define FW_EQ_ETH_CMD_ALLOC (1U << 31) -#define FW_EQ_ETH_CMD_FREE (1U << 30) -#define FW_EQ_ETH_CMD_MODIFY (1U << 29) -#define FW_EQ_ETH_CMD_EQSTART (1U << 28) -#define FW_EQ_ETH_CMD_EQSTOP (1U << 27) - -#define FW_EQ_ETH_CMD_EQID(x) ((x) << 0) -#define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) -#define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0) -#define FW_EQ_ETH_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) - -#define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26) -#define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25) -#define FW_EQ_ETH_CMD_STATUSPGRO(x) ((x) << 24) -#define FW_EQ_ETH_CMD_FETCHNS(x) ((x) << 23) -#define FW_EQ_ETH_CMD_FETCHRO(x) ((x) << 22) -#define FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << 20) -#define FW_EQ_ETH_CMD_CPRIO(x) ((x) << 19) -#define FW_EQ_ETH_CMD_ONCHIP(x) ((x) << 18) -#define FW_EQ_ETH_CMD_PCIECHN(x) ((x) << 16) -#define FW_EQ_ETH_CMD_IQID(x) ((x) << 0) - -#define FW_EQ_ETH_CMD_DCAEN(x) ((x) << 31) -#define FW_EQ_ETH_CMD_DCACPU(x) ((x) << 26) -#define FW_EQ_ETH_CMD_FBMIN(x) ((x) << 23) -#define FW_EQ_ETH_CMD_FBMAX(x) ((x) << 20) -#define FW_EQ_ETH_CMD_CIDXFTHRESHO(x) ((x) << 19) -#define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16) -#define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0) - -#define FW_EQ_ETH_CMD_AUTOEQUEQE (1U << 30) -#define FW_EQ_ETH_CMD_VIID(x) ((x) << 16) +#define FW_EQ_ETH_CMD_PFN_S 8 +#define FW_EQ_ETH_CMD_PFN_V(x) ((x) << FW_EQ_ETH_CMD_PFN_S) + +#define FW_EQ_ETH_CMD_VFN_S 0 +#define FW_EQ_ETH_CMD_VFN_V(x) ((x) << FW_EQ_ETH_CMD_VFN_S) + +#define FW_EQ_ETH_CMD_ALLOC_S 31 +#define FW_EQ_ETH_CMD_ALLOC_V(x) ((x) << FW_EQ_ETH_CMD_ALLOC_S) +#define FW_EQ_ETH_CMD_ALLOC_F FW_EQ_ETH_CMD_ALLOC_V(1U) + +#define FW_EQ_ETH_CMD_FREE_S 30 +#define FW_EQ_ETH_CMD_FREE_V(x) ((x) << FW_EQ_ETH_CMD_FREE_S) +#define FW_EQ_ETH_CMD_FREE_F FW_EQ_ETH_CMD_FREE_V(1U) + +#define FW_EQ_ETH_CMD_MODIFY_S 29 +#define FW_EQ_ETH_CMD_MODIFY_V(x) ((x) << FW_EQ_ETH_CMD_MODIFY_S) +#define FW_EQ_ETH_CMD_MODIFY_F FW_EQ_ETH_CMD_MODIFY_V(1U) + +#define FW_EQ_ETH_CMD_EQSTART_S 28 +#define FW_EQ_ETH_CMD_EQSTART_V(x) ((x) << FW_EQ_ETH_CMD_EQSTART_S) +#define FW_EQ_ETH_CMD_EQSTART_F FW_EQ_ETH_CMD_EQSTART_V(1U) + +#define FW_EQ_ETH_CMD_EQSTOP_S 27 +#define FW_EQ_ETH_CMD_EQSTOP_V(x) ((x) << FW_EQ_ETH_CMD_EQSTOP_S) +#define FW_EQ_ETH_CMD_EQSTOP_F FW_EQ_ETH_CMD_EQSTOP_V(1U) + +#define FW_EQ_ETH_CMD_EQID_S 0 +#define FW_EQ_ETH_CMD_EQID_M 0xfffff +#define FW_EQ_ETH_CMD_EQID_V(x) ((x) << FW_EQ_ETH_CMD_EQID_S) +#define FW_EQ_ETH_CMD_EQID_G(x) \ + (((x) >> FW_EQ_ETH_CMD_EQID_S) & FW_EQ_ETH_CMD_EQID_M) + +#define FW_EQ_ETH_CMD_PHYSEQID_S 0 +#define FW_EQ_ETH_CMD_PHYSEQID_M 0xfffff +#define FW_EQ_ETH_CMD_PHYSEQID_V(x) ((x) << FW_EQ_ETH_CMD_PHYSEQID_S) +#define FW_EQ_ETH_CMD_PHYSEQID_G(x) \ + (((x) >> FW_EQ_ETH_CMD_PHYSEQID_S) & FW_EQ_ETH_CMD_PHYSEQID_M) + +#define FW_EQ_ETH_CMD_FETCHSZM_S 26 +#define FW_EQ_ETH_CMD_FETCHSZM_V(x) ((x) << 
FW_EQ_ETH_CMD_FETCHSZM_S) +#define FW_EQ_ETH_CMD_FETCHSZM_F FW_EQ_ETH_CMD_FETCHSZM_V(1U) + +#define FW_EQ_ETH_CMD_STATUSPGNS_S 25 +#define FW_EQ_ETH_CMD_STATUSPGNS_V(x) ((x) << FW_EQ_ETH_CMD_STATUSPGNS_S) + +#define FW_EQ_ETH_CMD_STATUSPGRO_S 24 +#define FW_EQ_ETH_CMD_STATUSPGRO_V(x) ((x) << FW_EQ_ETH_CMD_STATUSPGRO_S) + +#define FW_EQ_ETH_CMD_FETCHNS_S 23 +#define FW_EQ_ETH_CMD_FETCHNS_V(x) ((x) << FW_EQ_ETH_CMD_FETCHNS_S) + +#define FW_EQ_ETH_CMD_FETCHRO_S 22 +#define FW_EQ_ETH_CMD_FETCHRO_V(x) ((x) << FW_EQ_ETH_CMD_FETCHRO_S) + +#define FW_EQ_ETH_CMD_HOSTFCMODE_S 20 +#define FW_EQ_ETH_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_ETH_CMD_HOSTFCMODE_S) + +#define FW_EQ_ETH_CMD_CPRIO_S 19 +#define FW_EQ_ETH_CMD_CPRIO_V(x) ((x) << FW_EQ_ETH_CMD_CPRIO_S) + +#define FW_EQ_ETH_CMD_ONCHIP_S 18 +#define FW_EQ_ETH_CMD_ONCHIP_V(x) ((x) << FW_EQ_ETH_CMD_ONCHIP_S) + +#define FW_EQ_ETH_CMD_PCIECHN_S 16 +#define FW_EQ_ETH_CMD_PCIECHN_V(x) ((x) << FW_EQ_ETH_CMD_PCIECHN_S) + +#define FW_EQ_ETH_CMD_IQID_S 0 +#define FW_EQ_ETH_CMD_IQID_V(x) ((x) << FW_EQ_ETH_CMD_IQID_S) + +#define FW_EQ_ETH_CMD_DCAEN_S 31 +#define FW_EQ_ETH_CMD_DCAEN_V(x) ((x) << FW_EQ_ETH_CMD_DCAEN_S) + +#define FW_EQ_ETH_CMD_DCACPU_S 26 +#define FW_EQ_ETH_CMD_DCACPU_V(x) ((x) << FW_EQ_ETH_CMD_DCACPU_S) + +#define FW_EQ_ETH_CMD_FBMIN_S 23 +#define FW_EQ_ETH_CMD_FBMIN_V(x) ((x) << FW_EQ_ETH_CMD_FBMIN_S) + +#define FW_EQ_ETH_CMD_FBMAX_S 20 +#define FW_EQ_ETH_CMD_FBMAX_V(x) ((x) << FW_EQ_ETH_CMD_FBMAX_S) + +#define FW_EQ_ETH_CMD_CIDXFTHRESHO_S 19 +#define FW_EQ_ETH_CMD_CIDXFTHRESHO_V(x) ((x) << FW_EQ_ETH_CMD_CIDXFTHRESHO_S) + +#define FW_EQ_ETH_CMD_CIDXFTHRESH_S 16 +#define FW_EQ_ETH_CMD_CIDXFTHRESH_V(x) ((x) << FW_EQ_ETH_CMD_CIDXFTHRESH_S) + +#define FW_EQ_ETH_CMD_EQSIZE_S 0 +#define FW_EQ_ETH_CMD_EQSIZE_V(x) ((x) << FW_EQ_ETH_CMD_EQSIZE_S) + +#define FW_EQ_ETH_CMD_AUTOEQUEQE_S 30 +#define FW_EQ_ETH_CMD_AUTOEQUEQE_V(x) ((x) << FW_EQ_ETH_CMD_AUTOEQUEQE_S) +#define FW_EQ_ETH_CMD_AUTOEQUEQE_F FW_EQ_ETH_CMD_AUTOEQUEQE_V(1U) + +#define FW_EQ_ETH_CMD_VIID_S 16 +#define FW_EQ_ETH_CMD_VIID_V(x) ((x) << FW_EQ_ETH_CMD_VIID_S) struct fw_eq_ctrl_cmd { __be32 op_to_vfn; @@ -1240,38 +1627,102 @@ struct fw_eq_ctrl_cmd { __be64 eqaddr; }; -#define FW_EQ_CTRL_CMD_PFN(x) ((x) << 8) -#define FW_EQ_CTRL_CMD_VFN(x) ((x) << 0) - -#define FW_EQ_CTRL_CMD_ALLOC (1U << 31) -#define FW_EQ_CTRL_CMD_FREE (1U << 30) -#define FW_EQ_CTRL_CMD_MODIFY (1U << 29) -#define FW_EQ_CTRL_CMD_EQSTART (1U << 28) -#define FW_EQ_CTRL_CMD_EQSTOP (1U << 27) - -#define FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << 20) -#define FW_EQ_CTRL_CMD_EQID(x) ((x) << 0) -#define FW_EQ_CTRL_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) -#define FW_EQ_CTRL_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) - -#define FW_EQ_CTRL_CMD_FETCHSZM (1U << 26) -#define FW_EQ_CTRL_CMD_STATUSPGNS (1U << 25) -#define FW_EQ_CTRL_CMD_STATUSPGRO (1U << 24) -#define FW_EQ_CTRL_CMD_FETCHNS (1U << 23) -#define FW_EQ_CTRL_CMD_FETCHRO (1U << 22) -#define FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << 20) -#define FW_EQ_CTRL_CMD_CPRIO(x) ((x) << 19) -#define FW_EQ_CTRL_CMD_ONCHIP(x) ((x) << 18) -#define FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << 16) -#define FW_EQ_CTRL_CMD_IQID(x) ((x) << 0) - -#define FW_EQ_CTRL_CMD_DCAEN(x) ((x) << 31) -#define FW_EQ_CTRL_CMD_DCACPU(x) ((x) << 26) -#define FW_EQ_CTRL_CMD_FBMIN(x) ((x) << 23) -#define FW_EQ_CTRL_CMD_FBMAX(x) ((x) << 20) -#define FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) ((x) << 19) -#define FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << 16) -#define FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << 0) +#define FW_EQ_CTRL_CMD_PFN_S 8 +#define 
FW_EQ_CTRL_CMD_PFN_V(x) ((x) << FW_EQ_CTRL_CMD_PFN_S) + +#define FW_EQ_CTRL_CMD_VFN_S 0 +#define FW_EQ_CTRL_CMD_VFN_V(x) ((x) << FW_EQ_CTRL_CMD_VFN_S) + +#define FW_EQ_CTRL_CMD_ALLOC_S 31 +#define FW_EQ_CTRL_CMD_ALLOC_V(x) ((x) << FW_EQ_CTRL_CMD_ALLOC_S) +#define FW_EQ_CTRL_CMD_ALLOC_F FW_EQ_CTRL_CMD_ALLOC_V(1U) + +#define FW_EQ_CTRL_CMD_FREE_S 30 +#define FW_EQ_CTRL_CMD_FREE_V(x) ((x) << FW_EQ_CTRL_CMD_FREE_S) +#define FW_EQ_CTRL_CMD_FREE_F FW_EQ_CTRL_CMD_FREE_V(1U) + +#define FW_EQ_CTRL_CMD_MODIFY_S 29 +#define FW_EQ_CTRL_CMD_MODIFY_V(x) ((x) << FW_EQ_CTRL_CMD_MODIFY_S) +#define FW_EQ_CTRL_CMD_MODIFY_F FW_EQ_CTRL_CMD_MODIFY_V(1U) + +#define FW_EQ_CTRL_CMD_EQSTART_S 28 +#define FW_EQ_CTRL_CMD_EQSTART_V(x) ((x) << FW_EQ_CTRL_CMD_EQSTART_S) +#define FW_EQ_CTRL_CMD_EQSTART_F FW_EQ_CTRL_CMD_EQSTART_V(1U) + +#define FW_EQ_CTRL_CMD_EQSTOP_S 27 +#define FW_EQ_CTRL_CMD_EQSTOP_V(x) ((x) << FW_EQ_CTRL_CMD_EQSTOP_S) +#define FW_EQ_CTRL_CMD_EQSTOP_F FW_EQ_CTRL_CMD_EQSTOP_V(1U) + +#define FW_EQ_CTRL_CMD_CMPLIQID_S 20 +#define FW_EQ_CTRL_CMD_CMPLIQID_V(x) ((x) << FW_EQ_CTRL_CMD_CMPLIQID_S) + +#define FW_EQ_CTRL_CMD_EQID_S 0 +#define FW_EQ_CTRL_CMD_EQID_M 0xfffff +#define FW_EQ_CTRL_CMD_EQID_V(x) ((x) << FW_EQ_CTRL_CMD_EQID_S) +#define FW_EQ_CTRL_CMD_EQID_G(x) \ + (((x) >> FW_EQ_CTRL_CMD_EQID_S) & FW_EQ_CTRL_CMD_EQID_M) + +#define FW_EQ_CTRL_CMD_PHYSEQID_S 0 +#define FW_EQ_CTRL_CMD_PHYSEQID_M 0xfffff +#define FW_EQ_CTRL_CMD_PHYSEQID_G(x) \ + (((x) >> FW_EQ_CTRL_CMD_PHYSEQID_S) & FW_EQ_CTRL_CMD_PHYSEQID_M) + +#define FW_EQ_CTRL_CMD_FETCHSZM_S 26 +#define FW_EQ_CTRL_CMD_FETCHSZM_V(x) ((x) << FW_EQ_CTRL_CMD_FETCHSZM_S) +#define FW_EQ_CTRL_CMD_FETCHSZM_F FW_EQ_CTRL_CMD_FETCHSZM_V(1U) + +#define FW_EQ_CTRL_CMD_STATUSPGNS_S 25 +#define FW_EQ_CTRL_CMD_STATUSPGNS_V(x) ((x) << FW_EQ_CTRL_CMD_STATUSPGNS_S) +#define FW_EQ_CTRL_CMD_STATUSPGNS_F FW_EQ_CTRL_CMD_STATUSPGNS_V(1U) + +#define FW_EQ_CTRL_CMD_STATUSPGRO_S 24 +#define FW_EQ_CTRL_CMD_STATUSPGRO_V(x) ((x) << FW_EQ_CTRL_CMD_STATUSPGRO_S) +#define FW_EQ_CTRL_CMD_STATUSPGRO_F FW_EQ_CTRL_CMD_STATUSPGRO_V(1U) + +#define FW_EQ_CTRL_CMD_FETCHNS_S 23 +#define FW_EQ_CTRL_CMD_FETCHNS_V(x) ((x) << FW_EQ_CTRL_CMD_FETCHNS_S) +#define FW_EQ_CTRL_CMD_FETCHNS_F FW_EQ_CTRL_CMD_FETCHNS_V(1U) + +#define FW_EQ_CTRL_CMD_FETCHRO_S 22 +#define FW_EQ_CTRL_CMD_FETCHRO_V(x) ((x) << FW_EQ_CTRL_CMD_FETCHRO_S) +#define FW_EQ_CTRL_CMD_FETCHRO_F FW_EQ_CTRL_CMD_FETCHRO_V(1U) + +#define FW_EQ_CTRL_CMD_HOSTFCMODE_S 20 +#define FW_EQ_CTRL_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_CTRL_CMD_HOSTFCMODE_S) + +#define FW_EQ_CTRL_CMD_CPRIO_S 19 +#define FW_EQ_CTRL_CMD_CPRIO_V(x) ((x) << FW_EQ_CTRL_CMD_CPRIO_S) + +#define FW_EQ_CTRL_CMD_ONCHIP_S 18 +#define FW_EQ_CTRL_CMD_ONCHIP_V(x) ((x) << FW_EQ_CTRL_CMD_ONCHIP_S) + +#define FW_EQ_CTRL_CMD_PCIECHN_S 16 +#define FW_EQ_CTRL_CMD_PCIECHN_V(x) ((x) << FW_EQ_CTRL_CMD_PCIECHN_S) + +#define FW_EQ_CTRL_CMD_IQID_S 0 +#define FW_EQ_CTRL_CMD_IQID_V(x) ((x) << FW_EQ_CTRL_CMD_IQID_S) + +#define FW_EQ_CTRL_CMD_DCAEN_S 31 +#define FW_EQ_CTRL_CMD_DCAEN_V(x) ((x) << FW_EQ_CTRL_CMD_DCAEN_S) + +#define FW_EQ_CTRL_CMD_DCACPU_S 26 +#define FW_EQ_CTRL_CMD_DCACPU_V(x) ((x) << FW_EQ_CTRL_CMD_DCACPU_S) + +#define FW_EQ_CTRL_CMD_FBMIN_S 23 +#define FW_EQ_CTRL_CMD_FBMIN_V(x) ((x) << FW_EQ_CTRL_CMD_FBMIN_S) + +#define FW_EQ_CTRL_CMD_FBMAX_S 20 +#define FW_EQ_CTRL_CMD_FBMAX_V(x) ((x) << FW_EQ_CTRL_CMD_FBMAX_S) + +#define FW_EQ_CTRL_CMD_CIDXFTHRESHO_S 19 +#define FW_EQ_CTRL_CMD_CIDXFTHRESHO_V(x) \ + ((x) << FW_EQ_CTRL_CMD_CIDXFTHRESHO_S) + +#define FW_EQ_CTRL_CMD_CIDXFTHRESH_S 16 
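
One point worth noting in the FW_EQ_* conversions is the split between _F flags (fixed single bits such as ALLOC and EQSTART, which previously appeared as bare (1U << n) constants) and _V placers (multi-bit fields such as PFN). The sketch below combines both when building an op_to_vfn word, assuming hypothetical pf/vf numbers; a real command would also carry the generic FW_CMD_OP_V/FW_CMD_REQUEST_F bits defined earlier and would be converted to big-endian before being handed to hardware, which is elided here.

/* Sketch: combining _F flags with _V values for an EQ control command.
 * Macro definitions are copied from the patch; pf/vf are hypothetical,
 * and endianness conversion (cpu_to_be32) is intentionally omitted.
 */
#include <stdio.h>
#include <stdint.h>

#define FW_EQ_CTRL_CMD_PFN_S	8
#define FW_EQ_CTRL_CMD_PFN_V(x)	((x) << FW_EQ_CTRL_CMD_PFN_S)

#define FW_EQ_CTRL_CMD_VFN_S	0
#define FW_EQ_CTRL_CMD_VFN_V(x)	((x) << FW_EQ_CTRL_CMD_VFN_S)

#define FW_EQ_CTRL_CMD_ALLOC_S		31
#define FW_EQ_CTRL_CMD_ALLOC_V(x)	((x) << FW_EQ_CTRL_CMD_ALLOC_S)
#define FW_EQ_CTRL_CMD_ALLOC_F		FW_EQ_CTRL_CMD_ALLOC_V(1U)

#define FW_EQ_CTRL_CMD_EQSTART_S	28
#define FW_EQ_CTRL_CMD_EQSTART_V(x)	((x) << FW_EQ_CTRL_CMD_EQSTART_S)
#define FW_EQ_CTRL_CMD_EQSTART_F	FW_EQ_CTRL_CMD_EQSTART_V(1U)

int main(void)
{
	unsigned int pf = 4, vf = 0;	/* hypothetical function numbers */

	uint32_t op_to_vfn = FW_EQ_CTRL_CMD_ALLOC_F |
			     FW_EQ_CTRL_CMD_EQSTART_F |
			     FW_EQ_CTRL_CMD_PFN_V(pf) |
			     FW_EQ_CTRL_CMD_VFN_V(vf);

	printf("op_to_vfn = 0x%08x\n", (unsigned)op_to_vfn);	/* 0x90000400 */
	return 0;
}
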
+#define FW_EQ_CTRL_CMD_CIDXFTHRESH_V(x) ((x) << FW_EQ_CTRL_CMD_CIDXFTHRESH_S) + +#define FW_EQ_CTRL_CMD_EQSIZE_S 0 +#define FW_EQ_CTRL_CMD_EQSIZE_V(x) ((x) << FW_EQ_CTRL_CMD_EQSIZE_S) struct fw_eq_ofld_cmd { __be32 op_to_vfn; @@ -1283,45 +1734,112 @@ struct fw_eq_ofld_cmd { __be64 eqaddr; }; -#define FW_EQ_OFLD_CMD_PFN(x) ((x) << 8) -#define FW_EQ_OFLD_CMD_VFN(x) ((x) << 0) - -#define FW_EQ_OFLD_CMD_ALLOC (1U << 31) -#define FW_EQ_OFLD_CMD_FREE (1U << 30) -#define FW_EQ_OFLD_CMD_MODIFY (1U << 29) -#define FW_EQ_OFLD_CMD_EQSTART (1U << 28) -#define FW_EQ_OFLD_CMD_EQSTOP (1U << 27) - -#define FW_EQ_OFLD_CMD_EQID(x) ((x) << 0) -#define FW_EQ_OFLD_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) -#define FW_EQ_OFLD_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) - -#define FW_EQ_OFLD_CMD_FETCHSZM(x) ((x) << 26) -#define FW_EQ_OFLD_CMD_STATUSPGNS(x) ((x) << 25) -#define FW_EQ_OFLD_CMD_STATUSPGRO(x) ((x) << 24) -#define FW_EQ_OFLD_CMD_FETCHNS(x) ((x) << 23) -#define FW_EQ_OFLD_CMD_FETCHRO(x) ((x) << 22) -#define FW_EQ_OFLD_CMD_HOSTFCMODE(x) ((x) << 20) -#define FW_EQ_OFLD_CMD_CPRIO(x) ((x) << 19) -#define FW_EQ_OFLD_CMD_ONCHIP(x) ((x) << 18) -#define FW_EQ_OFLD_CMD_PCIECHN(x) ((x) << 16) -#define FW_EQ_OFLD_CMD_IQID(x) ((x) << 0) - -#define FW_EQ_OFLD_CMD_DCAEN(x) ((x) << 31) -#define FW_EQ_OFLD_CMD_DCACPU(x) ((x) << 26) -#define FW_EQ_OFLD_CMD_FBMIN(x) ((x) << 23) -#define FW_EQ_OFLD_CMD_FBMAX(x) ((x) << 20) -#define FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) ((x) << 19) -#define FW_EQ_OFLD_CMD_CIDXFTHRESH(x) ((x) << 16) -#define FW_EQ_OFLD_CMD_EQSIZE(x) ((x) << 0) +#define FW_EQ_OFLD_CMD_PFN_S 8 +#define FW_EQ_OFLD_CMD_PFN_V(x) ((x) << FW_EQ_OFLD_CMD_PFN_S) + +#define FW_EQ_OFLD_CMD_VFN_S 0 +#define FW_EQ_OFLD_CMD_VFN_V(x) ((x) << FW_EQ_OFLD_CMD_VFN_S) + +#define FW_EQ_OFLD_CMD_ALLOC_S 31 +#define FW_EQ_OFLD_CMD_ALLOC_V(x) ((x) << FW_EQ_OFLD_CMD_ALLOC_S) +#define FW_EQ_OFLD_CMD_ALLOC_F FW_EQ_OFLD_CMD_ALLOC_V(1U) + +#define FW_EQ_OFLD_CMD_FREE_S 30 +#define FW_EQ_OFLD_CMD_FREE_V(x) ((x) << FW_EQ_OFLD_CMD_FREE_S) +#define FW_EQ_OFLD_CMD_FREE_F FW_EQ_OFLD_CMD_FREE_V(1U) + +#define FW_EQ_OFLD_CMD_MODIFY_S 29 +#define FW_EQ_OFLD_CMD_MODIFY_V(x) ((x) << FW_EQ_OFLD_CMD_MODIFY_S) +#define FW_EQ_OFLD_CMD_MODIFY_F FW_EQ_OFLD_CMD_MODIFY_V(1U) + +#define FW_EQ_OFLD_CMD_EQSTART_S 28 +#define FW_EQ_OFLD_CMD_EQSTART_V(x) ((x) << FW_EQ_OFLD_CMD_EQSTART_S) +#define FW_EQ_OFLD_CMD_EQSTART_F FW_EQ_OFLD_CMD_EQSTART_V(1U) + +#define FW_EQ_OFLD_CMD_EQSTOP_S 27 +#define FW_EQ_OFLD_CMD_EQSTOP_V(x) ((x) << FW_EQ_OFLD_CMD_EQSTOP_S) +#define FW_EQ_OFLD_CMD_EQSTOP_F FW_EQ_OFLD_CMD_EQSTOP_V(1U) + +#define FW_EQ_OFLD_CMD_EQID_S 0 +#define FW_EQ_OFLD_CMD_EQID_M 0xfffff +#define FW_EQ_OFLD_CMD_EQID_V(x) ((x) << FW_EQ_OFLD_CMD_EQID_S) +#define FW_EQ_OFLD_CMD_EQID_G(x) \ + (((x) >> FW_EQ_OFLD_CMD_EQID_S) & FW_EQ_OFLD_CMD_EQID_M) + +#define FW_EQ_OFLD_CMD_PHYSEQID_S 0 +#define FW_EQ_OFLD_CMD_PHYSEQID_M 0xfffff +#define FW_EQ_OFLD_CMD_PHYSEQID_G(x) \ + (((x) >> FW_EQ_OFLD_CMD_PHYSEQID_S) & FW_EQ_OFLD_CMD_PHYSEQID_M) + +#define FW_EQ_OFLD_CMD_FETCHSZM_S 26 +#define FW_EQ_OFLD_CMD_FETCHSZM_V(x) ((x) << FW_EQ_OFLD_CMD_FETCHSZM_S) + +#define FW_EQ_OFLD_CMD_STATUSPGNS_S 25 +#define FW_EQ_OFLD_CMD_STATUSPGNS_V(x) ((x) << FW_EQ_OFLD_CMD_STATUSPGNS_S) + +#define FW_EQ_OFLD_CMD_STATUSPGRO_S 24 +#define FW_EQ_OFLD_CMD_STATUSPGRO_V(x) ((x) << FW_EQ_OFLD_CMD_STATUSPGRO_S) + +#define FW_EQ_OFLD_CMD_FETCHNS_S 23 +#define FW_EQ_OFLD_CMD_FETCHNS_V(x) ((x) << FW_EQ_OFLD_CMD_FETCHNS_S) + +#define FW_EQ_OFLD_CMD_FETCHRO_S 22 +#define FW_EQ_OFLD_CMD_FETCHRO_V(x) ((x) << 
FW_EQ_OFLD_CMD_FETCHRO_S) +#define FW_EQ_OFLD_CMD_FETCHRO_F FW_EQ_OFLD_CMD_FETCHRO_V(1U) + +#define FW_EQ_OFLD_CMD_HOSTFCMODE_S 20 +#define FW_EQ_OFLD_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_OFLD_CMD_HOSTFCMODE_S) + +#define FW_EQ_OFLD_CMD_CPRIO_S 19 +#define FW_EQ_OFLD_CMD_CPRIO_V(x) ((x) << FW_EQ_OFLD_CMD_CPRIO_S) + +#define FW_EQ_OFLD_CMD_ONCHIP_S 18 +#define FW_EQ_OFLD_CMD_ONCHIP_V(x) ((x) << FW_EQ_OFLD_CMD_ONCHIP_S) + +#define FW_EQ_OFLD_CMD_PCIECHN_S 16 +#define FW_EQ_OFLD_CMD_PCIECHN_V(x) ((x) << FW_EQ_OFLD_CMD_PCIECHN_S) + +#define FW_EQ_OFLD_CMD_IQID_S 0 +#define FW_EQ_OFLD_CMD_IQID_V(x) ((x) << FW_EQ_OFLD_CMD_IQID_S) + +#define FW_EQ_OFLD_CMD_DCAEN_S 31 +#define FW_EQ_OFLD_CMD_DCAEN_V(x) ((x) << FW_EQ_OFLD_CMD_DCAEN_S) + +#define FW_EQ_OFLD_CMD_DCACPU_S 26 +#define FW_EQ_OFLD_CMD_DCACPU_V(x) ((x) << FW_EQ_OFLD_CMD_DCACPU_S) + +#define FW_EQ_OFLD_CMD_FBMIN_S 23 +#define FW_EQ_OFLD_CMD_FBMIN_V(x) ((x) << FW_EQ_OFLD_CMD_FBMIN_S) + +#define FW_EQ_OFLD_CMD_FBMAX_S 20 +#define FW_EQ_OFLD_CMD_FBMAX_V(x) ((x) << FW_EQ_OFLD_CMD_FBMAX_S) + +#define FW_EQ_OFLD_CMD_CIDXFTHRESHO_S 19 +#define FW_EQ_OFLD_CMD_CIDXFTHRESHO_V(x) \ + ((x) << FW_EQ_OFLD_CMD_CIDXFTHRESHO_S) + +#define FW_EQ_OFLD_CMD_CIDXFTHRESH_S 16 +#define FW_EQ_OFLD_CMD_CIDXFTHRESH_V(x) ((x) << FW_EQ_OFLD_CMD_CIDXFTHRESH_S) + +#define FW_EQ_OFLD_CMD_EQSIZE_S 0 +#define FW_EQ_OFLD_CMD_EQSIZE_V(x) ((x) << FW_EQ_OFLD_CMD_EQSIZE_S) /* * Macros for VIID parsing: * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number */ -#define FW_VIID_PFN_GET(x) (((x) >> 8) & 0x7) -#define FW_VIID_VIVLD_GET(x) (((x) >> 7) & 0x1) -#define FW_VIID_VIN_GET(x) (((x) >> 0) & 0x7F) + +#define FW_VIID_PFN_S 8 +#define FW_VIID_PFN_M 0x7 +#define FW_VIID_PFN_G(x) (((x) >> FW_VIID_PFN_S) & FW_VIID_PFN_M) + +#define FW_VIID_VIVLD_S 7 +#define FW_VIID_VIVLD_M 0x1 +#define FW_VIID_VIVLD_G(x) (((x) >> FW_VIID_VIVLD_S) & FW_VIID_VIVLD_M) + +#define FW_VIID_VIN_S 0 +#define FW_VIID_VIN_M 0x7F +#define FW_VIID_VIN_G(x) (((x) >> FW_VIID_VIN_S) & FW_VIID_VIN_M) struct fw_vi_cmd { __be32 op_to_vfn; @@ -1341,15 +1859,35 @@ struct fw_vi_cmd { __be64 r10; }; -#define FW_VI_CMD_PFN(x) ((x) << 8) -#define FW_VI_CMD_VFN(x) ((x) << 0) -#define FW_VI_CMD_ALLOC (1U << 31) -#define FW_VI_CMD_FREE (1U << 30) -#define FW_VI_CMD_VIID(x) ((x) << 0) -#define FW_VI_CMD_VIID_GET(x) ((x) & 0xfff) -#define FW_VI_CMD_PORTID(x) ((x) << 4) -#define FW_VI_CMD_PORTID_GET(x) (((x) >> 4) & 0xf) -#define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff) +#define FW_VI_CMD_PFN_S 8 +#define FW_VI_CMD_PFN_V(x) ((x) << FW_VI_CMD_PFN_S) + +#define FW_VI_CMD_VFN_S 0 +#define FW_VI_CMD_VFN_V(x) ((x) << FW_VI_CMD_VFN_S) + +#define FW_VI_CMD_ALLOC_S 31 +#define FW_VI_CMD_ALLOC_V(x) ((x) << FW_VI_CMD_ALLOC_S) +#define FW_VI_CMD_ALLOC_F FW_VI_CMD_ALLOC_V(1U) + +#define FW_VI_CMD_FREE_S 30 +#define FW_VI_CMD_FREE_V(x) ((x) << FW_VI_CMD_FREE_S) +#define FW_VI_CMD_FREE_F FW_VI_CMD_FREE_V(1U) + +#define FW_VI_CMD_VIID_S 0 +#define FW_VI_CMD_VIID_M 0xfff +#define FW_VI_CMD_VIID_V(x) ((x) << FW_VI_CMD_VIID_S) +#define FW_VI_CMD_VIID_G(x) (((x) >> FW_VI_CMD_VIID_S) & FW_VI_CMD_VIID_M) + +#define FW_VI_CMD_PORTID_S 4 +#define FW_VI_CMD_PORTID_M 0xf +#define FW_VI_CMD_PORTID_V(x) ((x) << FW_VI_CMD_PORTID_S) +#define FW_VI_CMD_PORTID_G(x) \ + (((x) >> FW_VI_CMD_PORTID_S) & FW_VI_CMD_PORTID_M) + +#define FW_VI_CMD_RSSSIZE_S 0 +#define FW_VI_CMD_RSSSIZE_M 0x7ff +#define FW_VI_CMD_RSSSIZE_G(x) \ + (((x) >> FW_VI_CMD_RSSSIZE_S) & FW_VI_CMD_RSSSIZE_M) /* Special VI_MAC command index ids */ #define FW_VI_MAC_ADD_MAC 0x3FF @@ -1385,16 
+1923,37 @@ struct fw_vi_mac_cmd { } u; }; -#define FW_VI_MAC_CMD_VIID(x) ((x) << 0) -#define FW_VI_MAC_CMD_FREEMACS(x) ((x) << 31) -#define FW_VI_MAC_CMD_HASHVECEN (1U << 23) -#define FW_VI_MAC_CMD_HASHUNIEN(x) ((x) << 22) -#define FW_VI_MAC_CMD_VALID (1U << 15) -#define FW_VI_MAC_CMD_PRIO(x) ((x) << 12) -#define FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << 10) -#define FW_VI_MAC_CMD_SMAC_RESULT_GET(x) (((x) >> 10) & 0x3) -#define FW_VI_MAC_CMD_IDX(x) ((x) << 0) -#define FW_VI_MAC_CMD_IDX_GET(x) (((x) >> 0) & 0x3ff) +#define FW_VI_MAC_CMD_VIID_S 0 +#define FW_VI_MAC_CMD_VIID_V(x) ((x) << FW_VI_MAC_CMD_VIID_S) + +#define FW_VI_MAC_CMD_FREEMACS_S 31 +#define FW_VI_MAC_CMD_FREEMACS_V(x) ((x) << FW_VI_MAC_CMD_FREEMACS_S) + +#define FW_VI_MAC_CMD_HASHVECEN_S 23 +#define FW_VI_MAC_CMD_HASHVECEN_V(x) ((x) << FW_VI_MAC_CMD_HASHVECEN_S) +#define FW_VI_MAC_CMD_HASHVECEN_F FW_VI_MAC_CMD_HASHVECEN_V(1U) + +#define FW_VI_MAC_CMD_HASHUNIEN_S 22 +#define FW_VI_MAC_CMD_HASHUNIEN_V(x) ((x) << FW_VI_MAC_CMD_HASHUNIEN_S) + +#define FW_VI_MAC_CMD_VALID_S 15 +#define FW_VI_MAC_CMD_VALID_V(x) ((x) << FW_VI_MAC_CMD_VALID_S) +#define FW_VI_MAC_CMD_VALID_F FW_VI_MAC_CMD_VALID_V(1U) + +#define FW_VI_MAC_CMD_PRIO_S 12 +#define FW_VI_MAC_CMD_PRIO_V(x) ((x) << FW_VI_MAC_CMD_PRIO_S) + +#define FW_VI_MAC_CMD_SMAC_RESULT_S 10 +#define FW_VI_MAC_CMD_SMAC_RESULT_M 0x3 +#define FW_VI_MAC_CMD_SMAC_RESULT_V(x) ((x) << FW_VI_MAC_CMD_SMAC_RESULT_S) +#define FW_VI_MAC_CMD_SMAC_RESULT_G(x) \ + (((x) >> FW_VI_MAC_CMD_SMAC_RESULT_S) & FW_VI_MAC_CMD_SMAC_RESULT_M) + +#define FW_VI_MAC_CMD_IDX_S 0 +#define FW_VI_MAC_CMD_IDX_M 0x3ff +#define FW_VI_MAC_CMD_IDX_V(x) ((x) << FW_VI_MAC_CMD_IDX_S) +#define FW_VI_MAC_CMD_IDX_G(x) \ + (((x) >> FW_VI_MAC_CMD_IDX_S) & FW_VI_MAC_CMD_IDX_M) #define FW_RXMODE_MTU_NO_CHG 65535 @@ -1405,17 +1964,30 @@ struct fw_vi_rxmode_cmd { __be32 r4_lo; }; -#define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0) -#define FW_VI_RXMODE_CMD_MTU_MASK 0xffff -#define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16) -#define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3 -#define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14) -#define FW_VI_RXMODE_CMD_ALLMULTIEN_MASK 0x3 -#define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12) -#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3 -#define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10) -#define FW_VI_RXMODE_CMD_VLANEXEN_MASK 0x3 -#define FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << 8) +#define FW_VI_RXMODE_CMD_VIID_S 0 +#define FW_VI_RXMODE_CMD_VIID_V(x) ((x) << FW_VI_RXMODE_CMD_VIID_S) + +#define FW_VI_RXMODE_CMD_MTU_S 16 +#define FW_VI_RXMODE_CMD_MTU_M 0xffff +#define FW_VI_RXMODE_CMD_MTU_V(x) ((x) << FW_VI_RXMODE_CMD_MTU_S) + +#define FW_VI_RXMODE_CMD_PROMISCEN_S 14 +#define FW_VI_RXMODE_CMD_PROMISCEN_M 0x3 +#define FW_VI_RXMODE_CMD_PROMISCEN_V(x) ((x) << FW_VI_RXMODE_CMD_PROMISCEN_S) + +#define FW_VI_RXMODE_CMD_ALLMULTIEN_S 12 +#define FW_VI_RXMODE_CMD_ALLMULTIEN_M 0x3 +#define FW_VI_RXMODE_CMD_ALLMULTIEN_V(x) \ + ((x) << FW_VI_RXMODE_CMD_ALLMULTIEN_S) + +#define FW_VI_RXMODE_CMD_BROADCASTEN_S 10 +#define FW_VI_RXMODE_CMD_BROADCASTEN_M 0x3 +#define FW_VI_RXMODE_CMD_BROADCASTEN_V(x) \ + ((x) << FW_VI_RXMODE_CMD_BROADCASTEN_S) + +#define FW_VI_RXMODE_CMD_VLANEXEN_S 8 +#define FW_VI_RXMODE_CMD_VLANEXEN_M 0x3 +#define FW_VI_RXMODE_CMD_VLANEXEN_V(x) ((x) << FW_VI_RXMODE_CMD_VLANEXEN_S) struct fw_vi_enable_cmd { __be32 op_to_viid; @@ -1425,11 +1997,21 @@ struct fw_vi_enable_cmd { __be32 r4; }; -#define FW_VI_ENABLE_CMD_VIID(x) ((x) << 0) -#define FW_VI_ENABLE_CMD_IEN(x) ((x) << 31) -#define FW_VI_ENABLE_CMD_EEN(x) ((x) << 30) -#define 
FW_VI_ENABLE_CMD_DCB_INFO(x) ((x) << 28) -#define FW_VI_ENABLE_CMD_LED (1U << 29) +#define FW_VI_ENABLE_CMD_VIID_S 0 +#define FW_VI_ENABLE_CMD_VIID_V(x) ((x) << FW_VI_ENABLE_CMD_VIID_S) + +#define FW_VI_ENABLE_CMD_IEN_S 31 +#define FW_VI_ENABLE_CMD_IEN_V(x) ((x) << FW_VI_ENABLE_CMD_IEN_S) + +#define FW_VI_ENABLE_CMD_EEN_S 30 +#define FW_VI_ENABLE_CMD_EEN_V(x) ((x) << FW_VI_ENABLE_CMD_EEN_S) + +#define FW_VI_ENABLE_CMD_LED_S 29 +#define FW_VI_ENABLE_CMD_LED_V(x) ((x) << FW_VI_ENABLE_CMD_LED_S) +#define FW_VI_ENABLE_CMD_LED_F FW_VI_ENABLE_CMD_LED_V(1U) + +#define FW_VI_ENABLE_CMD_DCB_INFO_S 28 +#define FW_VI_ENABLE_CMD_DCB_INFO_V(x) ((x) << FW_VI_ENABLE_CMD_DCB_INFO_S) /* VI VF stats offset definitions */ #define VI_VF_NUM_STATS 16 @@ -1529,9 +2111,14 @@ struct fw_vi_stats_cmd { } u; }; -#define FW_VI_STATS_CMD_VIID(x) ((x) << 0) -#define FW_VI_STATS_CMD_NSTATS(x) ((x) << 12) -#define FW_VI_STATS_CMD_IX(x) ((x) << 0) +#define FW_VI_STATS_CMD_VIID_S 0 +#define FW_VI_STATS_CMD_VIID_V(x) ((x) << FW_VI_STATS_CMD_VIID_S) + +#define FW_VI_STATS_CMD_NSTATS_S 12 +#define FW_VI_STATS_CMD_NSTATS_V(x) ((x) << FW_VI_STATS_CMD_NSTATS_S) + +#define FW_VI_STATS_CMD_IX_S 0 +#define FW_VI_STATS_CMD_IX_V(x) ((x) << FW_VI_STATS_CMD_IX_S) struct fw_acl_mac_cmd { __be32 op_to_vfn; @@ -1548,9 +2135,14 @@ struct fw_acl_mac_cmd { u8 macaddr3[6]; }; -#define FW_ACL_MAC_CMD_PFN(x) ((x) << 8) -#define FW_ACL_MAC_CMD_VFN(x) ((x) << 0) -#define FW_ACL_MAC_CMD_EN(x) ((x) << 31) +#define FW_ACL_MAC_CMD_PFN_S 8 +#define FW_ACL_MAC_CMD_PFN_V(x) ((x) << FW_ACL_MAC_CMD_PFN_S) + +#define FW_ACL_MAC_CMD_VFN_S 0 +#define FW_ACL_MAC_CMD_VFN_V(x) ((x) << FW_ACL_MAC_CMD_VFN_S) + +#define FW_ACL_MAC_CMD_EN_S 31 +#define FW_ACL_MAC_CMD_EN_V(x) ((x) << FW_ACL_MAC_CMD_EN_S) struct fw_acl_vlan_cmd { __be32 op_to_vfn; @@ -1561,11 +2153,20 @@ struct fw_acl_vlan_cmd { __be16 vlanid[16]; }; -#define FW_ACL_VLAN_CMD_PFN(x) ((x) << 8) -#define FW_ACL_VLAN_CMD_VFN(x) ((x) << 0) -#define FW_ACL_VLAN_CMD_EN(x) ((x) << 31) -#define FW_ACL_VLAN_CMD_DROPNOVLAN(x) ((x) << 7) -#define FW_ACL_VLAN_CMD_FM(x) ((x) << 6) +#define FW_ACL_VLAN_CMD_PFN_S 8 +#define FW_ACL_VLAN_CMD_PFN_V(x) ((x) << FW_ACL_VLAN_CMD_PFN_S) + +#define FW_ACL_VLAN_CMD_VFN_S 0 +#define FW_ACL_VLAN_CMD_VFN_V(x) ((x) << FW_ACL_VLAN_CMD_VFN_S) + +#define FW_ACL_VLAN_CMD_EN_S 31 +#define FW_ACL_VLAN_CMD_EN_V(x) ((x) << FW_ACL_VLAN_CMD_EN_S) + +#define FW_ACL_VLAN_CMD_DROPNOVLAN_S 7 +#define FW_ACL_VLAN_CMD_DROPNOVLAN_V(x) ((x) << FW_ACL_VLAN_CMD_DROPNOVLAN_S) + +#define FW_ACL_VLAN_CMD_FM_S 6 +#define FW_ACL_VLAN_CMD_FM_V(x) ((x) << FW_ACL_VLAN_CMD_FM_S) enum fw_port_cap { FW_PORT_CAP_SPEED_100M = 0x0001, @@ -1587,13 +2188,14 @@ enum fw_port_cap { }; enum fw_port_mdi { - FW_PORT_MDI_UNCHANGED, - FW_PORT_MDI_AUTO, - FW_PORT_MDI_F_STRAIGHT, - FW_PORT_MDI_F_CROSSOVER + FW_PORT_CAP_MDI_UNCHANGED, + FW_PORT_CAP_MDI_AUTO, + FW_PORT_CAP_MDI_F_STRAIGHT, + FW_PORT_CAP_MDI_F_CROSSOVER }; -#define FW_PORT_MDI(x) ((x) << 9) +#define FW_PORT_CAP_MDI_S 9 +#define FW_PORT_CAP_MDI_V(x) ((x) << FW_PORT_CAP_MDI_S) enum fw_port_action { FW_PORT_ACTION_L1_CFG = 0x0001, @@ -1753,52 +2355,105 @@ struct fw_port_cmd { } u; }; -#define FW_PORT_CMD_READ (1U << 22) - -#define FW_PORT_CMD_PORTID(x) ((x) << 0) -#define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf) - -#define FW_PORT_CMD_ACTION(x) ((x) << 16) -#define FW_PORT_CMD_ACTION_GET(x) (((x) >> 16) & 0xffff) - -#define FW_PORT_CMD_CTLBF(x) ((x) << 10) -#define FW_PORT_CMD_OVLAN3(x) ((x) << 7) -#define FW_PORT_CMD_OVLAN2(x) ((x) << 6) -#define 
FW_PORT_CMD_OVLAN1(x) ((x) << 5) -#define FW_PORT_CMD_OVLAN0(x) ((x) << 4) -#define FW_PORT_CMD_IVLAN0(x) ((x) << 3) - -#define FW_PORT_CMD_TXIPG(x) ((x) << 19) - -#define FW_PORT_CMD_LSTATUS (1U << 31) -#define FW_PORT_CMD_LSTATUS_GET(x) (((x) >> 31) & 0x1) -#define FW_PORT_CMD_LSPEED(x) ((x) << 24) -#define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f) -#define FW_PORT_CMD_TXPAUSE (1U << 23) -#define FW_PORT_CMD_RXPAUSE (1U << 22) -#define FW_PORT_CMD_MDIOCAP (1U << 21) -#define FW_PORT_CMD_MDIOADDR_GET(x) (((x) >> 16) & 0x1f) -#define FW_PORT_CMD_LPTXPAUSE (1U << 15) -#define FW_PORT_CMD_LPRXPAUSE (1U << 14) -#define FW_PORT_CMD_PTYPE_MASK 0x1f -#define FW_PORT_CMD_PTYPE_GET(x) (((x) >> 8) & FW_PORT_CMD_PTYPE_MASK) -#define FW_PORT_CMD_MODTYPE_MASK 0x1f -#define FW_PORT_CMD_MODTYPE_GET(x) (((x) >> 0) & FW_PORT_CMD_MODTYPE_MASK) - -#define FW_PORT_CMD_DCBXDIS (1U << 7) -#define FW_PORT_CMD_APPLY (1U << 7) -#define FW_PORT_CMD_ALL_SYNCD (1U << 7) -#define FW_PORT_CMD_DCB_VERSION_GET(x) (((x) >> 8) & 0xf) - -#define FW_PORT_CMD_PPPEN(x) ((x) << 31) -#define FW_PORT_CMD_TPSRC(x) ((x) << 28) -#define FW_PORT_CMD_NCSISRC(x) ((x) << 24) - -#define FW_PORT_CMD_CH0(x) ((x) << 20) -#define FW_PORT_CMD_CH1(x) ((x) << 16) -#define FW_PORT_CMD_CH2(x) ((x) << 12) -#define FW_PORT_CMD_CH3(x) ((x) << 8) -#define FW_PORT_CMD_NCSICH(x) ((x) << 4) +#define FW_PORT_CMD_READ_S 22 +#define FW_PORT_CMD_READ_V(x) ((x) << FW_PORT_CMD_READ_S) +#define FW_PORT_CMD_READ_F FW_PORT_CMD_READ_V(1U) + +#define FW_PORT_CMD_PORTID_S 0 +#define FW_PORT_CMD_PORTID_M 0xf +#define FW_PORT_CMD_PORTID_V(x) ((x) << FW_PORT_CMD_PORTID_S) +#define FW_PORT_CMD_PORTID_G(x) \ + (((x) >> FW_PORT_CMD_PORTID_S) & FW_PORT_CMD_PORTID_M) + +#define FW_PORT_CMD_ACTION_S 16 +#define FW_PORT_CMD_ACTION_M 0xffff +#define FW_PORT_CMD_ACTION_V(x) ((x) << FW_PORT_CMD_ACTION_S) +#define FW_PORT_CMD_ACTION_G(x) \ + (((x) >> FW_PORT_CMD_ACTION_S) & FW_PORT_CMD_ACTION_M) + +#define FW_PORT_CMD_OVLAN3_S 7 +#define FW_PORT_CMD_OVLAN3_V(x) ((x) << FW_PORT_CMD_OVLAN3_S) + +#define FW_PORT_CMD_OVLAN2_S 6 +#define FW_PORT_CMD_OVLAN2_V(x) ((x) << FW_PORT_CMD_OVLAN2_S) + +#define FW_PORT_CMD_OVLAN1_S 5 +#define FW_PORT_CMD_OVLAN1_V(x) ((x) << FW_PORT_CMD_OVLAN1_S) + +#define FW_PORT_CMD_OVLAN0_S 4 +#define FW_PORT_CMD_OVLAN0_V(x) ((x) << FW_PORT_CMD_OVLAN0_S) + +#define FW_PORT_CMD_IVLAN0_S 3 +#define FW_PORT_CMD_IVLAN0_V(x) ((x) << FW_PORT_CMD_IVLAN0_S) + +#define FW_PORT_CMD_TXIPG_S 3 +#define FW_PORT_CMD_TXIPG_V(x) ((x) << FW_PORT_CMD_TXIPG_S) + +#define FW_PORT_CMD_LSTATUS_S 31 +#define FW_PORT_CMD_LSTATUS_M 0x1 +#define FW_PORT_CMD_LSTATUS_V(x) ((x) << FW_PORT_CMD_LSTATUS_S) +#define FW_PORT_CMD_LSTATUS_G(x) \ + (((x) >> FW_PORT_CMD_LSTATUS_S) & FW_PORT_CMD_LSTATUS_M) +#define FW_PORT_CMD_LSTATUS_F FW_PORT_CMD_LSTATUS_V(1U) + +#define FW_PORT_CMD_LSPEED_S 24 +#define FW_PORT_CMD_LSPEED_M 0x3f +#define FW_PORT_CMD_LSPEED_V(x) ((x) << FW_PORT_CMD_LSPEED_S) +#define FW_PORT_CMD_LSPEED_G(x) \ + (((x) >> FW_PORT_CMD_LSPEED_S) & FW_PORT_CMD_LSPEED_M) + +#define FW_PORT_CMD_TXPAUSE_S 23 +#define FW_PORT_CMD_TXPAUSE_V(x) ((x) << FW_PORT_CMD_TXPAUSE_S) +#define FW_PORT_CMD_TXPAUSE_F FW_PORT_CMD_TXPAUSE_V(1U) + +#define FW_PORT_CMD_RXPAUSE_S 22 +#define FW_PORT_CMD_RXPAUSE_V(x) ((x) << FW_PORT_CMD_RXPAUSE_S) +#define FW_PORT_CMD_RXPAUSE_F FW_PORT_CMD_RXPAUSE_V(1U) + +#define FW_PORT_CMD_MDIOCAP_S 21 +#define FW_PORT_CMD_MDIOCAP_V(x) ((x) << FW_PORT_CMD_MDIOCAP_S) +#define FW_PORT_CMD_MDIOCAP_F FW_PORT_CMD_MDIOCAP_V(1U) + +#define FW_PORT_CMD_MDIOADDR_S 16 
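
The FW_PORT_CMD_* getters being converted here are what link-state handling decodes from a port information reply: link status as a flag, link speed and module type as masked fields. A small sketch of that _G extraction follows, assuming a fabricated status word; the macro definitions are reproduced from the patch so the example builds on its own.

/* Decoding a FW_PORT_CMD link-status word with the converted getters.
 * The sample word is fabricated; the field layout follows the macros.
 */
#include <stdio.h>
#include <stdint.h>

#define FW_PORT_CMD_LSTATUS_S	31
#define FW_PORT_CMD_LSTATUS_M	0x1
#define FW_PORT_CMD_LSTATUS_G(x) \
	(((x) >> FW_PORT_CMD_LSTATUS_S) & FW_PORT_CMD_LSTATUS_M)

#define FW_PORT_CMD_LSPEED_S	24
#define FW_PORT_CMD_LSPEED_M	0x3f
#define FW_PORT_CMD_LSPEED_G(x) \
	(((x) >> FW_PORT_CMD_LSPEED_S) & FW_PORT_CMD_LSPEED_M)

#define FW_PORT_CMD_MODTYPE_S	0
#define FW_PORT_CMD_MODTYPE_M	0x1f
#define FW_PORT_CMD_MODTYPE_G(x) \
	(((x) >> FW_PORT_CMD_MODTYPE_S) & FW_PORT_CMD_MODTYPE_M)

int main(void)
{
	uint32_t lstatus = 0x82000003;	/* fabricated sample reply word */

	printf("link up = %u\n", (unsigned)FW_PORT_CMD_LSTATUS_G(lstatus)); /* 1 */
	printf("speed   = 0x%02x\n", (unsigned)FW_PORT_CMD_LSPEED_G(lstatus)); /* 0x02 */
	printf("modtype = %u\n", (unsigned)FW_PORT_CMD_MODTYPE_G(lstatus)); /* 3 */
	return 0;
}
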
+#define FW_PORT_CMD_MDIOADDR_M 0x1f +#define FW_PORT_CMD_MDIOADDR_G(x) \ + (((x) >> FW_PORT_CMD_MDIOADDR_S) & FW_PORT_CMD_MDIOADDR_M) + +#define FW_PORT_CMD_LPTXPAUSE_S 15 +#define FW_PORT_CMD_LPTXPAUSE_V(x) ((x) << FW_PORT_CMD_LPTXPAUSE_S) +#define FW_PORT_CMD_LPTXPAUSE_F FW_PORT_CMD_LPTXPAUSE_V(1U) + +#define FW_PORT_CMD_LPRXPAUSE_S 14 +#define FW_PORT_CMD_LPRXPAUSE_V(x) ((x) << FW_PORT_CMD_LPRXPAUSE_S) +#define FW_PORT_CMD_LPRXPAUSE_F FW_PORT_CMD_LPRXPAUSE_V(1U) + +#define FW_PORT_CMD_PTYPE_S 8 +#define FW_PORT_CMD_PTYPE_M 0x1f +#define FW_PORT_CMD_PTYPE_G(x) \ + (((x) >> FW_PORT_CMD_PTYPE_S) & FW_PORT_CMD_PTYPE_M) + +#define FW_PORT_CMD_MODTYPE_S 0 +#define FW_PORT_CMD_MODTYPE_M 0x1f +#define FW_PORT_CMD_MODTYPE_V(x) ((x) << FW_PORT_CMD_MODTYPE_S) +#define FW_PORT_CMD_MODTYPE_G(x) \ + (((x) >> FW_PORT_CMD_MODTYPE_S) & FW_PORT_CMD_MODTYPE_M) + +#define FW_PORT_CMD_DCBXDIS_S 7 +#define FW_PORT_CMD_DCBXDIS_V(x) ((x) << FW_PORT_CMD_DCBXDIS_S) +#define FW_PORT_CMD_DCBXDIS_F FW_PORT_CMD_DCBXDIS_V(1U) + +#define FW_PORT_CMD_APPLY_S 7 +#define FW_PORT_CMD_APPLY_V(x) ((x) << FW_PORT_CMD_APPLY_S) +#define FW_PORT_CMD_APPLY_F FW_PORT_CMD_APPLY_V(1U) + +#define FW_PORT_CMD_ALL_SYNCD_S 7 +#define FW_PORT_CMD_ALL_SYNCD_V(x) ((x) << FW_PORT_CMD_ALL_SYNCD_S) +#define FW_PORT_CMD_ALL_SYNCD_F FW_PORT_CMD_ALL_SYNCD_V(1U) + +#define FW_PORT_CMD_DCB_VERSION_S 12 +#define FW_PORT_CMD_DCB_VERSION_M 0x7 +#define FW_PORT_CMD_DCB_VERSION_G(x) \ + (((x) >> FW_PORT_CMD_DCB_VERSION_S) & FW_PORT_CMD_DCB_VERSION_M) enum fw_port_type { FW_PORT_TYPE_FIBER_XFI, @@ -1817,7 +2472,7 @@ enum fw_port_type { FW_PORT_TYPE_QSFP, FW_PORT_TYPE_BP40_BA, - FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK + FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M }; enum fw_port_module_type { @@ -1828,11 +2483,11 @@ enum fw_port_module_type { FW_PORT_MOD_TYPE_TWINAX_PASSIVE, FW_PORT_MOD_TYPE_TWINAX_ACTIVE, FW_PORT_MOD_TYPE_LRM, - FW_PORT_MOD_TYPE_ERROR = FW_PORT_CMD_MODTYPE_MASK - 3, - FW_PORT_MOD_TYPE_UNKNOWN = FW_PORT_CMD_MODTYPE_MASK - 2, - FW_PORT_MOD_TYPE_NOTSUPPORTED = FW_PORT_CMD_MODTYPE_MASK - 1, + FW_PORT_MOD_TYPE_ERROR = FW_PORT_CMD_MODTYPE_M - 3, + FW_PORT_MOD_TYPE_UNKNOWN = FW_PORT_CMD_MODTYPE_M - 2, + FW_PORT_MOD_TYPE_NOTSUPPORTED = FW_PORT_CMD_MODTYPE_M - 1, - FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK + FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_M }; enum fw_port_mod_sub_type { @@ -1988,11 +2643,6 @@ struct fw_port_stats_cmd { } u; }; -#define FW_PORT_STATS_CMD_NSTATS(x) ((x) << 4) -#define FW_PORT_STATS_CMD_BG_BM(x) ((x) << 0) -#define FW_PORT_STATS_CMD_TX(x) ((x) << 7) -#define FW_PORT_STATS_CMD_IX(x) ((x) << 0) - /* port loopback stats */ #define FW_NUM_LB_STATS 16 enum fw_port_lb_stats_index { @@ -2048,22 +2698,13 @@ struct fw_port_lb_stats_cmd { } u; }; -#define FW_PORT_LB_STATS_CMD_LBPORT(x) ((x) << 0) -#define FW_PORT_LB_STATS_CMD_NSTATS(x) ((x) << 4) -#define FW_PORT_LB_STATS_CMD_BG_BM(x) ((x) << 0) -#define FW_PORT_LB_STATS_CMD_IX(x) ((x) << 0) - struct fw_rss_ind_tbl_cmd { __be32 op_to_viid; -#define FW_RSS_IND_TBL_CMD_VIID(x) ((x) << 0) __be32 retval_len16; __be16 niqid; __be16 startidx; __be32 r3; __be32 iq0_to_iq2; -#define FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << 20) -#define FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << 10) -#define FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << 0) __be32 iq3_to_iq5; __be32 iq6_to_iq8; __be32 iq9_to_iq11; @@ -2077,6 +2718,18 @@ struct fw_rss_ind_tbl_cmd { __be32 r15_lo; }; +#define FW_RSS_IND_TBL_CMD_VIID_S 0 +#define FW_RSS_IND_TBL_CMD_VIID_V(x) ((x) << FW_RSS_IND_TBL_CMD_VIID_S) + +#define 
FW_RSS_IND_TBL_CMD_IQ0_S 20 +#define FW_RSS_IND_TBL_CMD_IQ0_V(x) ((x) << FW_RSS_IND_TBL_CMD_IQ0_S) + +#define FW_RSS_IND_TBL_CMD_IQ1_S 10 +#define FW_RSS_IND_TBL_CMD_IQ1_V(x) ((x) << FW_RSS_IND_TBL_CMD_IQ1_S) + +#define FW_RSS_IND_TBL_CMD_IQ2_S 0 +#define FW_RSS_IND_TBL_CMD_IQ2_V(x) ((x) << FW_RSS_IND_TBL_CMD_IQ2_S) + struct fw_rss_glb_config_cmd { __be32 op_to_write; __be32 retval_len16; @@ -2090,27 +2743,75 @@ struct fw_rss_glb_config_cmd { struct fw_rss_glb_config_basicvirtual { __be32 mode_pkd; __be32 synmapen_to_hashtoeplitz; -#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN (1U << 8) -#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 (1U << 7) -#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 (1U << 6) -#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 (1U << 5) -#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 (1U << 4) -#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN (1U << 3) -#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN (1U << 2) -#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP (1U << 1) -#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ (1U << 0) __be64 r8; __be64 r9; } basicvirtual; } u; }; -#define FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << 28) -#define FW_RSS_GLB_CONFIG_CMD_MODE_GET(x) (((x) >> 28) & 0xf) +#define FW_RSS_GLB_CONFIG_CMD_MODE_S 28 +#define FW_RSS_GLB_CONFIG_CMD_MODE_M 0xf +#define FW_RSS_GLB_CONFIG_CMD_MODE_V(x) ((x) << FW_RSS_GLB_CONFIG_CMD_MODE_S) +#define FW_RSS_GLB_CONFIG_CMD_MODE_G(x) \ + (((x) >> FW_RSS_GLB_CONFIG_CMD_MODE_S) & FW_RSS_GLB_CONFIG_CMD_MODE_M) #define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0 #define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1 +#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_S 8 +#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_S) +#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F \ + FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_S 7 +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_S) +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F \ + FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_S 6 +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_S) +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F \ + FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_S 5 +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_S) +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F \ + FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_S 4 +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_S) +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F \ + FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_S 3 +#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_S) +#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F \ + FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_S 2 +#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_S) +#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F \ + FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_S 1 +#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_S) +#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F \ + FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_S 0 +#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_V(x) \ + ((x) << 
FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_S) +#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F \ + FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_V(1U) + struct fw_rss_vi_config_cmd { __be32 op_to_viid; #define FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << 0) @@ -2124,19 +2825,51 @@ struct fw_rss_vi_config_cmd { struct fw_rss_vi_config_basicvirtual { __be32 r6; __be32 defaultq_to_udpen; -#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) ((x) << 16) -#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(x) (((x) >> 16) & 0x3ff) -#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4) -#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN (1U << 3) -#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2) -#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN (1U << 1) -#define FW_RSS_VI_CONFIG_CMD_UDPEN (1U << 0) __be64 r9; __be64 r10; } basicvirtual; } u; }; +#define FW_RSS_VI_CONFIG_CMD_VIID_S 0 +#define FW_RSS_VI_CONFIG_CMD_VIID_V(x) ((x) << FW_RSS_VI_CONFIG_CMD_VIID_S) + +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_S 16 +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_M 0x3ff +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(x) \ + ((x) << FW_RSS_VI_CONFIG_CMD_DEFAULTQ_S) +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(x) \ + (((x) >> FW_RSS_VI_CONFIG_CMD_DEFAULTQ_S) & \ + FW_RSS_VI_CONFIG_CMD_DEFAULTQ_M) + +#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_S 4 +#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_V(x) \ + ((x) << FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_S) +#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F \ + FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_V(1U) + +#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_S 3 +#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_V(x) \ + ((x) << FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_S) +#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F \ + FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_V(1U) + +#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_S 2 +#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_V(x) \ + ((x) << FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_S) +#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F \ + FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_V(1U) + +#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_S 1 +#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_V(x) \ + ((x) << FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_S) +#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F \ + FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_V(1U) + +#define FW_RSS_VI_CONFIG_CMD_UDPEN_S 0 +#define FW_RSS_VI_CONFIG_CMD_UDPEN_V(x) ((x) << FW_RSS_VI_CONFIG_CMD_UDPEN_S) +#define FW_RSS_VI_CONFIG_CMD_UDPEN_F FW_RSS_VI_CONFIG_CMD_UDPEN_V(1U) + struct fw_clip_cmd { __be32 op_to_write; __be32 alloc_to_len16; @@ -2145,19 +2878,13 @@ struct fw_clip_cmd { __be32 r4[2]; }; -#define S_FW_CLIP_CMD_ALLOC 31 -#define M_FW_CLIP_CMD_ALLOC 0x1 -#define V_FW_CLIP_CMD_ALLOC(x) ((x) << S_FW_CLIP_CMD_ALLOC) -#define G_FW_CLIP_CMD_ALLOC(x) \ - (((x) >> S_FW_CLIP_CMD_ALLOC) & M_FW_CLIP_CMD_ALLOC) -#define F_FW_CLIP_CMD_ALLOC V_FW_CLIP_CMD_ALLOC(1U) +#define FW_CLIP_CMD_ALLOC_S 31 +#define FW_CLIP_CMD_ALLOC_V(x) ((x) << FW_CLIP_CMD_ALLOC_S) +#define FW_CLIP_CMD_ALLOC_F FW_CLIP_CMD_ALLOC_V(1U) -#define S_FW_CLIP_CMD_FREE 30 -#define M_FW_CLIP_CMD_FREE 0x1 -#define V_FW_CLIP_CMD_FREE(x) ((x) << S_FW_CLIP_CMD_FREE) -#define G_FW_CLIP_CMD_FREE(x) \ - (((x) >> S_FW_CLIP_CMD_FREE) & M_FW_CLIP_CMD_FREE) -#define F_FW_CLIP_CMD_FREE V_FW_CLIP_CMD_FREE(1U) +#define FW_CLIP_CMD_FREE_S 30 +#define FW_CLIP_CMD_FREE_V(x) ((x) << FW_CLIP_CMD_FREE_S) +#define FW_CLIP_CMD_FREE_F FW_CLIP_CMD_FREE_V(1U) enum fw_error_type { FW_ERROR_TYPE_EXCEPTION = 0x0, @@ -2196,7 +2923,6 @@ struct fw_error_cmd { struct fw_debug_cmd { __be32 op_type; -#define FW_DEBUG_CMD_TYPE_GET(x) ((x) & 0xff) __be32 len16_pkd; union fw_debug { struct fw_debug_assert { @@ -2219,19 +2945,35 @@ struct 
fw_debug_cmd { } u; }; -#define FW_PCIE_FW_ERR (1U << 31) -#define FW_PCIE_FW_INIT (1U << 30) -#define FW_PCIE_FW_HALT (1U << 29) -#define FW_PCIE_FW_MASTER_VLD (1U << 15) -#define FW_PCIE_FW_MASTER_MASK 0x7 -#define FW_PCIE_FW_MASTER_SHIFT 12 -#define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT) -#define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \ - FW_PCIE_FW_MASTER_MASK) -#define FW_PCIE_FW_EVAL_MASK 0x7 -#define FW_PCIE_FW_EVAL_SHIFT 24 -#define FW_PCIE_FW_EVAL_GET(x) (((x) >> FW_PCIE_FW_EVAL_SHIFT) & \ - FW_PCIE_FW_EVAL_MASK) +#define FW_DEBUG_CMD_TYPE_S 0 +#define FW_DEBUG_CMD_TYPE_M 0xff +#define FW_DEBUG_CMD_TYPE_G(x) \ + (((x) >> FW_DEBUG_CMD_TYPE_S) & FW_DEBUG_CMD_TYPE_M) + +#define PCIE_FW_ERR_S 31 +#define PCIE_FW_ERR_V(x) ((x) << PCIE_FW_ERR_S) +#define PCIE_FW_ERR_F PCIE_FW_ERR_V(1U) + +#define PCIE_FW_INIT_S 30 +#define PCIE_FW_INIT_V(x) ((x) << PCIE_FW_INIT_S) +#define PCIE_FW_INIT_F PCIE_FW_INIT_V(1U) + +#define PCIE_FW_HALT_S 29 +#define PCIE_FW_HALT_V(x) ((x) << PCIE_FW_HALT_S) +#define PCIE_FW_HALT_F PCIE_FW_HALT_V(1U) + +#define PCIE_FW_EVAL_S 24 +#define PCIE_FW_EVAL_M 0x7 +#define PCIE_FW_EVAL_G(x) (((x) >> PCIE_FW_EVAL_S) & PCIE_FW_EVAL_M) + +#define PCIE_FW_MASTER_VLD_S 15 +#define PCIE_FW_MASTER_VLD_V(x) ((x) << PCIE_FW_MASTER_VLD_S) +#define PCIE_FW_MASTER_VLD_F PCIE_FW_MASTER_VLD_V(1U) + +#define PCIE_FW_MASTER_S 12 +#define PCIE_FW_MASTER_M 0x7 +#define PCIE_FW_MASTER_V(x) ((x) << PCIE_FW_MASTER_S) +#define PCIE_FW_MASTER_G(x) (((x) >> PCIE_FW_MASTER_S) & PCIE_FW_MASTER_M) struct fw_hdr { u8 ver; @@ -2259,10 +3001,25 @@ enum fw_hdr_chip { FW_HDR_CHIP_T5 }; -#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) -#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) -#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) -#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff) +#define FW_HDR_FW_VER_MAJOR_S 24 +#define FW_HDR_FW_VER_MAJOR_M 0xff +#define FW_HDR_FW_VER_MAJOR_G(x) \ + (((x) >> FW_HDR_FW_VER_MAJOR_S) & FW_HDR_FW_VER_MAJOR_M) + +#define FW_HDR_FW_VER_MINOR_S 16 +#define FW_HDR_FW_VER_MINOR_M 0xff +#define FW_HDR_FW_VER_MINOR_G(x) \ + (((x) >> FW_HDR_FW_VER_MINOR_S) & FW_HDR_FW_VER_MINOR_M) + +#define FW_HDR_FW_VER_MICRO_S 8 +#define FW_HDR_FW_VER_MICRO_M 0xff +#define FW_HDR_FW_VER_MICRO_G(x) \ + (((x) >> FW_HDR_FW_VER_MICRO_S) & FW_HDR_FW_VER_MICRO_M) + +#define FW_HDR_FW_VER_BUILD_S 0 +#define FW_HDR_FW_VER_BUILD_M 0xff +#define FW_HDR_FW_VER_BUILD_G(x) \ + (((x) >> FW_HDR_FW_VER_BUILD_S) & FW_HDR_FW_VER_BUILD_M) enum fw_hdr_intfver { FW_HDR_INTFVER_NIC = 0x00, diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index 3d06e77d7121510e70c846c5d9a6c0e7526aa736..d00a751f0588d8c65d6060352af578895e5d9f6d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -138,6 +138,8 @@ struct sge_fl { struct rx_sw_desc *sdesc; /* address of SW RX descriptor ring */ __be64 *desc; /* address of HW RX descriptor ring */ dma_addr_t addr; /* PCI bus address of hardware ring */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ }; /* @@ -178,6 +180,8 @@ struct sge_rspq { u16 abs_id; /* SGE abs QID for the response Q */ __be64 *desc; /* address of hardware response ring */ dma_addr_t phys_addr; /* PCI bus address of ring */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue 
registers */ unsigned int iqe_len; /* entry size */ unsigned int size; /* capcity of response Q */ struct adapter *adapter; /* our adapter */ @@ -240,6 +244,8 @@ struct sge_txq { struct tx_sw_desc *sdesc; /* address of SW TX descriptor ring */ struct sge_qstat *stat; /* queue status entry */ dma_addr_t phys_addr; /* PCI bus address of hardware ring */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ }; /* @@ -345,6 +351,7 @@ struct sge { struct adapter { /* PCI resources */ void __iomem *regs; + void __iomem *bar2; struct pci_dev *pdev; struct device *pdev_dev; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 0b42bddaf28443dbdc9317feee3dc43fcab8ade9..aa74ec34a4679cbff1905e2af7da5bfcdf71999f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1030,10 +1030,10 @@ static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq, pktcnt_idx = closest_thres(&adapter->sge, cnt); if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) { - v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | - FW_PARAMS_PARAM_X( + v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | + FW_PARAMS_PARAM_X_V( FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | - FW_PARAMS_PARAM_YZ(rspq->cntxt_id); + FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id); err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx); if (err) return err; @@ -1230,14 +1230,14 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev, sizeof(drvinfo->bus_info)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%u.%u.%u.%u, TP %u.%u.%u.%u", - FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev), - FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.fwrev), - FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.fwrev), - FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.fwrev), - FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.tprev), - FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.tprev), - FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.tprev), - FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.tprev)); + FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev), + FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev), + FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev), + FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev), + FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev), + FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev), + FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev), + FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev)); } /* @@ -2095,7 +2095,6 @@ static int adap_init0(struct adapter *adapter) unsigned int ethqsets; int err; u32 param, val = 0; - unsigned int chipid; /* * Wait for the device to become ready before proceeding ... @@ -2123,17 +2122,6 @@ static int adap_init0(struct adapter *adapter) return err; } - adapter->params.chip = 0; - switch (adapter->pdev->device >> 12) { - case CHELSIO_T4: - adapter->params.chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0); - break; - case CHELSIO_T5: - chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV)); - adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid); - break; - } - /* * Grab basic operational parameters. These will predominantly have * been set up by the Physical Function Driver or will be hard coded @@ -2184,8 +2172,8 @@ static int adap_init0(struct adapter *adapter) * firmware won't understand this and we'll just get * unencapsulated messages ... 
*/ - param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP); + param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP); val = 1; (void) t4vf_set_params(adapter, 1, ¶m, &val); @@ -2594,6 +2582,27 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, goto err_free_adapter; } + /* Wait for the device to become ready before proceeding ... + */ + err = t4vf_prep_adapter(adapter); + if (err) { + dev_err(adapter->pdev_dev, "device didn't become ready:" + " err=%d\n", err); + goto err_unmap_bar0; + } + + /* For T5 and later we want to use the new BAR-based User Doorbells, + * so we need to map BAR2 here ... + */ + if (!is_t4(adapter->params.chip)) { + adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), + pci_resource_len(pdev, 2)); + if (!adapter->bar2) { + dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n"); + err = -ENOMEM; + goto err_unmap_bar0; + } + } /* * Initialize adapter level features. */ @@ -2786,6 +2795,10 @@ err_free_dev: } err_unmap_bar: + if (!is_t4(adapter->params.chip)) + iounmap(adapter->bar2); + +err_unmap_bar0: iounmap(adapter->regs); err_free_adapter: @@ -2856,6 +2869,8 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev) free_netdev(netdev); } iounmap(adapter->regs); + if (!is_t4(adapter->params.chip)) + iounmap(adapter->bar2); kfree(adapter); } @@ -2908,67 +2923,18 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); } -/* - * PCI Device registration data structures. - */ -#define CH_DEVICE(devid) \ - { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 } - -static const struct pci_device_id cxgb4vf_pci_tbl[] = { - CH_DEVICE(0xb000), /* PE10K FPGA */ - CH_DEVICE(0x4801), /* T420-cr */ - CH_DEVICE(0x4802), /* T422-cr */ - CH_DEVICE(0x4803), /* T440-cr */ - CH_DEVICE(0x4804), /* T420-bch */ - CH_DEVICE(0x4805), /* T440-bch */ - CH_DEVICE(0x4806), /* T460-ch */ - CH_DEVICE(0x4807), /* T420-so */ - CH_DEVICE(0x4808), /* T420-cx */ - CH_DEVICE(0x4809), /* T420-bt */ - CH_DEVICE(0x480a), /* T404-bt */ - CH_DEVICE(0x480d), /* T480-cr */ - CH_DEVICE(0x480e), /* T440-lp-cr */ - CH_DEVICE(0x4880), - CH_DEVICE(0x4881), - CH_DEVICE(0x4882), - CH_DEVICE(0x4883), - CH_DEVICE(0x4884), - CH_DEVICE(0x4885), - CH_DEVICE(0x4886), - CH_DEVICE(0x4887), - CH_DEVICE(0x4888), - CH_DEVICE(0x5801), /* T520-cr */ - CH_DEVICE(0x5802), /* T522-cr */ - CH_DEVICE(0x5803), /* T540-cr */ - CH_DEVICE(0x5804), /* T520-bch */ - CH_DEVICE(0x5805), /* T540-bch */ - CH_DEVICE(0x5806), /* T540-ch */ - CH_DEVICE(0x5807), /* T520-so */ - CH_DEVICE(0x5808), /* T520-cx */ - CH_DEVICE(0x5809), /* T520-bt */ - CH_DEVICE(0x580a), /* T504-bt */ - CH_DEVICE(0x580b), /* T520-sr */ - CH_DEVICE(0x580c), /* T504-bt */ - CH_DEVICE(0x580d), /* T580-cr */ - CH_DEVICE(0x580e), /* T540-lp-cr */ - CH_DEVICE(0x580f), /* Amsterdam */ - CH_DEVICE(0x5810), /* T580-lp-cr */ - CH_DEVICE(0x5811), /* T520-lp-cr */ - CH_DEVICE(0x5812), /* T560-cr */ - CH_DEVICE(0x5813), /* T580-cr */ - CH_DEVICE(0x5814), /* T580-so-cr */ - CH_DEVICE(0x5815), /* T502-bt */ - CH_DEVICE(0x5880), - CH_DEVICE(0x5881), - CH_DEVICE(0x5882), - CH_DEVICE(0x5883), - CH_DEVICE(0x5884), - CH_DEVICE(0x5885), - CH_DEVICE(0x5886), - CH_DEVICE(0x5887), - CH_DEVICE(0x5888), - { 0, } -}; +/* Macros needed to support the PCI Device ID Table ... 
+ */ +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ + static struct pci_device_id cxgb4vf_pci_tbl[] = { +#define CH_PCI_DEVICE_ID_FUNCTION 0x8 + +#define CH_PCI_ID_TABLE_ENTRY(devid) \ + { PCI_VDEVICE(CHELSIO, (devid)), 0 } + +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } } + +#include "../cxgb4/t4_pci_id_tbl.h" MODULE_DESCRIPTION(DRV_DESC); MODULE_AUTHOR("Chelsio Communications"); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index fdd078d7d82c661ec5c63e8198c9ae6d464366c3..f7fd1317d99675515b78dec60b7fe1b3e5a228c5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -118,7 +118,7 @@ enum { * we can specify for immediate data in the firmware Ethernet TX * Work Request. */ - MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_MASK, + MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_M, /* * Max size of a WR sent through a control TX queue. @@ -525,19 +525,40 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) { u32 val; - /* - * The SGE keeps track of its Producer and Consumer Indices in terms + /* The SGE keeps track of its Producer and Consumer Indices in terms * of Egress Queue Units so we can only tell it about integral numbers * of multiples of Free List Entries per Egress Queue Units ... */ if (fl->pend_cred >= FL_PER_EQ_UNIT) { - val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT); - if (!is_t4(adapter->params.chip)) - val |= DBTYPE(1); + if (is_t4(adapter->params.chip)) + val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT); + else + val = PIDX_T5(fl->pend_cred / FL_PER_EQ_UNIT) | + DBTYPE(1); + val |= DBPRIO(1); + + /* Make sure all memory writes to the Free List queue are + * committed before we tell the hardware about them. + */ wmb(); - t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, - DBPRIO(1) | - QID(fl->cntxt_id) | val); + + /* If we don't have access to the new User Doorbell (T5+), use + * the old doorbell mechanism; otherwise use the new BAR2 + * mechanism. + */ + if (unlikely(fl->bar2_addr == NULL)) { + t4_write_reg(adapter, + T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, + QID(fl->cntxt_id) | val); + } else { + writel(val | QID(fl->bar2_qid), + fl->bar2_addr + SGE_UDB_KDOORBELL); + + /* This Write memory Barrier will force the write to + * the User Doorbell area to be flushed. + */ + wmb(); + } fl->pend_cred %= FL_PER_EQ_UNIT; } } @@ -598,6 +619,8 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, */ BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT); + gfp |= __GFP_NOWARN; + /* * If we support large pages, prefer large buffers and fail over to * small pages if we can't allocate large pages to satisfy the refill. 
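The hunk that follows replaces the driver's open-coded alloc_pages()/__skb_alloc_page() calls with the generic __dev_alloc_pages()/__dev_alloc_page() helpers added to include/linux/skbuff.h in this same kernel cycle. A sketch of roughly what those helpers do (consult skbuff.h for the authoritative definitions) shows why the driver above now only needs to OR in __GFP_NOWARN itself -- the remaining networking-specific GFP flags are centralized in the helper:

/* Approximate definition of the helpers the hunk below switches to.  They
 * fold in the flags networking RX buffers want: cache-cold pages, compound
 * pages for order > 0, and access to the memory reserves.
 */
static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
					     unsigned int order)
{
	gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;
	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}

static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
{
	return __dev_alloc_pages(gfp_mask, 0);
}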
@@ -608,8 +631,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, goto alloc_small_pages; while (n) { - page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, - s->fl_pg_order); + page = __dev_alloc_pages(gfp, s->fl_pg_order); if (unlikely(!page)) { /* * We've failed inour attempt to allocate a "large @@ -653,7 +675,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, alloc_small_pages: while (n--) { - page = __skb_alloc_page(gfp | __GFP_NOWARN, NULL); + page = __dev_alloc_page(gfp); if (unlikely(!page)) { fl->alloc_failed++; break; @@ -902,7 +924,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq, sgl->addr0 = cpu_to_be64(addr[1]); } - sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | + sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags)); if (likely(--nfrags == 0)) return; @@ -948,14 +970,74 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq, static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, int n) { - /* - * Warn if we write doorbells with the wrong priority and write - * descriptors before telling HW. + /* Make sure that all writes to the TX Descriptors are committed + * before we tell the hardware about them. */ - WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO(1)); wmb(); - t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, - QID(tq->cntxt_id) | PIDX(n)); + + /* If we don't have access to the new User Doorbell (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(tq->bar2_addr == NULL)) { + u32 val = PIDX(n); + + t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, + QID(tq->cntxt_id) | val); + } else { + u32 val = PIDX_T5(n); + + /* T4 and later chips share the same PIDX field offset within + * the doorbell, but T5 and later shrank the field in order to + * gain a bit for Doorbell Priority. The field was absurdly + * large in the first place (14 bits) so we just use the T5 + * and later limits and warn if a Queue ID is too large. + */ + WARN_ON(val & DBPRIO(1)); + + /* If we're only writing a single Egress Unit and the BAR2 + * Queue ID is 0, we can use the Write Combining Doorbell + * Gather Buffer; otherwise we use the simple doorbell. + */ + if (n == 1 && tq->bar2_qid == 0) { + unsigned int index = (tq->pidx + ? (tq->pidx - 1) + : (tq->size - 1)); + __be64 *src = (__be64 *)&tq->desc[index]; + __be64 __iomem *dst = (__be64 *)(tq->bar2_addr + + SGE_UDB_WCDOORBELL); + unsigned int count = EQ_UNIT / sizeof(__be64); + + /* Copy the TX Descriptor in a tight loop in order to + * try to get it to the adapter in a single Write + * Combined transfer on the PCI-E Bus. If the Write + * Combine fails (say because of an interrupt, etc.) + * the hardware will simply take the last write as a + * simple doorbell write with a PIDX Increment of 1 + * and will fetch the TX Descriptor from memory via + * DMA. + */ + while (count) { + writeq(*src, dst); + src++; + dst++; + count--; + } + } else + writel(val | QID(tq->bar2_qid), + tq->bar2_addr + SGE_UDB_KDOORBELL); + + /* This Write Memory Barrier will force the write to the User + * Doorbell area to be flushed. This is needed to prevent + * writes on different CPUs for the same queue from hitting + * the adapter out of order. 
This is required when some Work + * Requests take the Write Combine Gather Buffer path (user + * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some + * take the traditional path where we simply increment the + * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the + * hardware DMA read the actual Work Request. + */ + wmb(); + } } /** @@ -1145,7 +1227,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) goto out_free; } - wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2)); + wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); if (unlikely(credits < ETHTXQ_STOP_THRES)) { /* * After we're done injecting the Work Request for this @@ -1157,7 +1239,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) * has opened up. */ txq_stop(txq); - wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ; + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; } /* @@ -1187,9 +1269,9 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; wr->op_immdlen = - cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | - FW_WR_IMMDLEN(sizeof(*lso) + - sizeof(*cpl))); + cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | + FW_WR_IMMDLEN_V(sizeof(*lso) + + sizeof(*cpl))); /* * Fill in the LSO CPL message. */ @@ -1224,8 +1306,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl); wr->op_immdlen = - cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | - FW_WR_IMMDLEN(len)); + cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | + FW_WR_IMMDLEN_V(len)); /* * Set up TX Packet CPL pointer, control word and perform @@ -1781,6 +1863,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) unsigned int intr_params; struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi); int work_done = process_responses(rspq, budget); + u32 val; if (likely(work_done < budget)) { napi_complete(napi); @@ -1792,11 +1875,16 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) if (unlikely(work_done == 0)) rspq->unhandled_irqs++; - t4_write_reg(rspq->adapter, - T4VF_SGE_BASE_ADDR + SGE_VF_GTS, - CIDXINC(work_done) | - INGRESSQID((u32)rspq->cntxt_id) | - SEINTARM(intr_params)); + val = CIDXINC(work_done) | SEINTARM(intr_params); + if (is_t4(rspq->adapter->params.chip)) { + t4_write_reg(rspq->adapter, + T4VF_SGE_BASE_ADDR + SGE_VF_GTS, + val | INGRESSQID((u32)rspq->cntxt_id)); + } else { + writel(val | INGRESSQID(rspq->bar2_qid), + rspq->bar2_addr + SGE_UDB_GTS); + wmb(); + } return work_done; } @@ -1821,6 +1909,7 @@ static unsigned int process_intrq(struct adapter *adapter) struct sge *s = &adapter->sge; struct sge_rspq *intrq = &s->intrq; unsigned int work_done; + u32 val; spin_lock(&adapter->sge.intrq_lock); for (work_done = 0; ; work_done++) { @@ -1886,10 +1975,15 @@ static unsigned int process_intrq(struct adapter *adapter) rspq_next(intrq); } - t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, - CIDXINC(work_done) | - INGRESSQID(intrq->cntxt_id) | - SEINTARM(intrq->intr_params)); + val = CIDXINC(work_done) | SEINTARM(intrq->intr_params); + if (is_t4(adapter->params.chip)) + t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, + val | INGRESSQID(intrq->cntxt_id)); + else { + writel(val | INGRESSQID(intrq->bar2_qid), + intrq->bar2_addr + SGE_UDB_GTS); + wmb(); + } spin_unlock(&adapter->sge.intrq_lock); @@ -2034,6 +2128,35 @@ static void sge_tx_timer_cb(unsigned long data) mod_timer(&s->tx_timer, jiffies + (budget ? 
TX_QCHECK_PERIOD : 2)); } +/** + * bar2_address - return the BAR2 address for an SGE Queue's Registers + * @adapter: the adapter + * @qid: the SGE Queue ID + * @qtype: the SGE Queue Type (Egress or Ingress) + * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues + * + * Returns the BAR2 address for the SGE Queue Registers associated with + * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also + * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE + * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID" + * Registers are supported (e.g. the Write Combining Doorbell Buffer). + */ +static void __iomem *bar2_address(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + unsigned int *pbar2_qid) +{ + u64 bar2_qoffset; + int ret; + + ret = t4_bar2_sge_qregs(adapter, qid, qtype, + &bar2_qoffset, pbar2_qid); + if (ret) + return NULL; + + return adapter->bar2 + bar2_qoffset; +} + /** * t4vf_sge_alloc_rxq - allocate an SGE RX Queue * @adapter: the adapter @@ -2087,26 +2210,26 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, * into OS-independent common code ... */ memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_CMD_EXEC); - cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC | - FW_IQ_CMD_IQSTART(1) | + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_CMD_EXEC_F); + cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC_F | + FW_IQ_CMD_IQSTART_F | FW_LEN16(cmd)); cmd.type_to_iqandstindex = - cpu_to_be32(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | - FW_IQ_CMD_IQASYNCH(iqasynch) | - FW_IQ_CMD_VIID(pi->viid) | - FW_IQ_CMD_IQANDST(iqandst) | - FW_IQ_CMD_IQANUS(1) | - FW_IQ_CMD_IQANUD(SGE_UPDATEDEL_INTR) | - FW_IQ_CMD_IQANDSTINDEX(intr_dest)); + cpu_to_be32(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) | + FW_IQ_CMD_IQASYNCH_V(iqasynch) | + FW_IQ_CMD_VIID_V(pi->viid) | + FW_IQ_CMD_IQANDST_V(iqandst) | + FW_IQ_CMD_IQANUS_V(1) | + FW_IQ_CMD_IQANUD_V(SGE_UPDATEDEL_INTR) | + FW_IQ_CMD_IQANDSTINDEX_V(intr_dest)); cmd.iqdroprss_to_iqesize = - cpu_to_be16(FW_IQ_CMD_IQPCIECH(pi->port_id) | - FW_IQ_CMD_IQGTSMODE | - FW_IQ_CMD_IQINTCNTTHRESH(rspq->pktcnt_idx) | - FW_IQ_CMD_IQESIZE(ilog2(rspq->iqe_len) - 4)); + cpu_to_be16(FW_IQ_CMD_IQPCIECH_V(pi->port_id) | + FW_IQ_CMD_IQGTSMODE_F | + FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) | + FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4)); cmd.iqsize = cpu_to_be16(rspq->size); cmd.iqaddr = cpu_to_be64(rspq->phys_addr); @@ -2140,13 +2263,13 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, */ cmd.iqns_to_fl0congen = cpu_to_be32( - FW_IQ_CMD_FL0HOSTFCMODE(SGE_HOSTFCMODE_NONE) | - FW_IQ_CMD_FL0PACKEN(1) | - FW_IQ_CMD_FL0PADEN(1)); + FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) | + FW_IQ_CMD_FL0PACKEN_F | + FW_IQ_CMD_FL0PADEN_F); cmd.fl0dcaen_to_fl0cidxfthresh = cpu_to_be16( - FW_IQ_CMD_FL0FBMIN(SGE_FETCHBURSTMIN_64B) | - FW_IQ_CMD_FL0FBMAX(SGE_FETCHBURSTMAX_512B)); + FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) | + FW_IQ_CMD_FL0FBMAX_V(SGE_FETCHBURSTMAX_512B)); cmd.fl0size = cpu_to_be16(flsz); cmd.fl0addr = cpu_to_be64(fl->addr); } @@ -2165,6 +2288,10 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, rspq->gen = 1; rspq->next_intr_params = rspq->intr_params; rspq->cntxt_id = be16_to_cpu(rpl.iqid); + rspq->bar2_addr = bar2_address(adapter, + rspq->cntxt_id, + T4_BAR2_QTYPE_INGRESS, + &rspq->bar2_qid); rspq->abs_id = 
be16_to_cpu(rpl.physiqid); rspq->size--; /* subtract status entry */ rspq->adapter = adapter; @@ -2183,6 +2310,15 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, fl->alloc_failed = 0; fl->large_alloc_failed = 0; fl->starving = 0; + + /* Note, we must initialize the BAR2 Free List User Doorbell + * information before refilling the Free List! + */ + fl->bar2_addr = bar2_address(adapter, + fl->cntxt_id, + T4_BAR2_QTYPE_EGRESS, + &fl->bar2_qid); + refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL); } @@ -2250,24 +2386,25 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, * into the common code ... */ memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_CMD_EXEC); - cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC | - FW_EQ_ETH_CMD_EQSTART | + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_CMD_EXEC_F); + cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F | + FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(cmd)); - cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE | - FW_EQ_ETH_CMD_VIID(pi->viid)); + cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F | + FW_EQ_ETH_CMD_VIID_V(pi->viid)); cmd.fetchszm_to_iqid = - cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) | - FW_EQ_ETH_CMD_PCIECHN(pi->port_id) | - FW_EQ_ETH_CMD_IQID(iqid)); + cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) | + FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) | + FW_EQ_ETH_CMD_IQID_V(iqid)); cmd.dcaen_to_eqsize = - cpu_to_be32(FW_EQ_ETH_CMD_FBMIN(SGE_FETCHBURSTMIN_64B) | - FW_EQ_ETH_CMD_FBMAX(SGE_FETCHBURSTMAX_512B) | - FW_EQ_ETH_CMD_CIDXFTHRESH(SGE_CIDXFLUSHTHRESH_32) | - FW_EQ_ETH_CMD_EQSIZE(nentries)); + cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(SGE_FETCHBURSTMIN_64B) | + FW_EQ_ETH_CMD_FBMAX_V(SGE_FETCHBURSTMAX_512B) | + FW_EQ_ETH_CMD_CIDXFTHRESH_V( + SGE_CIDXFLUSHTHRESH_32) | + FW_EQ_ETH_CMD_EQSIZE_V(nentries)); cmd.eqaddr = cpu_to_be64(txq->q.phys_addr); /* @@ -2293,9 +2430,13 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, txq->q.cidx = 0; txq->q.pidx = 0; txq->q.stat = (void *)&txq->q.desc[txq->q.size]; - txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_GET(be32_to_cpu(rpl.eqid_pkd)); + txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd)); + txq->q.bar2_addr = bar2_address(adapter, + txq->q.cntxt_id, + T4_BAR2_QTYPE_EGRESS, + &txq->q.bar2_qid); txq->q.abs_id = - FW_EQ_ETH_CMD_PHYSEQID_GET(be32_to_cpu(rpl.physeqid_pkd)); + FW_EQ_ETH_CMD_PHYSEQID_G(be32_to_cpu(rpl.physeqid_pkd)); txq->txq = devq; txq->tso = 0; txq->tx_cso = 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 4b6a6d14d86d98df42bea1de5a6a507d9820374e..8d3237f5e36493aca04ba2fdb952ef4a148fee9c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -67,7 +67,7 @@ enum chip_type { /* * The "len16" field of a Firmware Command Structure ... */ -#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16) +#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16) /* * Per-VF statistics. @@ -135,9 +135,12 @@ struct dev_params { struct sge_params { u32 sge_control; /* padding, boundaries, lengths, etc. 
*/ u32 sge_control2; /* T5: more of the same */ - u32 sge_host_page_size; /* RDMA page sizes */ - u32 sge_queues_per_page; /* RDMA queues/page */ - u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */ + u32 sge_host_page_size; /* PF0-7 page sizes */ + u32 sge_egress_queues_per_page; /* PF0-7 egress queues/page */ + u32 sge_ingress_queues_per_page;/* PF0-7 ingress queues/page */ + u32 sge_vf_hps; /* host page size for our vf */ + u32 sge_vf_eq_qpp; /* egress queues/page for our VF */ + u32 sge_vf_iq_qpp; /* ingress queues/page for our VF */ u32 sge_fl_buffer_size[16]; /* free list buffer sizes */ u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */ u32 sge_congestion_control; /* congestion thresholds, etc. */ @@ -267,6 +270,8 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd, return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false); } +#define CHELSIO_PCI_ID_VER(dev_id) ((dev_id) >> 12) + static inline int is_t4(enum chip_type chip) { return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4; @@ -278,6 +283,13 @@ int t4vf_port_init(struct adapter *, int); int t4vf_fw_reset(struct adapter *); int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *); +enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; +int t4_bar2_sge_qregs(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid); + int t4vf_get_sge_params(struct adapter *); int t4vf_get_vpd_params(struct adapter *); int t4vf_get_dev_params(struct adapter *); @@ -309,5 +321,6 @@ int t4vf_iq_free(struct adapter *, unsigned int, unsigned int, unsigned int, int t4vf_eth_eq_free(struct adapter *, unsigned int); int t4vf_handle_fw_rpl(struct adapter *, const __be64 *); +int t4vf_prep_adapter(struct adapter *); #endif /* __T4VF_COMMON_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 1e896b923234f6d1b2e21da52fd73b010d45f214..02e8833b7797af63e3c1d43f0ac5ce75450e3c76 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -204,20 +204,20 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, /* return value in low-order little-endian word */ v = t4_read_reg(adapter, mbox_data); - if (FW_CMD_RETVAL_GET(v)) + if (FW_CMD_RETVAL_G(v)) dump_mbox(adapter, "FW Error", mbox_data); if (rpl) { /* request bit in high-order BE word */ WARN_ON((be32_to_cpu(*(const u32 *)cmd) - & FW_CMD_REQUEST) == 0); + & FW_CMD_REQUEST_F) == 0); get_mbox_rpl(adapter, rpl, size, mbox_data); WARN_ON((be32_to_cpu(*(u32 *)rpl) - & FW_CMD_REQUEST) != 0); + & FW_CMD_REQUEST_F) != 0); } t4_write_reg(adapter, mbox_ctl, MBOWNER(MBOX_OWNER_NONE)); - return -FW_CMD_RETVAL_GET(v); + return -FW_CMD_RETVAL_G(v); } } @@ -287,17 +287,17 @@ int t4vf_port_init(struct adapter *adapter, int pidx) * like MAC address, etc. 
*/ memset(&vi_cmd, 0, sizeof(vi_cmd)); - vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ); + vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd)); - vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(pi->viid)); + vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid)); v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl); if (v) return v; - BUG_ON(pi->port_id != FW_VI_CMD_PORTID_GET(vi_rpl.portid_pkd)); - pi->rss_size = FW_VI_CMD_RSSSIZE_GET(be16_to_cpu(vi_rpl.rsssize_pkd)); + BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd)); + pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd)); t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac); /* @@ -308,12 +308,12 @@ int t4vf_port_init(struct adapter *adapter, int pidx) return 0; memset(&port_cmd, 0, sizeof(port_cmd)); - port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP(FW_PORT_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ | - FW_PORT_CMD_PORTID(pi->port_id)); + port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_PORT_CMD_PORTID_V(pi->port_id)); port_cmd.action_to_len16 = - cpu_to_be32(FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | + cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | FW_LEN16(port_cmd)); v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl); if (v) @@ -349,8 +349,8 @@ int t4vf_fw_reset(struct adapter *adapter) struct fw_reset_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) | - FW_CMD_WRITE); + cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) | + FW_CMD_WRITE_F); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); } @@ -377,12 +377,12 @@ static int t4vf_query_params(struct adapter *adapter, unsigned int nparams, return -EINVAL; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd, param[nparams].mnem), 16); - cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16)); + cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) p->mnem = htonl(*params++); @@ -415,12 +415,12 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams, return -EINVAL; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F); len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd, param[nparams]), 16); - cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16)); + cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) { p->mnem = cpu_to_be32(*params++); p->val = cpu_to_be32(*vals++); @@ -429,6 +429,95 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams, return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); } +/** + * t4_bar2_sge_qregs - return BAR2 SGE Queue register information + * @adapter: the adapter + * @qid: the Queue ID + * @qtype: the Ingress or Egress type for @qid + * @pbar2_qoffset: BAR2 Queue Offset + * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues + * + * Returns the BAR2 SGE Queue Registers information 
associated with the + * indicated Absolute Queue ID. These are passed back in return value + * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue + * and T4_BAR2_QTYPE_INGRESS for Ingress Queues. + * + * This may return an error which indicates that BAR2 SGE Queue + * registers aren't available. If an error is not returned, then the + * following values are returned: + * + * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers + * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid + * + * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which + * require the "Inferred Queue ID" ability may be used. E.g. the + * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0, + * then these "Inferred Queue ID" registers may not be used. + */ +int t4_bar2_sge_qregs(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid) +{ + unsigned int page_shift, page_size, qpp_shift, qpp_mask; + u64 bar2_page_offset, bar2_qoffset; + unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred; + + /* T4 doesn't support BAR2 SGE Queue registers. + */ + if (is_t4(adapter->params.chip)) + return -EINVAL; + + /* Get our SGE Page Size parameters. + */ + page_shift = adapter->params.sge.sge_vf_hps + 10; + page_size = 1 << page_shift; + + /* Get the right Queues per Page parameters for our Queue. + */ + qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS + ? adapter->params.sge.sge_vf_eq_qpp + : adapter->params.sge.sge_vf_iq_qpp); + qpp_mask = (1 << qpp_shift) - 1; + + /* Calculate the basics of the BAR2 SGE Queue register area: + * o The BAR2 page the Queue registers will be in. + * o The BAR2 Queue ID. + * o The BAR2 Queue ID Offset into the BAR2 page. + */ + bar2_page_offset = ((qid >> qpp_shift) << page_shift); + bar2_qid = qid & qpp_mask; + bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; + + /* If the BAR2 Queue ID Offset is less than the Page Size, then the + * hardware will infer the Absolute Queue ID simply from the writes to + * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a + * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply + * write to the first BAR2 SGE Queue Area within the BAR2 Page with + * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID + * from the BAR2 Page and BAR2 Queue ID. + * + * One important consequence of this is that some BAR2 SGE registers + * have a "Queue ID" field and we can write the BAR2 SGE Queue ID + * there. But other registers synthesize the SGE Queue ID purely + * from the writes to the registers -- the Write Combined Doorbell + * Buffer is a good example. These BAR2 SGE Registers are only + * available for those BAR2 SGE Register areas where the SGE Absolute + * Queue ID can be inferred from simple writes.
+ */ + bar2_qoffset = bar2_page_offset; + bar2_qinferred = (bar2_qid_offset < page_size); + if (bar2_qinferred) { + bar2_qoffset += bar2_qid_offset; + bar2_qid = 0; + } + + *pbar2_qoffset = bar2_qoffset; + *pbar2_qid = bar2_qid; + return 0; +} + /** * t4vf_get_sge_params - retrieve adapter Scatter gather Engine parameters * @adapter: the adapter @@ -443,20 +532,20 @@ int t4vf_get_sge_params(struct adapter *adapter) u32 params[7], vals[7]; int v; - params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_CONTROL)); - params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_HOST_PAGE_SIZE)); - params[2] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE0)); - params[3] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE1)); - params[4] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_0_AND_1)); - params[5] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_2_AND_3)); - params[6] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_4_AND_5)); + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL)); + params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE)); + params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0)); + params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1)); + params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1)); + params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3)); + params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5)); v = t4vf_query_params(adapter, 7, params, vals); if (v) return v; @@ -479,8 +568,8 @@ int t4vf_get_sge_params(struct adapter *adapter) * right value. */ if (!is_t4(adapter->params.chip)) { - params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_CONTROL2_A)); + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL2_A)); v = t4vf_query_params(adapter, 1, params, vals); if (v != FW_SUCCESS) { dev_err(adapter->pdev_dev, @@ -491,16 +580,65 @@ int t4vf_get_sge_params(struct adapter *adapter) sge_params->sge_control2 = vals[0]; } - params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD)); - params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_CONM_CTRL)); + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD)); + params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL)); v = t4vf_query_params(adapter, 2, params, vals); if (v) return v; sge_params->sge_ingress_rx_threshold = vals[0]; sge_params->sge_congestion_control = vals[1]; + /* For T5 and later we want to use the new BAR2 Doorbells. + * Unfortunately, older firmware didn't allow this register to be + * read.
+ */ + if (!is_t4(adapter->params.chip)) { + u32 whoami; + unsigned int pf, s_hps, s_qpp; + + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V( + SGE_EGRESS_QUEUES_PER_PAGE_VF_A)); + params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V( + SGE_INGRESS_QUEUES_PER_PAGE_VF_A)); + v = t4vf_query_params(adapter, 2, params, vals); + if (v != FW_SUCCESS) { + dev_warn(adapter->pdev_dev, + "Unable to get VF SGE Queues/Page; " + "probably old firmware.\n"); + return v; + } + sge_params->sge_egress_queues_per_page = vals[0]; + sge_params->sge_ingress_queues_per_page = vals[1]; + + /* We need the Queues/Page for our VF. This is based on the + * PF from which we're instantiated and is indexed in the + * register we just read. Do it once here so other code in + * the driver can just use it. + */ + whoami = t4_read_reg(adapter, + T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI); + pf = SOURCEPF_GET(whoami); + + s_hps = (HOSTPAGESIZEPF0_S + + (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf); + sge_params->sge_vf_hps = + ((sge_params->sge_host_page_size >> s_hps) + & HOSTPAGESIZEPF0_M); + + s_qpp = (QUEUESPERPAGEPF0_S + + (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf); + sge_params->sge_vf_eq_qpp = + ((sge_params->sge_egress_queues_per_page >> s_qpp) + & QUEUESPERPAGEPF0_MASK); + sge_params->sge_vf_iq_qpp = + ((sge_params->sge_ingress_queues_per_page >> s_qpp) + & QUEUESPERPAGEPF0_MASK); + } + return 0; } @@ -517,8 +655,8 @@ int t4vf_get_vpd_params(struct adapter *adapter) u32 params[7], vals[7]; int v; - params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK)); + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK)); v = t4vf_query_params(adapter, 1, params, vals); if (v) return v; @@ -540,10 +678,10 @@ int t4vf_get_dev_params(struct adapter *adapter) u32 params[7], vals[7]; int v; - params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV)); - params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV)); + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV)); + params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPREV)); v = t4vf_query_params(adapter, 2, params, vals); if (v) return v; @@ -571,9 +709,9 @@ int t4vf_get_rss_glb_config(struct adapter *adapter) * our RSS configuration. */ memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ); + cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); if (v) @@ -585,7 +723,7 @@ int t4vf_get_rss_glb_config(struct adapter *adapter) * filtering at this point to weed out modes which don't support * VF Drivers ... 
*/ - rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_GET( + rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_G( be32_to_cpu(rpl.u.manual.mode_pkd)); switch (rss->mode) { case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: { @@ -593,26 +731,26 @@ int t4vf_get_rss_glb_config(struct adapter *adapter) rpl.u.basicvirtual.synmapen_to_hashtoeplitz); rss->u.basicvirtual.synmapen = - ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F) != 0); rss->u.basicvirtual.syn4tupenipv6 = - ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F) != 0); rss->u.basicvirtual.syn2tupenipv6 = - ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F) != 0); rss->u.basicvirtual.syn4tupenipv4 = - ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F) != 0); rss->u.basicvirtual.syn2tupenipv4 = - ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F) != 0); rss->u.basicvirtual.ofdmapen = - ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F) != 0); rss->u.basicvirtual.tnlmapen = - ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F) != 0); rss->u.basicvirtual.tnlalllookup = - ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F) != 0); rss->u.basicvirtual.hashtoeplitz = - ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F) != 0); /* we need at least Tunnel Map Enable to be set */ if (!rss->u.basicvirtual.tnlmapen) @@ -647,9 +785,9 @@ int t4vf_get_vfres(struct adapter *adapter) * with error on command failure. */ memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PFVF_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); if (v) @@ -659,22 +797,22 @@ int t4vf_get_vfres(struct adapter *adapter) * Extract VF resource limits and return success. 
*/ word = be32_to_cpu(rpl.niqflint_niq); - vfres->niqflint = FW_PFVF_CMD_NIQFLINT_GET(word); - vfres->niq = FW_PFVF_CMD_NIQ_GET(word); + vfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word); + vfres->niq = FW_PFVF_CMD_NIQ_G(word); word = be32_to_cpu(rpl.type_to_neq); - vfres->neq = FW_PFVF_CMD_NEQ_GET(word); - vfres->pmask = FW_PFVF_CMD_PMASK_GET(word); + vfres->neq = FW_PFVF_CMD_NEQ_G(word); + vfres->pmask = FW_PFVF_CMD_PMASK_G(word); word = be32_to_cpu(rpl.tc_to_nexactf); - vfres->tc = FW_PFVF_CMD_TC_GET(word); - vfres->nvi = FW_PFVF_CMD_NVI_GET(word); - vfres->nexactf = FW_PFVF_CMD_NEXACTF_GET(word); + vfres->tc = FW_PFVF_CMD_TC_G(word); + vfres->nvi = FW_PFVF_CMD_NVI_G(word); + vfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word); word = be32_to_cpu(rpl.r_caps_to_nethctrl); - vfres->r_caps = FW_PFVF_CMD_R_CAPS_GET(word); - vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_GET(word); - vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_GET(word); + vfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word); + vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word); + vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word); return 0; } @@ -695,9 +833,9 @@ int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid, int v; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ | + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | FW_RSS_VI_CONFIG_CMD_VIID(viid)); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); @@ -709,17 +847,17 @@ int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid, u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen); config->basicvirtual.ip6fourtupen = - ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) != 0); + ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) != 0); config->basicvirtual.ip6twotupen = - ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) != 0); + ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) != 0); config->basicvirtual.ip4fourtupen = - ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) != 0); + ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) != 0); config->basicvirtual.ip4twotupen = - ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) != 0); + ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) != 0); config->basicvirtual.udpen = - ((word & FW_RSS_VI_CONFIG_CMD_UDPEN) != 0); + ((word & FW_RSS_VI_CONFIG_CMD_UDPEN_F) != 0); config->basicvirtual.defaultq = - FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(word); + FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(word); break; } @@ -745,9 +883,9 @@ int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid, struct fw_rss_vi_config_cmd cmd, rpl; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_RSS_VI_CONFIG_CMD_VIID(viid)); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); switch (adapter->params.rss.mode) { @@ -755,16 +893,16 @@ int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid, u32 word = 0; if (config->basicvirtual.ip6fourtupen) - word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; + word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F; if (config->basicvirtual.ip6twotupen) - word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; + word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F; if (config->basicvirtual.ip4fourtupen) - word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; + word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F; if (config->basicvirtual.ip4twotupen) - word 
|= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; + word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F; if (config->basicvirtual.udpen) - word |= FW_RSS_VI_CONFIG_CMD_UDPEN; - word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ( + word |= FW_RSS_VI_CONFIG_CMD_UDPEN_F; + word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V( config->basicvirtual.defaultq); cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word); break; @@ -803,10 +941,10 @@ int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid, * Initialize firmware command template to write the RSS table. */ memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_IND_TBL_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_RSS_IND_TBL_CMD_VIID(viid)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_RSS_IND_TBL_CMD_VIID_V(viid)); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); /* @@ -857,9 +995,9 @@ int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid, if (rsp >= rsp_end) rsp = rspq; } - *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) | - FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) | - FW_RSS_IND_TBL_CMD_IQ2(qbuf[2])); + *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf[0]) | + FW_RSS_IND_TBL_CMD_IQ1_V(qbuf[1]) | + FW_RSS_IND_TBL_CMD_IQ2_V(qbuf[2])); } /* @@ -892,18 +1030,18 @@ int t4vf_alloc_vi(struct adapter *adapter, int port_id) * VIID. */ memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_CMD_EXEC); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_CMD_EXEC_F); cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) | - FW_VI_CMD_ALLOC); - cmd.portid_pkd = FW_VI_CMD_PORTID(port_id); + FW_VI_CMD_ALLOC_F); + cmd.portid_pkd = FW_VI_CMD_PORTID_V(port_id); v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); if (v) return v; - return FW_VI_CMD_VIID_GET(be16_to_cpu(rpl.type_viid)); + return FW_VI_CMD_VIID_G(be16_to_cpu(rpl.type_viid)); } /** @@ -922,12 +1060,12 @@ int t4vf_free_vi(struct adapter *adapter, int viid) * Execute a VI command to free the Virtual Interface. 
*/ memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) | - FW_CMD_REQUEST | - FW_CMD_EXEC); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F); cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) | - FW_VI_CMD_FREE); - cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(viid)); + FW_VI_CMD_FREE_F); + cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid)); return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); } @@ -946,12 +1084,12 @@ int t4vf_enable_vi(struct adapter *adapter, unsigned int viid, struct fw_vi_enable_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) | - FW_CMD_REQUEST | - FW_CMD_EXEC | - FW_VI_ENABLE_CMD_VIID(viid)); - cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN(rx_en) | - FW_VI_ENABLE_CMD_EEN(tx_en) | + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | + FW_VI_ENABLE_CMD_VIID_V(viid)); + cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) | + FW_VI_ENABLE_CMD_EEN_V(tx_en) | FW_LEN16(cmd)); return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); } @@ -970,11 +1108,11 @@ int t4vf_identify_port(struct adapter *adapter, unsigned int viid, struct fw_vi_enable_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) | - FW_CMD_REQUEST | - FW_CMD_EXEC | - FW_VI_ENABLE_CMD_VIID(viid)); - cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED | + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | + FW_VI_ENABLE_CMD_VIID_V(viid)); + cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(cmd)); cmd.blinkdur = cpu_to_be16(nblinks); return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); @@ -1001,28 +1139,28 @@ int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid, /* convert to FW values */ if (mtu < 0) - mtu = FW_VI_RXMODE_CMD_MTU_MASK; + mtu = FW_VI_RXMODE_CMD_MTU_M; if (promisc < 0) - promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK; + promisc = FW_VI_RXMODE_CMD_PROMISCEN_M; if (all_multi < 0) - all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK; + all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M; if (bcast < 0) - bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK; + bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M; if (vlanex < 0) - vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK; + vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_RXMODE_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_VI_RXMODE_CMD_VIID(viid)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_VI_RXMODE_CMD_VIID_V(viid)); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); cmd.mtu_to_vlanexen = - cpu_to_be32(FW_VI_RXMODE_CMD_MTU(mtu) | - FW_VI_RXMODE_CMD_PROMISCEN(promisc) | - FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | - FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | - FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); + cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) | + FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) | + FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) | + FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) | + FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex)); return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok); } @@ -1072,19 +1210,19 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, int i; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - (free ? 
FW_CMD_EXEC : 0) | - FW_VI_MAC_CMD_VIID(viid)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + (free ? FW_CMD_EXEC_F : 0) | + FW_VI_MAC_CMD_VIID_V(viid)); cmd.freemacs_to_len16 = - cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) | - FW_CMD_LEN16(len16)); + cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) | + FW_CMD_LEN16_V(len16)); for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) { p->valid_to_idx = cpu_to_be16( - FW_VI_MAC_CMD_VALID | - FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); + FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC)); memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); } @@ -1095,7 +1233,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, break; for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) { - u16 index = FW_VI_MAC_CMD_IDX_GET( + u16 index = FW_VI_MAC_CMD_IDX_G( be16_to_cpu(p->valid_to_idx)); if (idx) @@ -1161,19 +1299,19 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid, idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_VI_MAC_CMD_VIID(viid)); - cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16(len16)); - p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID | - FW_VI_MAC_CMD_IDX(idx)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_VI_MAC_CMD_VIID_V(viid)); + cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); + p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V(idx)); memcpy(p->macaddr, addr, sizeof(p->macaddr)); ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); if (ret == 0) { p = &rpl.u.exact[0]; - ret = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx)); + ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx)); if (ret >= max_naddr) ret = -ENOMEM; } @@ -1198,13 +1336,13 @@ int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid, u.exact[0]), 16); memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_VI_ENABLE_CMD_VIID(viid)); - cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN | - FW_VI_MAC_CMD_HASHUNIEN(ucast) | - FW_CMD_LEN16(len16)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_VI_ENABLE_CMD_VIID_V(viid)); + cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F | + FW_VI_MAC_CMD_HASHUNIEN_V(ucast) | + FW_CMD_LEN16_V(len16)); cmd.u.hash.hashvec = cpu_to_be64(vec); return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok); } @@ -1240,14 +1378,14 @@ int t4vf_get_port_stats(struct adapter *adapter, int pidx, int ret; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_STATS_CMD) | - FW_VI_STATS_CMD_VIID(pi->viid) | - FW_CMD_REQUEST | - FW_CMD_READ); - cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_STATS_CMD) | + FW_VI_STATS_CMD_VIID_V(pi->viid) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); + cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); cmd.u.ctl.nstats_ix = - cpu_to_be16(FW_VI_STATS_CMD_IX(ix) | - FW_VI_STATS_CMD_NSTATS(nstats)); + cpu_to_be16(FW_VI_STATS_CMD_IX_V(ix) | + FW_VI_STATS_CMD_NSTATS_V(nstats)); ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl); if (ret) return ret; @@ -1299,13 +1437,13 @@ int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype, struct 
fw_iq_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) | - FW_CMD_REQUEST | - FW_CMD_EXEC); - cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE | + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F); + cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(cmd)); cmd.type_to_iqandstindex = - cpu_to_be32(FW_IQ_CMD_TYPE(iqtype)); + cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype)); cmd.iqid = cpu_to_be16(iqid); cmd.fl0id = cpu_to_be16(fl0id); @@ -1325,12 +1463,12 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid) struct fw_eq_eth_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) | - FW_CMD_REQUEST | - FW_CMD_EXEC); - cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE | + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F); + cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(cmd)); - cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID(eqid)); + cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid)); return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); } @@ -1344,7 +1482,7 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid) int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) { const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl; - u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi)); + u8 opcode = FW_CMD_OP_G(be32_to_cpu(cmd_hdr->hi)); switch (opcode) { case FW_PORT_CMD: { @@ -1359,7 +1497,7 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) /* * Extract various fields from port status change message. */ - action = FW_PORT_CMD_ACTION_GET( + action = FW_PORT_CMD_ACTION_G( be32_to_cpu(port_cmd->action_to_len16)); if (action != FW_PORT_ACTION_GET_PORT_INFO) { dev_err(adapter->pdev_dev, @@ -1368,24 +1506,24 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) break; } - port_id = FW_PORT_CMD_PORTID_GET( + port_id = FW_PORT_CMD_PORTID_G( be32_to_cpu(port_cmd->op_to_portid)); word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype); - link_ok = (word & FW_PORT_CMD_LSTATUS) != 0; + link_ok = (word & FW_PORT_CMD_LSTATUS_F) != 0; speed = 0; fc = 0; - if (word & FW_PORT_CMD_RXPAUSE) + if (word & FW_PORT_CMD_RXPAUSE_F) fc |= PAUSE_RX; - if (word & FW_PORT_CMD_TXPAUSE) + if (word & FW_PORT_CMD_TXPAUSE_F) fc |= PAUSE_TX; - if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) + if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) speed = 100; - else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) + else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) speed = 1000; - else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) + else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) speed = 10000; - else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G)) + else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) speed = 40000; /* @@ -1420,3 +1558,38 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) } return 0; } + +/** + * t4vf_prep_adapter - prepare SW and HW for operation + * @adapter: the adapter + + * + * Wait for the device to become ready, set some default parameters for + * debugging in case we can't reach the firmware, and determine which + * chip the adapter is based on. + */ +int t4vf_prep_adapter(struct adapter *adapter) +{ + int err; + unsigned int chipid; + + /* Wait for the device to become ready before proceeding ... + */ + err = t4vf_wait_dev_ready(adapter); + if (err) + return err; + + /* Default port and clock for debugging in case we can't reach + * firmware.
+ */ + adapter->params.nports = 1; + adapter->params.vfres.pmask = 1; + adapter->params.vpd.cclk = 50000; + + adapter->params.chip = 0; + switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) { + case CHELSIO_T4: + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0); + break; + + case CHELSIO_T5: + chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV)); + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid); + break; + } + + return 0; +} diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c index e285f384b0966dfe2ab1e3b138f99b5e1a2bb610..07719676c305fc0b433f614c9ec8365dad88859f 100644 --- a/drivers/net/ethernet/cirrus/mac89x0.c +++ b/drivers/net/ethernet/cirrus/mac89x0.c @@ -216,14 +216,10 @@ struct net_device * __init mac89x0_probe(int unit) ioaddr = (unsigned long) nubus_slot_addr(slot) | (((slot&0xf) << 20) + DEFAULTIOBASE); { - unsigned long flags; int card_present; - local_irq_save(flags); - card_present = (hwreg_present((void*) ioaddr+4) && - hwreg_present((void*) ioaddr + DATA_PORT)); - local_irq_restore(flags); - + card_present = (hwreg_present((void *)ioaddr + 4) && + hwreg_present((void *)ioaddr + DATA_PORT)); if (!card_present) goto out; } diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 5ba5ad071bb619dee3c5842fc23b7ff3c3e19216..25c4d88853d8260fa5efcbf0e043d2ccd968b10b 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -187,6 +187,7 @@ struct enic { unsigned int cq_count; struct enic_rfs_flw_tbl rfs_h; u32 rx_copybreak; + u8 rss_key[ENIC_RSS_LEN]; }; static inline struct device *enic_get_dev(struct enic *enic) @@ -246,5 +247,6 @@ int enic_sriov_enabled(struct enic *enic); int enic_is_valid_vf(struct enic *enic, int vf); int enic_is_dynamic(struct enic *enic); void enic_set_ethtool_ops(struct net_device *netdev); +int __enic_set_rsskey(struct enic *enic); #endif /* _ENIC_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 85173d6207584aadc2546f880c22446fee0f09ff..eba1eb846d347ac14679ca060d3cd370660caf7f 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -23,6 +23,7 @@ #include "enic.h" #include "enic_dev.h" #include "enic_clsf.h" +#include "vnic_rss.h" struct enic_stat { char name[ETH_GSTRING_LEN]; @@ -416,6 +417,40 @@ static int enic_set_tunable(struct net_device *dev, return ret; } +static u32 enic_get_rxfh_key_size(struct net_device *netdev) +{ + return ENIC_RSS_LEN; +} + +static int enic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey, + u8 *hfunc) +{ + struct enic *enic = netdev_priv(netdev); + + if (hkey) + memcpy(hkey, enic->rss_key, ENIC_RSS_LEN); + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + return 0; +} + +static int enic_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *hkey, const u8 hfunc) +{ + struct enic *enic = netdev_priv(netdev); + + if ((hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) || + indir) + return -EINVAL; + + if (hkey) + memcpy(enic->rss_key, hkey, ENIC_RSS_LEN); + + return __enic_set_rsskey(enic); +} + static const struct ethtool_ops enic_ethtool_ops = { .get_settings = enic_get_settings, .get_drvinfo = enic_get_drvinfo, @@ -430,6 +465,9 @@ static const struct ethtool_ops enic_ethtool_ops = { .get_rxnfc = enic_get_rxnfc, .get_tunable = enic_get_tunable, .set_tunable = enic_set_tunable, + .get_rxfh_key_size = enic_get_rxfh_key_size, + 
.get_rxfh = enic_get_rxfh, + .set_rxfh = enic_set_rxfh, }; void enic_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 73cf1653a4a3d4225951334e26506e89145cd1e4..868d0f605d60524053c46d87d010bf50e3341a1d 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -283,12 +283,10 @@ static irqreturn_t enic_isr_legacy(int irq, void *data) return IRQ_HANDLED; } - if (ENIC_TEST_INTR(pba, io_intr)) { - if (napi_schedule_prep(&enic->napi[0])) - __napi_schedule(&enic->napi[0]); - } else { + if (ENIC_TEST_INTR(pba, io_intr)) + napi_schedule_irqoff(&enic->napi[0]); + else vnic_intr_unmask(&enic->intr[io_intr]); - } return IRQ_HANDLED; } @@ -313,7 +311,7 @@ static irqreturn_t enic_isr_msi(int irq, void *data) * writes). */ - napi_schedule(&enic->napi[0]); + napi_schedule_irqoff(&enic->napi[0]); return IRQ_HANDLED; } @@ -322,7 +320,7 @@ static irqreturn_t enic_isr_msix(int irq, void *data) { struct napi_struct *napi = data; - napi_schedule(napi); + napi_schedule_irqoff(napi); return IRQ_HANDLED; } @@ -531,8 +529,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, { struct enic *enic = netdev_priv(netdev); struct vnic_wq *wq; - unsigned long flags; unsigned int txq_map; + struct netdev_queue *txq; if (skb->len <= 0) { dev_kfree_skb_any(skb); @@ -541,6 +539,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, txq_map = skb_get_queue_mapping(skb) % enic->wq_count; wq = &enic->wq[txq_map]; + txq = netdev_get_tx_queue(netdev, txq_map); /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs, * which is very likely. In the off chance it's going to take @@ -554,23 +553,25 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } - spin_lock_irqsave(&enic->wq_lock[txq_map], flags); + spin_lock(&enic->wq_lock[txq_map]); if (vnic_wq_desc_avail(wq) < skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) { - netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map)); + netif_tx_stop_queue(txq); /* This is a hard error, log it */ netdev_err(netdev, "BUG! 
Tx ring full when queue awake!\n"); - spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags); + spin_unlock(&enic->wq_lock[txq_map]); return NETDEV_TX_BUSY; } enic_queue_wq_skb(enic, wq, skb); if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) - netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map)); + netif_tx_stop_queue(txq); + if (!skb->xmit_more || netif_xmit_stopped(txq)) + vnic_wq_doorbell(wq); - spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags); + spin_unlock(&enic->wq_lock[txq_map]); return NETDEV_TX_OK; } @@ -1312,9 +1313,10 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget) if (!wq_work_done) { napi_complete(napi); vnic_intr_unmask(&enic->intr[intr]); + return 0; } - return 0; + return budget; } static int enic_poll_msix_rq(struct napi_struct *napi, int budget) @@ -1886,25 +1888,23 @@ static int enic_dev_hang_reset(struct enic *enic) return err; } -static int enic_set_rsskey(struct enic *enic) +int __enic_set_rsskey(struct enic *enic) { + union vnic_rss_key *rss_key_buf_va; dma_addr_t rss_key_buf_pa; - union vnic_rss_key *rss_key_buf_va = NULL; - union vnic_rss_key rss_key = { - .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}, - .key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}, - .key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}, - .key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}, - }; - int err; + int i, kidx, bidx, err; - rss_key_buf_va = pci_alloc_consistent(enic->pdev, - sizeof(union vnic_rss_key), &rss_key_buf_pa); + rss_key_buf_va = pci_zalloc_consistent(enic->pdev, + sizeof(union vnic_rss_key), + &rss_key_buf_pa); if (!rss_key_buf_va) return -ENOMEM; - memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key)); - + for (i = 0; i < ENIC_RSS_LEN; i++) { + kidx = i / ENIC_RSS_BYTES_PER_KEY; + bidx = i % ENIC_RSS_BYTES_PER_KEY; + rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i]; + } spin_lock_bh(&enic->devcmd_lock); err = enic_set_rss_key(enic, rss_key_buf_pa, @@ -1917,6 +1917,13 @@ static int enic_set_rsskey(struct enic *enic) return err; } +static int enic_set_rsskey(struct enic *enic) +{ + netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN); + + return __enic_set_rsskey(enic); +} + static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) { dma_addr_t rss_cpu_buf_pa; diff --git a/drivers/net/ethernet/cisco/enic/vnic_rss.h b/drivers/net/ethernet/cisco/enic/vnic_rss.h index fa421baf45b87eadbf5857843869f1bbcabcdb08..881fa18542b382bc52e222dc4ff47114d052fb00 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rss.h +++ b/drivers/net/ethernet/cisco/enic/vnic_rss.h @@ -20,11 +20,16 @@ #define _VNIC_RSS_H_ /* RSS key array */ + +#define ENIC_RSS_BYTES_PER_KEY 10 +#define ENIC_RSS_KEYS 4 +#define ENIC_RSS_LEN (ENIC_RSS_BYTES_PER_KEY * ENIC_RSS_KEYS) + union vnic_rss_key { struct { - u8 b[10]; + u8 b[ENIC_RSS_BYTES_PER_KEY]; u8 b_pad[6]; - } key[4]; + } key[ENIC_RSS_KEYS]; u64 raw[8]; }; diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h index 2c6c70804a39edb475d4c97e1f3e617aad387ed4..816f1ad6072f142b7e6ac19e372fd6338f2a6efb 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_wq.h +++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h @@ -104,6 +104,17 @@ static inline void *vnic_wq_next_desc(struct vnic_wq *wq) return wq->to_use->desc; } +static inline void vnic_wq_doorbell(struct vnic_wq *wq) +{ + /* Adding write memory barrier prevents compiler and/or CPU + * reordering, thus avoiding descriptor posting before + * descriptor is initialized. 
Otherwise, hardware can read + * stale descriptor fields. + */ + wmb(); + iowrite32(wq->to_use->index, &wq->ctrl->posted_index); +} + static inline void vnic_wq_post(struct vnic_wq *wq, void *os_buf, dma_addr_t dma_addr, unsigned int len, int sop, int eop, @@ -122,15 +133,6 @@ static inline void vnic_wq_post(struct vnic_wq *wq, buf->wr_id = wrid; buf = buf->next; - if (eop) { - /* Adding write memory barrier prevents compiler and/or CPU - * reordering, thus avoiding descriptor posting before - * descriptor is initialized. Otherwise, hardware can read - * stale descriptor fields. - */ - wmb(); - iowrite32(buf->index, &wq->ctrl->posted_index); - } wq->to_use = buf; wq->ring.desc_avail -= desc_skip_cnt; diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index cf8b6ff216130b3bfb51cfc183cb2eb25a69cfb2..badff181e719692a9a94b2a5ba1c792fc2bf18fc 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -995,7 +995,6 @@ static void de4x5_dbg_mii(struct net_device *dev, int k); static void de4x5_dbg_media(struct net_device *dev); static void de4x5_dbg_srom(struct de4x5_srom *p); static void de4x5_dbg_rx(struct sk_buff *skb, int len); -static int de4x5_strncmp(char *a, char *b, int n); static int dc21041_infoleaf(struct net_device *dev); static int dc21140_infoleaf(struct net_device *dev); static int dc21142_infoleaf(struct net_device *dev); @@ -4102,8 +4101,7 @@ get_hw_addr(struct net_device *dev) } /* -** Test for enet addresses in the first 32 bytes. The built-in strncmp -** didn't seem to work here...? +** Test for enet addresses in the first 32 bytes. */ static int de4x5_bad_srom(struct de4x5_private *lp) @@ -4111,8 +4109,8 @@ de4x5_bad_srom(struct de4x5_private *lp) int i, status = 0; for (i = 0; i < ARRAY_SIZE(enet_det); i++) { - if (!de4x5_strncmp((char *)&lp->srom, (char *)&enet_det[i], 3) && - !de4x5_strncmp((char *)&lp->srom+0x10, (char *)&enet_det[i], 3)) { + if (!memcmp(&lp->srom, &enet_det[i], 3) && + !memcmp((char *)&lp->srom+0x10, &enet_det[i], 3)) { if (i == 0) { status = SMC; } else if (i == 1) { @@ -4125,18 +4123,6 @@ de4x5_bad_srom(struct de4x5_private *lp) return status; } -static int -de4x5_strncmp(char *a, char *b, int n) -{ - int ret=0; - - for (;n && !ret; n--) { - ret = *a++ - *b++; - } - - return ret; -} - static void srom_repair(struct net_device *dev, int card) { diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index c8205606c7757ff3345acef81d57a5c0118497e3..50a00777228e12b91b33f8bb3b7794f4f07de42c 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -2265,7 +2265,7 @@ static int __init dmfe_init_module(void) static void __exit dmfe_cleanup_module(void) { - DMFE_DBUG(0, "dmfe_clean_module() ", debug); + DMFE_DBUG(0, "dmfe_cleanup_module() ", debug); pci_unregister_driver(&dmfe_driver); } diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index 4061f9b22812f2c03df0e5076a8bce82f511503e..1c5916b13778a96e489ee3ec1bcb2d1acee63cd2 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -1837,7 +1837,7 @@ static int __init uli526x_init_module(void) static void __exit uli526x_cleanup_module(void) { - ULI526X_DBUG(0, "uli526x_clean_module() ", debug); + ULI526X_DBUG(0, "uli526x_cleanup_module() ", debug); pci_unregister_driver(&uli526x_driver); } diff --git a/drivers/net/ethernet/emulex/benet/be.h 
b/drivers/net/ethernet/emulex/benet/be.h index 9a2d75235e89991bd62a7decd437707ce6b78b4b..712e7f8e1df7e820179abbc9c51665a0e315f028 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -522,6 +522,7 @@ struct be_adapter { u8 hba_port_num; u16 pvid; __be16 vxlan_port; + int vxlan_port_count; struct phy_info phy; u8 wol_cap; bool wol_en; diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index e42a791c183579fdddc486e805a46d77518a0909..73a500ccbf69592d0de09b0cc17f4db82e925664 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -1171,7 +1171,8 @@ static u32 be_get_rxfh_key_size(struct net_device *netdev) return RSS_HASH_KEY_LEN; } -static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey) +static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey, + u8 *hfunc) { struct be_adapter *adapter = netdev_priv(netdev); int i; @@ -1185,16 +1186,23 @@ static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey) if (hkey) memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN); + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + return 0; } static int be_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *hkey) + const u8 *hkey, const u8 hfunc) { int rc = 0, i, j; struct be_adapter *adapter = netdev_priv(netdev); u8 rsstable[RSS_INDIR_TABLE_LEN]; + /* We do not allow change in unsupported parameters */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + if (indir) { struct be_rx_obj *rxo; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 597c463e384d0d5d9fb0f46a9b363e9c9165c6cf..2aacd47310511275be8ee4ef317963e588b39922 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -887,7 +887,8 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, } if (vlan_tag) { - skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q), + vlan_tag); if (unlikely(!skb)) return skb; skb->vlan_tci = 0; @@ -896,7 +897,8 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, /* Insert the outer VLAN, if any */ if (adapter->qnq_vid) { vlan_tag = adapter->qnq_vid; - skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q), + vlan_tag); if (unlikely(!skb)) return skb; if (skip_hw_vlan) @@ -1015,9 +1017,8 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, * to pad short packets (<= 32 bytes) to a 36-byte length. 
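The hunk just below swaps the open-coded skb_padto() plus a manual length fix-up for skb_put_padto(). A rough sketch of why the two are equivalent, assuming the usual helper semantics (skb_padto() only guarantees padded buffer space, while skb_put_padto() also advances skb->len and frees the skb on failure):

static struct sk_buff *pad_short_tx(struct be_adapter *adapter,
				    struct sk_buff *skb)
{
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		/* old: if (skb_padto(skb, 36)) ...; skb->len = 36; by hand */
		if (skb_put_padto(skb, 36))
			return NULL;	/* skb already freed on error */
	}
	return skb;
}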
*/ if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) { - if (skb_padto(skb, 36)) + if (skb_put_padto(skb, 36)) return NULL; - skb->len = 36; } if (BEx_chip(adapter) || lancer_chip(adapter)) { @@ -2853,10 +2854,10 @@ static int be_close(struct net_device *netdev) static int be_rx_qs_create(struct be_adapter *adapter) { + struct rss_info *rss = &adapter->rss_info; + u8 rss_key[RSS_HASH_KEY_LEN]; struct be_rx_obj *rxo; int rc, i, j; - u8 rss_hkey[RSS_HASH_KEY_LEN]; - struct rss_info *rss = &adapter->rss_info; for_all_rx_queues(adapter, rxo, i) { rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN, @@ -2901,15 +2902,15 @@ static int be_rx_qs_create(struct be_adapter *adapter) rss->rss_flags = RSS_ENABLE_NONE; } - get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN); + netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN); rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags, - 128, rss_hkey); + 128, rss_key); if (rc) { rss->rss_flags = RSS_ENABLE_NONE; return rc; } - memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN); + memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN); /* First time posting */ for_all_rx_queues(adapter, rxo, i) @@ -3123,6 +3124,8 @@ static void be_mac_clear(struct be_adapter *adapter) #ifdef CONFIG_BE2NET_VXLAN static void be_disable_vxlan_offloads(struct be_adapter *adapter) { + struct net_device *netdev = adapter->netdev; + if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) be_cmd_manage_iface(adapter, adapter->if_handle, OP_CONVERT_TUNNEL_TO_NORMAL); @@ -3132,6 +3135,9 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter) adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS; adapter->vxlan_port = 0; + + netdev->hw_enc_features = 0; + netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL); } #endif @@ -4365,10 +4371,24 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, return ndo_dflt_bridge_getlink(skb, pid, seq, dev, hsw_mode == PORT_FWD_TYPE_VEPA ? - BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB); + BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB, + 0, 0); } #ifdef CONFIG_BE2NET_VXLAN +/* VxLAN offload Notes: + * + * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't + * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload + * is expected to work across all types of IP tunnels once exported. Skyhawk + * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN + * offloads in hw_enc_features only when a VxLAN port is added. Note this only + * ensures that other tunnels work fine while VxLAN offloads are not enabled. + * + * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack + * adds more than one port, disable offloads and don't re-enable them again + * until after all the tunnels are removed. 
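A condensed sketch of the bookkeeping this comment describes; enable_offloads()/disable_offloads() are placeholders for the be_cmd_manage_iface()/be_disable_vxlan_offloads() plumbing in the hunks that follow:

/* add: the first port gets offloaded, any further port disables offloads */
if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
	adapter->vxlan_port_count++;	/* remember it, but ... */
	disable_offloads(adapter);	/* ... HW can match only one port */
} else if (adapter->vxlan_port_count++ == 0) {
	enable_offloads(adapter, port);
}

/* delete: drop offloads only when the offloaded port itself goes away */
if (adapter->vxlan_port == port)
	disable_offloads(adapter);
adapter->vxlan_port_count--;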
+ */ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { @@ -4380,13 +4400,16 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, return; if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { - dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n", - be16_to_cpu(port)); dev_info(dev, "Only one UDP port supported for VxLAN offloads\n"); - return; + dev_info(dev, "Disabling VxLAN offloads\n"); + adapter->vxlan_port_count++; + goto err; } + if (adapter->vxlan_port_count++ >= 1) + return; + status = be_cmd_manage_iface(adapter, adapter->if_handle, OP_CONVERT_NORMAL_TO_TUNNEL); if (status) { @@ -4402,6 +4425,11 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS; adapter->vxlan_port = port; + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_TUNNEL; + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n", be16_to_cpu(port)); return; @@ -4418,13 +4446,15 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, return; if (adapter->vxlan_port != port) - return; + goto done; be_disable_vxlan_offloads(adapter); dev_info(&adapter->pdev->dev, "Disabled VxLAN offloads for UDP port %d\n", be16_to_cpu(port)); +done: + adapter->vxlan_port_count--; } static bool be_gso_check(struct sk_buff *skb, struct net_device *dev) @@ -4468,12 +4498,6 @@ static void be_netdev_init(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); - if (skyhawk_chip(adapter)) { - netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_TSO | NETIF_F_TSO6 | - NETIF_F_GSO_UDP_TUNNEL; - netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; - } netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX; diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 9af296a1ca999f3e659f2ec41d140f14da87149b..469691ad4a1ee25dda5ff9dbec0ef8735ae0f6bf 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -38,9 +38,9 @@ #define FEC_ADDR_LOW 0x0e4 /* Low 32bits MAC address */ #define FEC_ADDR_HIGH 0x0e8 /* High 16bits MAC address */ #define FEC_OPD 0x0ec /* Opcode + Pause duration */ -#define FEC_TXIC0 0xF0 /* Tx Interrupt Coalescing for ring 0 */ -#define FEC_TXIC1 0xF4 /* Tx Interrupt Coalescing for ring 1 */ -#define FEC_TXIC2 0xF8 /* Tx Interrupt Coalescing for ring 2 */ +#define FEC_TXIC0 0x0f0 /* Tx Interrupt Coalescing for ring 0 */ +#define FEC_TXIC1 0x0f4 /* Tx Interrupt Coalescing for ring 1 */ +#define FEC_TXIC2 0x0f8 /* Tx Interrupt Coalescing for ring 2 */ #define FEC_RXIC0 0x100 /* Rx Interrupt Coalescing for ring 0 */ #define FEC_RXIC1 0x104 /* Rx Interrupt Coalescing for ring 1 */ #define FEC_RXIC2 0x108 /* Rx Interrupt Coalescing for ring 2 */ @@ -53,16 +53,18 @@ #define FEC_R_FSTART 0x150 /* FIFO receive start reg */ #define FEC_R_DES_START_1 0x160 /* Receive descriptor ring 1 */ #define FEC_X_DES_START_1 0x164 /* Transmit descriptor ring 1 */ +#define FEC_R_BUFF_SIZE_1 0x168 /* Maximum receive buff ring1 size */ #define FEC_R_DES_START_2 0x16c /* Receive descriptor ring 2 */ #define FEC_X_DES_START_2 0x170 /* Transmit descriptor ring 2 */ +#define FEC_R_BUFF_SIZE_2 0x174 /* Maximum receive buff ring2 size */ #define FEC_R_DES_START_0 0x180 /* 
Receive descriptor ring */ #define FEC_X_DES_START_0 0x184 /* Transmit descriptor ring */ -#define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */ +#define FEC_R_BUFF_SIZE_0 0x188 /* Maximum receive buff size */ #define FEC_R_FIFO_RSFL 0x190 /* Receive FIFO section full threshold */ #define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */ #define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */ #define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */ -#define FEC_RACC 0x1C4 /* Receive Accelerator function */ +#define FEC_RACC 0x1c4 /* Receive Accelerator function */ #define FEC_RCMR_1 0x1c8 /* Receive classification match ring 1 */ #define FEC_RCMR_2 0x1cc /* Receive classification match ring 2 */ #define FEC_DMA_CFG_1 0x1d8 /* DMA class configuration for ring 1 */ @@ -82,57 +84,57 @@ #define RMON_T_DROP 0x200 /* Count of frames not cntd correctly */ #define RMON_T_PACKETS 0x204 /* RMON TX packet count */ #define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */ -#define RMON_T_MC_PKT 0x20C /* RMON TX multicast pkts */ +#define RMON_T_MC_PKT 0x20c /* RMON TX multicast pkts */ #define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */ #define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */ #define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */ -#define RMON_T_FRAG 0x21C /* RMON TX pkts < 64 bytes, bad CRC */ +#define RMON_T_FRAG 0x21c /* RMON TX pkts < 64 bytes, bad CRC */ #define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */ #define RMON_T_COL 0x224 /* RMON TX collision count */ #define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */ -#define RMON_T_P65TO127 0x22C /* RMON TX 65 to 127 byte pkts */ +#define RMON_T_P65TO127 0x22c /* RMON TX 65 to 127 byte pkts */ #define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */ #define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */ #define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */ -#define RMON_T_P1024TO2047 0x23C /* RMON TX 1024 to 2047 byte pkts */ +#define RMON_T_P1024TO2047 0x23c /* RMON TX 1024 to 2047 byte pkts */ #define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */ #define RMON_T_OCTETS 0x244 /* RMON TX octets */ #define IEEE_T_DROP 0x248 /* Count of frames not counted crtly */ -#define IEEE_T_FRAME_OK 0x24C /* Frames tx'd OK */ +#define IEEE_T_FRAME_OK 0x24c /* Frames tx'd OK */ #define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */ #define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */ #define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */ -#define IEEE_T_LCOL 0x25C /* Frames tx'd with late collision */ +#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */ #define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */ #define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */ #define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */ -#define IEEE_T_SQE 0x26C /* Frames tx'd with SQE err */ +#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */ #define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */ #define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */ #define RMON_R_PACKETS 0x284 /* RMON RX packet count */ #define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */ -#define RMON_R_MC_PKT 0x28C /* RMON RX multicast pkts */ +#define RMON_R_MC_PKT 0x28c /* RMON RX multicast pkts */ #define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */ #define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 
bytes, good CRC */ #define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */ -#define RMON_R_FRAG 0x29C /* RMON RX pkts < 64 bytes, bad CRC */ -#define RMON_R_JAB 0x2A0 /* RMON RX pkts > MAX_FL bytes, bad CRC */ -#define RMON_R_RESVD_O 0x2A4 /* Reserved */ -#define RMON_R_P64 0x2A8 /* RMON RX 64 byte pkts */ -#define RMON_R_P65TO127 0x2AC /* RMON RX 65 to 127 byte pkts */ -#define RMON_R_P128TO255 0x2B0 /* RMON RX 128 to 255 byte pkts */ -#define RMON_R_P256TO511 0x2B4 /* RMON RX 256 to 511 byte pkts */ -#define RMON_R_P512TO1023 0x2B8 /* RMON RX 512 to 1023 byte pkts */ -#define RMON_R_P1024TO2047 0x2BC /* RMON RX 1024 to 2047 byte pkts */ -#define RMON_R_P_GTE2048 0x2C0 /* RMON RX pkts > 2048 bytes */ -#define RMON_R_OCTETS 0x2C4 /* RMON RX octets */ -#define IEEE_R_DROP 0x2C8 /* Count frames not counted correctly */ -#define IEEE_R_FRAME_OK 0x2CC /* Frames rx'd OK */ -#define IEEE_R_CRC 0x2D0 /* Frames rx'd with CRC err */ -#define IEEE_R_ALIGN 0x2D4 /* Frames rx'd with alignment err */ -#define IEEE_R_MACERR 0x2D8 /* Receive FIFO overflow count */ -#define IEEE_R_FDXFC 0x2DC /* Flow control pause frames rx'd */ -#define IEEE_R_OCTETS_OK 0x2E0 /* Octet cnt for frames rx'd w/o err */ +#define RMON_R_FRAG 0x29c /* RMON RX pkts < 64 bytes, bad CRC */ +#define RMON_R_JAB 0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */ +#define RMON_R_RESVD_O 0x2a4 /* Reserved */ +#define RMON_R_P64 0x2a8 /* RMON RX 64 byte pkts */ +#define RMON_R_P65TO127 0x2ac /* RMON RX 65 to 127 byte pkts */ +#define RMON_R_P128TO255 0x2b0 /* RMON RX 128 to 255 byte pkts */ +#define RMON_R_P256TO511 0x2b4 /* RMON RX 256 to 511 byte pkts */ +#define RMON_R_P512TO1023 0x2b8 /* RMON RX 512 to 1023 byte pkts */ +#define RMON_R_P1024TO2047 0x2bc /* RMON RX 1024 to 2047 byte pkts */ +#define RMON_R_P_GTE2048 0x2c0 /* RMON RX pkts > 2048 bytes */ +#define RMON_R_OCTETS 0x2c4 /* RMON RX octets */ +#define IEEE_R_DROP 0x2c8 /* Count frames not counted correctly */ +#define IEEE_R_FRAME_OK 0x2cc /* Frames rx'd OK */ +#define IEEE_R_CRC 0x2d0 /* Frames rx'd with CRC err */ +#define IEEE_R_ALIGN 0x2d4 /* Frames rx'd with alignment err */ +#define IEEE_R_MACERR 0x2d8 /* Receive FIFO overflow count */ +#define IEEE_R_FDXFC 0x2dc /* Flow control pause frames rx'd */ +#define IEEE_R_OCTETS_OK 0x2e0 /* Octet cnt for frames rx'd w/o err */ #else @@ -165,21 +167,23 @@ #define FEC_X_DES_START_0 0x3d4 /* Transmit descriptor ring */ #define FEC_X_DES_START_1 FEC_X_DES_START_0 #define FEC_X_DES_START_2 FEC_X_DES_START_0 -#define FEC_R_BUFF_SIZE 0x3d8 /* Maximum receive buff size */ +#define FEC_R_BUFF_SIZE_0 0x3d8 /* Maximum receive buff size */ +#define FEC_R_BUFF_SIZE_1 FEC_R_BUFF_SIZE_0 +#define FEC_R_BUFF_SIZE_2 FEC_R_BUFF_SIZE_0 #define FEC_FIFO_RAM 0x400 /* FIFO RAM buffer */ /* Not existed in real chip * Just for pass build. */ -#define FEC_RCMR_1 0xFFF -#define FEC_RCMR_2 0xFFF -#define FEC_DMA_CFG_1 0xFFF -#define FEC_DMA_CFG_2 0xFFF -#define FEC_TXIC0 0xFFF -#define FEC_TXIC1 0xFFF -#define FEC_TXIC2 0xFFF -#define FEC_RXIC0 0xFFF -#define FEC_RXIC1 0xFFF -#define FEC_RXIC2 0xFFF +#define FEC_RCMR_1 0xfff +#define FEC_RCMR_2 0xfff +#define FEC_DMA_CFG_1 0xfff +#define FEC_DMA_CFG_2 0xfff +#define FEC_TXIC0 0xfff +#define FEC_TXIC1 0xfff +#define FEC_TXIC2 0xfff +#define FEC_RXIC0 0xfff +#define FEC_RXIC1 0xfff +#define FEC_RXIC2 0xfff #endif /* CONFIG_M5272 */ @@ -213,60 +217,60 @@ struct bufdesc_ex { * The following definitions courtesy of commproc.h, which where * Copyright (c) 1997 Dan Malek (dmalek@jlc.net). 
*/ -#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */ -#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */ -#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */ -#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */ -#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */ -#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */ -#define BD_SC_P ((ushort)0x0100) /* xmt preamble */ -#define BD_SC_BR ((ushort)0x0020) /* Break received */ -#define BD_SC_FR ((ushort)0x0010) /* Framing error */ -#define BD_SC_PR ((ushort)0x0008) /* Parity error */ -#define BD_SC_OV ((ushort)0x0002) /* Overrun */ -#define BD_SC_CD ((ushort)0x0001) /* ?? */ +#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */ +#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */ +#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */ +#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */ +#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */ +#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */ +#define BD_SC_P ((ushort)0x0100) /* xmt preamble */ +#define BD_SC_BR ((ushort)0x0020) /* Break received */ +#define BD_SC_FR ((ushort)0x0010) /* Framing error */ +#define BD_SC_PR ((ushort)0x0008) /* Parity error */ +#define BD_SC_OV ((ushort)0x0002) /* Overrun */ +#define BD_SC_CD ((ushort)0x0001) /* ?? */ /* Buffer descriptor control/status used by Ethernet receive. -*/ -#define BD_ENET_RX_EMPTY ((ushort)0x8000) -#define BD_ENET_RX_WRAP ((ushort)0x2000) -#define BD_ENET_RX_INTR ((ushort)0x1000) -#define BD_ENET_RX_LAST ((ushort)0x0800) -#define BD_ENET_RX_FIRST ((ushort)0x0400) -#define BD_ENET_RX_MISS ((ushort)0x0100) -#define BD_ENET_RX_LG ((ushort)0x0020) -#define BD_ENET_RX_NO ((ushort)0x0010) -#define BD_ENET_RX_SH ((ushort)0x0008) -#define BD_ENET_RX_CR ((ushort)0x0004) -#define BD_ENET_RX_OV ((ushort)0x0002) -#define BD_ENET_RX_CL ((ushort)0x0001) -#define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */ + */ +#define BD_ENET_RX_EMPTY ((ushort)0x8000) +#define BD_ENET_RX_WRAP ((ushort)0x2000) +#define BD_ENET_RX_INTR ((ushort)0x1000) +#define BD_ENET_RX_LAST ((ushort)0x0800) +#define BD_ENET_RX_FIRST ((ushort)0x0400) +#define BD_ENET_RX_MISS ((ushort)0x0100) +#define BD_ENET_RX_LG ((ushort)0x0020) +#define BD_ENET_RX_NO ((ushort)0x0010) +#define BD_ENET_RX_SH ((ushort)0x0008) +#define BD_ENET_RX_CR ((ushort)0x0004) +#define BD_ENET_RX_OV ((ushort)0x0002) +#define BD_ENET_RX_CL ((ushort)0x0001) +#define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */ /* Enhanced buffer descriptor control/status used by Ethernet receive */ -#define BD_ENET_RX_VLAN 0x00000004 +#define BD_ENET_RX_VLAN 0x00000004 /* Buffer descriptor control/status used by Ethernet transmit. 
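Before the transmit side, a sketch of how fec_main.c typically consumes the BD_ENET_RX_* bits defined above (illustrative, not a hunk from this series; cbd_sc is the control/status word of struct bufdesc):

u16 status = bdp->cbd_sc;

if (!(status & BD_ENET_RX_EMPTY)) {	/* descriptor owned by software */
	if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
		      BD_ENET_RX_CR | BD_ENET_RX_OV))
		ndev->stats.rx_errors++;
	/* ... otherwise hand the frame to the stack ... */
	status &= ~BD_ENET_RX_STATS;	/* clear all status bits */
	status |= BD_ENET_RX_EMPTY;	/* give the descriptor back to HW */
	bdp->cbd_sc = status;
}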
-*/ -#define BD_ENET_TX_READY ((ushort)0x8000) -#define BD_ENET_TX_PAD ((ushort)0x4000) -#define BD_ENET_TX_WRAP ((ushort)0x2000) -#define BD_ENET_TX_INTR ((ushort)0x1000) -#define BD_ENET_TX_LAST ((ushort)0x0800) -#define BD_ENET_TX_TC ((ushort)0x0400) -#define BD_ENET_TX_DEF ((ushort)0x0200) -#define BD_ENET_TX_HB ((ushort)0x0100) -#define BD_ENET_TX_LC ((ushort)0x0080) -#define BD_ENET_TX_RL ((ushort)0x0040) -#define BD_ENET_TX_RCMASK ((ushort)0x003c) -#define BD_ENET_TX_UN ((ushort)0x0002) -#define BD_ENET_TX_CSL ((ushort)0x0001) -#define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */ - -/*enhanced buffer descriptor control/status used by Ethernet transmit*/ -#define BD_ENET_TX_INT 0x40000000 -#define BD_ENET_TX_TS 0x20000000 -#define BD_ENET_TX_PINS 0x10000000 -#define BD_ENET_TX_IINS 0x08000000 + */ +#define BD_ENET_TX_READY ((ushort)0x8000) +#define BD_ENET_TX_PAD ((ushort)0x4000) +#define BD_ENET_TX_WRAP ((ushort)0x2000) +#define BD_ENET_TX_INTR ((ushort)0x1000) +#define BD_ENET_TX_LAST ((ushort)0x0800) +#define BD_ENET_TX_TC ((ushort)0x0400) +#define BD_ENET_TX_DEF ((ushort)0x0200) +#define BD_ENET_TX_HB ((ushort)0x0100) +#define BD_ENET_TX_LC ((ushort)0x0080) +#define BD_ENET_TX_RL ((ushort)0x0040) +#define BD_ENET_TX_RCMASK ((ushort)0x003c) +#define BD_ENET_TX_UN ((ushort)0x0002) +#define BD_ENET_TX_CSL ((ushort)0x0001) +#define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */ + +/* enhanced buffer descriptor control/status used by Ethernet transmit */ +#define BD_ENET_TX_INT 0x40000000 +#define BD_ENET_TX_TS 0x20000000 +#define BD_ENET_TX_PINS 0x10000000 +#define BD_ENET_TX_IINS 0x08000000 /* This device has up to three irqs on some platforms */ @@ -279,36 +283,40 @@ struct bufdesc_ex { #define FEC_ENET_MAX_TX_QS 3 #define FEC_ENET_MAX_RX_QS 3 -#define FEC_R_DES_START(X) ((X == 1) ? FEC_R_DES_START_1 : \ - ((X == 2) ? \ +#define FEC_R_DES_START(X) (((X) == 1) ? FEC_R_DES_START_1 : \ + (((X) == 2) ? \ FEC_R_DES_START_2 : FEC_R_DES_START_0)) -#define FEC_X_DES_START(X) ((X == 1) ? FEC_X_DES_START_1 : \ - ((X == 2) ? \ +#define FEC_X_DES_START(X) (((X) == 1) ? FEC_X_DES_START_1 : \ + (((X) == 2) ? \ FEC_X_DES_START_2 : FEC_X_DES_START_0)) -#define FEC_R_DES_ACTIVE(X) ((X == 1) ? FEC_R_DES_ACTIVE_1 : \ - ((X == 2) ? \ +#define FEC_R_BUFF_SIZE(X) (((X) == 1) ? FEC_R_BUFF_SIZE_1 : \ + (((X) == 2) ? \ + FEC_R_BUFF_SIZE_2 : FEC_R_BUFF_SIZE_0)) +#define FEC_R_DES_ACTIVE(X) (((X) == 1) ? FEC_R_DES_ACTIVE_1 : \ + (((X) == 2) ? \ FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0)) -#define FEC_X_DES_ACTIVE(X) ((X == 1) ? FEC_X_DES_ACTIVE_1 : \ - ((X == 2) ? \ +#define FEC_X_DES_ACTIVE(X) (((X) == 1) ? FEC_X_DES_ACTIVE_1 : \ + (((X) == 2) ? \ FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0)) -#define FEC_DMA_CFG(X) ((X == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1) +#define FEC_DMA_CFG(X) (((X) == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1) #define DMA_CLASS_EN (1 << 16) -#define FEC_RCMR(X) ((X == 2) ? FEC_RCMR_2 : FEC_RCMR_1) -#define IDLE_SLOPE_MASK 0xFFFF +#define FEC_RCMR(X) (((X) == 2) ? FEC_RCMR_2 : FEC_RCMR_1) +#define IDLE_SLOPE_MASK 0xffff #define IDLE_SLOPE_1 0x200 /* BW fraction: 0.5 */ #define IDLE_SLOPE_2 0x200 /* BW fraction: 0.5 */ -#define IDLE_SLOPE(X) ((X == 1) ? (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \ +#define IDLE_SLOPE(X) (((X) == 1) ? 
\ + (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \ (IDLE_SLOPE_2 & IDLE_SLOPE_MASK)) -#define RCMR_MATCHEN (0x1 << 16) -#define RCMR_CMP_CFG(v, n) ((v & 0x7) << (n << 2)) +#define RCMR_MATCHEN (0x1 << 16) +#define RCMR_CMP_CFG(v, n) (((v) & 0x7) << (n << 2)) #define RCMR_CMP_1 (RCMR_CMP_CFG(0, 0) | RCMR_CMP_CFG(1, 1) | \ RCMR_CMP_CFG(2, 2) | RCMR_CMP_CFG(3, 3)) #define RCMR_CMP_2 (RCMR_CMP_CFG(4, 0) | RCMR_CMP_CFG(5, 1) | \ RCMR_CMP_CFG(6, 2) | RCMR_CMP_CFG(7, 3)) -#define RCMR_CMP(X) ((X == 1) ? RCMR_CMP_1 : RCMR_CMP_2) -#define FEC_TX_BD_FTYPE(X) ((X & 0xF) << 20) +#define RCMR_CMP(X) (((X) == 1) ? RCMR_CMP_1 : RCMR_CMP_2) +#define FEC_TX_BD_FTYPE(X) (((X) & 0xf) << 20) /* The number of Tx and Rx buffers. These are allocated from the page * pool. The code may assume these are power of two, so it it best @@ -326,8 +334,8 @@ struct bufdesc_ex { #define TX_RING_SIZE 512 /* Must be power of two */ #define TX_RING_MOD_MASK 511 /* for this to work */ -#define BD_ENET_RX_INT 0x00800000 -#define BD_ENET_RX_PTP ((ushort)0x0400) +#define BD_ENET_RX_INT 0x00800000 +#define BD_ENET_RX_PTP ((ushort)0x0400) #define BD_ENET_RX_ICE 0x00000020 #define BD_ENET_RX_PCR 0x00000010 #define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR) @@ -359,13 +367,13 @@ struct bufdesc_ex { /* ENET interrupt coalescing macro define */ #define FEC_ITR_CLK_SEL (0x1 << 30) #define FEC_ITR_EN (0x1 << 31) -#define FEC_ITR_ICFT(X) ((X & 0xFF) << 20) -#define FEC_ITR_ICTT(X) ((X) & 0xFFFF) +#define FEC_ITR_ICFT(X) (((X) & 0xff) << 20) +#define FEC_ITR_ICTT(X) ((X) & 0xffff) #define FEC_ITR_ICFT_DEFAULT 200 /* Set 200 frame count threshold */ #define FEC_ITR_ICTT_DEFAULT 1000 /* Set 1000us timer threshold */ -#define FEC_VLAN_TAG_LEN 0x04 -#define FEC_ETHTYPE_LEN 0x02 +#define FEC_VLAN_TAG_LEN 0x04 +#define FEC_ETHTYPE_LEN 0x02 /* Controller is ENET-MAC */ #define FEC_QUIRK_ENET_MAC (1 << 0) @@ -390,7 +398,7 @@ struct bufdesc_ex { * frames not being transmitted until there is a 0-to-1 transition on * ENET_TDAR[TDAR]. */ -#define FEC_QUIRK_ERR006358 (1 << 7) +#define FEC_QUIRK_ERR006358 (1 << 7) /* ENET IP hw AVB * * i.MX6SX ENET IP add Audio Video Bridging (AVB) feature support. @@ -501,8 +509,9 @@ struct fec_enet_private { int speed; struct completion mdio_done; int irq[FEC_IRQ_NUM]; - int bufdesc_ex; + bool bufdesc_ex; int pause_flag; + u32 quirks; struct napi_struct napi; int csum_flags; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 3dca494797bd2ebb8a7dd6c1983b7f19e72e481c..8c5b15ee5ed733a567eb16bb20eb7d20395bf4ec 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -287,15 +287,13 @@ static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep, return entries > 0 ? 
entries : entries + txq->tx_ring_size; } -static void *swap_buffer(void *bufaddr, int len) +static void swap_buffer(void *bufaddr, int len) { int i; unsigned int *buf = bufaddr; - for (i = 0; i < DIV_ROUND_UP(len, 4); i++, buf++) - *buf = cpu_to_be32(*buf); - - return bufaddr; + for (i = 0; i < len; i += 4, buf++) + swab32s(buf); } static void swap_buffer2(void *dst_buf, void *src_buf, int len) @@ -361,8 +359,6 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); struct bufdesc *bdp = txq->cur_tx; struct bufdesc_ex *ebdp; int nr_frags = skb_shinfo(skb)->nr_frags; @@ -398,7 +394,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, } if (fep->bufdesc_ex) { - if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) + if (fep->quirks & FEC_QUIRK_HAS_AVB) estatus |= FEC_TX_BD_FTYPE(queue); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; @@ -410,11 +406,11 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); if (((unsigned long) bufaddr) & fep->tx_align || - id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { + fep->quirks & FEC_QUIRK_SWAP_FRAME) { memcpy(txq->tx_bounce[index], bufaddr, frag_len); bufaddr = txq->tx_bounce[index]; - if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) + if (fep->quirks & FEC_QUIRK_SWAP_FRAME) swap_buffer(bufaddr, frag_len); } @@ -450,8 +446,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); int nr_frags = skb_shinfo(skb)->nr_frags; struct bufdesc *bdp, *last_bdp; void *bufaddr; @@ -490,11 +484,11 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, queue = skb_get_queue_mapping(skb); index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); if (((unsigned long) bufaddr) & fep->tx_align || - id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { + fep->quirks & FEC_QUIRK_SWAP_FRAME) { memcpy(txq->tx_bounce[index], skb->data, buflen); bufaddr = txq->tx_bounce[index]; - if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) + if (fep->quirks & FEC_QUIRK_SWAP_FRAME) swap_buffer(bufaddr, buflen); } @@ -529,7 +523,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, fep->hwts_tx_en)) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) + if (fep->quirks & FEC_QUIRK_HAS_AVB) estatus |= FEC_TX_BD_FTYPE(queue); if (skb->ip_summed == CHECKSUM_PARTIAL) @@ -573,8 +567,6 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, int size, bool last_tcp, bool is_last) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); unsigned short queue = skb_get_queue_mapping(skb); unsigned short status; @@ -587,11 +579,11 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); if (((unsigned long) data) & fep->tx_align || - id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { + fep->quirks & FEC_QUIRK_SWAP_FRAME) { memcpy(txq->tx_bounce[index], data, size); data = txq->tx_bounce[index]; - if (id_entry->driver_data & 
FEC_QUIRK_SWAP_FRAME) + if (fep->quirks & FEC_QUIRK_SWAP_FRAME) swap_buffer(data, size); } @@ -607,7 +599,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, bdp->cbd_bufaddr = addr; if (fep->bufdesc_ex) { - if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) + if (fep->quirks & FEC_QUIRK_HAS_AVB) estatus |= FEC_TX_BD_FTYPE(queue); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; @@ -635,8 +627,6 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, struct bufdesc *bdp, int index) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); unsigned short queue = skb_get_queue_mapping(skb); @@ -652,11 +642,11 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE; dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE; if (((unsigned long)bufaddr) & fep->tx_align || - id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { + fep->quirks & FEC_QUIRK_SWAP_FRAME) { memcpy(txq->tx_bounce[index], skb->data, hdr_len); bufaddr = txq->tx_bounce[index]; - if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) + if (fep->quirks & FEC_QUIRK_SWAP_FRAME) swap_buffer(bufaddr, hdr_len); dmabuf = dma_map_single(&fep->pdev->dev, bufaddr, @@ -673,7 +663,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, bdp->cbd_datlen = hdr_len; if (fep->bufdesc_ex) { - if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) + if (fep->quirks & FEC_QUIRK_HAS_AVB) estatus |= FEC_TX_BD_FTYPE(queue); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; @@ -698,8 +688,6 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, struct tso_t tso; unsigned int index = 0; int ret; - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) { dev_kfree_skb_any(skb); @@ -761,7 +749,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, txq->cur_tx = bdp; /* Trigger transmission start */ - if (!(id_entry->driver_data & FEC_QUIRK_ERR007885) || + if (!(fep->quirks & FEC_QUIRK_ERR007885) || !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || @@ -879,6 +867,7 @@ static void fec_enet_enable_ring(struct net_device *ndev) for (i = 0; i < fep->num_rx_queues; i++) { rxq = fep->rx_queue[i]; writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i)); + writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); /* enable DMA1/2 */ if (i) @@ -924,8 +913,6 @@ static void fec_restart(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); u32 val; u32 temp_mac[2]; u32 rcntl = OPT_FRAME_SIZE | 0x04; @@ -935,7 +922,7 @@ fec_restart(struct net_device *ndev) * For i.MX6SX SOC, enet use AXI bus, we use disable MAC * instead of reset MAC itself. */ - if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) { + if (fep->quirks & FEC_QUIRK_HAS_AVB) { writel(0, fep->hwp + FEC_ECNTRL); } else { writel(1, fep->hwp + FEC_ECNTRL); @@ -946,17 +933,14 @@ fec_restart(struct net_device *ndev) * enet-mac reset will reset mac address registers too, * so need to reconfigure it. 
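The conversion repeated throughout fec_main.c above replaces per-call platform_get_device_id() lookups with a quirks word cached once at probe time; condensed from the fec_probe() hunk further down:

of_id = of_match_device(fec_dt_ids, &pdev->dev);
if (of_id)
	pdev->id_entry = of_id->data;
fep->quirks = pdev->id_entry->driver_data;	/* cached once at probe */

/* hot paths then test the cached copy instead of re-deriving it */
if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
	swap_buffer(bufaddr, frag_len);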
*/ - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { + if (fep->quirks & FEC_QUIRK_ENET_MAC) { memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW); writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH); } /* Clear any outstanding interrupt. */ - writel(0xffc00000, fep->hwp + FEC_IEVENT); - - /* Set maximum receive buffer size. */ - writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); + writel(0xffffffff, fep->hwp + FEC_IEVENT); fec_enet_bd_init(ndev); @@ -992,7 +976,7 @@ fec_restart(struct net_device *ndev) * The phy interface and speed need to get configured * differently on enet-mac. */ - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { + if (fep->quirks & FEC_QUIRK_ENET_MAC) { /* Enable flow control and length check */ rcntl |= 0x40000000 | 0x00000020; @@ -1015,7 +999,7 @@ fec_restart(struct net_device *ndev) } } else { #ifdef FEC_MIIGSK_ENR - if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) { + if (fep->quirks & FEC_QUIRK_USE_GASKET) { u32 cfgr; /* disable the gasket and wait */ writel(0, fep->hwp + FEC_MIIGSK_ENR); @@ -1068,7 +1052,7 @@ fec_restart(struct net_device *ndev) writel(0, fep->hwp + FEC_HASH_TABLE_LOW); #endif - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { + if (fep->quirks & FEC_QUIRK_ENET_MAC) { /* enable ENET endian swap */ ecntl |= (1 << 8); /* enable ENET store and forward mode */ @@ -1091,7 +1075,10 @@ fec_restart(struct net_device *ndev) fec_ptp_start_cyclecounter(ndev); /* Enable interrupts we wish to service */ - writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); + if (fep->link) + writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); + else + writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); /* Init the interrupt coalescing */ fec_enet_itr_coal_init(ndev); @@ -1102,8 +1089,6 @@ static void fec_stop(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); /* We cannot expect a graceful transmit stop without link !!! */ @@ -1118,7 +1103,7 @@ fec_stop(struct net_device *ndev) * For i.MX6SX SOC, enet use AXI bus, we use disable MAC * instead of reset MAC itself. 
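One behavioural change in the fec_restart() hunk above is easy to miss: the interrupt mask is now gated on link state, presumably so that a link-down interface services only the MII (PHY management) event rather than the full RX/TX set until the PHY reports link:

if (fep->link)
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
else
	writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);	/* MII event only */

This pairs with the fec_suspend() hunk further down, which forces fep->link to 0 whenever the PHY's supply clock or regulator is cut.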
*/ - if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) { + if (fep->quirks & FEC_QUIRK_HAS_AVB) { writel(0, fep->hwp + FEC_ECNTRL); } else { writel(1, fep->hwp + FEC_ECNTRL); @@ -1128,7 +1113,7 @@ fec_stop(struct net_device *ndev) writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); /* We have to keep ENET enabled to have MII interrupt stay working */ - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { + if (fep->quirks & FEC_QUIRK_ENET_MAC) { writel(2, fep->hwp + FEC_ECNTRL); writel(rmii_mode, fep->hwp + FEC_R_CNTRL); } @@ -1350,8 +1335,6 @@ static int fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); struct fec_enet_priv_rx_q *rxq; struct bufdesc *bdp; unsigned short status; @@ -1365,7 +1348,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) u16 vlan_tag; int index = 0; bool is_copybreak; - bool need_swap = id_entry->driver_data & FEC_QUIRK_SWAP_FRAME; + bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; #ifdef CONFIG_M532x flush_cache_all(); @@ -1880,8 +1863,6 @@ failed_clk_ipg: static int fec_enet_mii_probe(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); struct phy_device *phy_dev = NULL; char mdio_bus_id[MII_BUS_ID_SIZE]; char phy_name[MII_BUS_ID_SIZE + 3]; @@ -1894,6 +1875,8 @@ static int fec_enet_mii_probe(struct net_device *ndev) phy_dev = of_phy_connect(ndev, fep->phy_node, &fec_enet_adjust_link, 0, fep->phy_interface); + if (!phy_dev) + return -ENODEV; } else { /* check for attached phy */ for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { @@ -1927,7 +1910,7 @@ static int fec_enet_mii_probe(struct net_device *ndev) } /* mask with MAC supported features */ - if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) { + if (fep->quirks & FEC_QUIRK_HAS_GBIT) { phy_dev->supported &= PHY_GBIT_FEATURES; phy_dev->supported &= ~SUPPORTED_1000baseT_Half; #if !defined(CONFIG_M5272) @@ -1955,8 +1938,6 @@ static int fec_enet_mii_init(struct platform_device *pdev) static struct mii_bus *fec0_mii_bus; struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); struct device_node *node; int err = -ENXIO, i; @@ -1976,7 +1957,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) * mdio interface in board design, and need to be configured by * fec0 mii_bus. */ - if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) { + if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) { /* fec1 uses fec0 mii_bus */ if (mii_cnt && fec0_mii_bus) { fep->mii_bus = fec0_mii_bus; @@ -1997,7 +1978,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) * document. 
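A worked example for the MII speed setting just below, assuming a 66 MHz ipg clock (typical on i.MX6) and the i.MX encoding MDC = ipg / ((MII_SPEED + 1) * 2): DIV_ROUND_UP(66000000, 5000000) = 14, the ENET-MAC quirk drops that to 13, and the <<= 1 shifts the value into the MII_SPEED bit field, giving MDC = 66 MHz / ((13 + 1) * 2) ≈ 2.36 MHz, just under the 2.5 MHz MDIO ceiling.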
*/ fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) + if (fep->quirks & FEC_QUIRK_ENET_MAC) fep->phy_speed--; fep->phy_speed <<= 1; writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); @@ -2039,7 +2020,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) mii_cnt++; /* save fec0 mii_bus */ - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) + if (fep->quirks & FEC_QUIRK_ENET_MAC) fec0_mii_bus = fep->mii_bus; return 0; @@ -2308,11 +2289,9 @@ static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us) static void fec_enet_itr_coal_set(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); int rx_itr, tx_itr; - if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB)) + if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) return; /* Must be greater than zero to avoid unpredictable behavior */ @@ -2347,10 +2326,8 @@ static int fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); - if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB)) + if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) return -EOPNOTSUPP; ec->rx_coalesce_usecs = fep->rx_time_itr; @@ -2366,12 +2343,9 @@ static int fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); - unsigned int cycle; - if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB)) + if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) return -EOPNOTSUPP; if (ec->rx_max_coalesced_frames > 255) { @@ -2951,8 +2925,6 @@ static const struct net_device_ops fec_netdev_ops = { static int fec_enet_init(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); struct fec_enet_priv_tx_q *txq; struct fec_enet_priv_rx_q *rxq; struct bufdesc *cbd_base; @@ -3031,11 +3003,11 @@ static int fec_enet_init(struct net_device *ndev) writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); - if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) + if (fep->quirks & FEC_QUIRK_HAS_VLAN) /* enable hw VLAN support */ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; - if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) { + if (fep->quirks & FEC_QUIRK_HAS_CSUM) { ndev->gso_max_segs = FEC_MAX_TSO_SEGS; /* enable hw accelerator */ @@ -3044,7 +3016,7 @@ static int fec_enet_init(struct net_device *ndev) fep->csum_flags |= FLAG_RX_CSUM_ENABLED; } - if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) { + if (fep->quirks & FEC_QUIRK_HAS_AVB) { fep->tx_align = 0; fep->rx_align = 0x3f; } @@ -3144,10 +3116,6 @@ fec_probe(struct platform_device *pdev) int num_tx_qs; int num_rx_qs; - of_id = of_match_device(fec_dt_ids, &pdev->dev); - if (of_id) - pdev->id_entry = of_id->data; - fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); /* Init network device */ @@ -3161,13 +3129,17 @@ fec_probe(struct platform_device *pdev) /* setup board info structure */ fep = netdev_priv(ndev); + of_id = of_match_device(fec_dt_ids, &pdev->dev); + if (of_id) + pdev->id_entry = of_id->data; + fep->quirks = pdev->id_entry->driver_data; + fep->num_rx_queues = num_rx_qs; fep->num_tx_queues = num_tx_qs; #if !defined(CONFIG_M5272) /* default 
enable pause frame auto negotiation */ - if (pdev->id_entry && - (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT)) + if (fep->quirks & FEC_QUIRK_HAS_GBIT) fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; #endif @@ -3184,8 +3156,6 @@ fec_probe(struct platform_device *pdev) fep->pdev = pdev; fep->dev_id = dev_id++; - fep->bufdesc_ex = 0; - platform_set_drvdata(pdev, ndev); phy_node = of_parse_phandle(np, "phy-handle", 0); @@ -3238,12 +3208,11 @@ fec_probe(struct platform_device *pdev) if (IS_ERR(fep->clk_ref)) fep->clk_ref = NULL; + fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); - fep->bufdesc_ex = - pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX; if (IS_ERR(fep->clk_ptp)) { fep->clk_ptp = NULL; - fep->bufdesc_ex = 0; + fep->bufdesc_ex = false; } ret = fec_enet_clk_enable(ndev, true); @@ -3366,6 +3335,12 @@ static int __maybe_unused fec_suspend(struct device *dev) if (fep->reg_phy) regulator_disable(fep->reg_phy); + /* SOC supply clock to phy, when clock is disabled, phy link down + * SOC control phy regulator, when regulator is disabled, phy link down + */ + if (fep->clk_enet_out || fep->reg_phy) + fep->link = 0; + return 0; } diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 4fdf0aa16978baf237ae8f778ed941792e922d90..7402ab12e46bb0db722088c8591a629c4b9536af 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -116,9 +116,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); static void gfar_reset_task(struct work_struct *work); static void gfar_timeout(struct net_device *dev); static int gfar_close(struct net_device *dev); -struct sk_buff *gfar_new_skb(struct net_device *dev); -static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, - struct sk_buff *skb); +struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr); static int gfar_set_mac_address(struct net_device *dev); static int gfar_change_mtu(struct net_device *dev, int new_mtu); static irqreturn_t gfar_error(int irq, void *dev_id); @@ -173,11 +171,14 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, static int gfar_init_bds(struct net_device *ndev) { struct gfar_private *priv = netdev_priv(ndev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; struct gfar_priv_tx_q *tx_queue = NULL; struct gfar_priv_rx_q *rx_queue = NULL; struct txbd8 *txbdp; struct rxbd8 *rxbdp; + u32 *rfbptr; int i, j; + dma_addr_t bufaddr; for (i = 0; i < priv->num_tx_queues; i++) { tx_queue = priv->tx_queue[i]; @@ -201,6 +202,7 @@ static int gfar_init_bds(struct net_device *ndev) txbdp->status |= TXBD_WRAP; } + rfbptr = ®s->rfbptr0; for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; rx_queue->cur_rx = rx_queue->rx_bd_base; @@ -211,22 +213,22 @@ static int gfar_init_bds(struct net_device *ndev) struct sk_buff *skb = rx_queue->rx_skbuff[j]; if (skb) { - gfar_init_rxbdp(rx_queue, rxbdp, - rxbdp->bufPtr); + bufaddr = rxbdp->bufPtr; } else { - skb = gfar_new_skb(ndev); + skb = gfar_new_skb(ndev, &bufaddr); if (!skb) { netdev_err(ndev, "Can't allocate RX buffers\n"); return -ENOMEM; } rx_queue->rx_skbuff[j] = skb; - - gfar_new_rxbdp(rx_queue, rxbdp, skb); } + gfar_init_rxbdp(rx_queue, rxbdp, bufaddr); rxbdp++; } + rx_queue->rfbptr = rfbptr; + rfbptr += 2; } return 0; @@ -336,6 +338,20 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv) } } +static void 
gfar_init_rqprm(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 __iomem *baddr; + int i; + + baddr = ®s->rqprm0; + for (i = 0; i < priv->num_rx_queues; i++) { + gfar_write(baddr, priv->rx_queue[i]->rx_ring_size | + (DEFAULT_RX_LFC_THR << FBTHR_SHIFT)); + baddr++; + } +} + static void gfar_rx_buff_size_config(struct gfar_private *priv) { int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN; @@ -396,6 +412,13 @@ static void gfar_mac_rx_config(struct gfar_private *priv) if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; + /* Clear the LFC bit */ + gfar_write(®s->rctrl, rctrl); + /* Init flow control threshold values */ + gfar_init_rqprm(priv); + gfar_write(®s->ptv, DEFAULT_LFC_PTVVAL); + rctrl |= RCTRL_LFC; + /* Init rctrl based on our settings */ gfar_write(®s->rctrl, rctrl); } @@ -1687,6 +1710,9 @@ static int init_phy(struct net_device *dev) priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support); priv->phydev->advertising = priv->phydev->supported; + /* Add support for flow control, but don't advertise it by default */ + priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); + return 0; } @@ -2290,6 +2316,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) 0, frag_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, bufaddr))) + goto dma_map_err; /* set the TxBD length and buffer pointer */ txbdp->bufPtr = bufaddr; @@ -2339,8 +2367,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) fcb->ptp = 1; } - txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); + bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb), + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, bufaddr))) + goto dma_map_err; + + txbdp_start->bufPtr = bufaddr; /* If time stamping is requested one additional TxBD must be set up. 
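gfar_init_rqprm() above programs one RQPRM word per RX queue: the ring size in the low bits and the last-free-buffer threshold in bits [31:24] (FBTHR_SHIFT is 24 and DEFAULT_RX_LFC_THR is 16 in the gianfar.h hunk below). A sketch of composing and decoding that word, assuming a 256-entry ring:

#include <stdio.h>
#include <stdint.h>

#define FBTHR_SHIFT		24
#define DEFAULT_RX_LFC_THR	16

static uint32_t rqprm_encode(uint32_t ring_size)
{
	/* ring size in the low bits, flow-control threshold on top */
	return ring_size | (DEFAULT_RX_LFC_THR << FBTHR_SHIFT);
}

int main(void)
{
	uint32_t v = rqprm_encode(256);		/* assumed 256-entry ring */

	printf("rqprm = 0x%08x (thr=%u, size=%u)\n", v,
	       v >> FBTHR_SHIFT, v & ((1u << FBTHR_SHIFT) - 1));
	return 0;
}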
The * first TxBD points to the FCB and must have a data length of @@ -2406,6 +2438,25 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_unlock_irqrestore(&tx_queue->txlock, flags); return NETDEV_TX_OK; + +dma_map_err: + txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size); + if (do_tstamp) + txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); + for (i = 0; i < nr_frags; i++) { + lstatus = txbdp->lstatus; + if (!(lstatus & BD_LFLAG(TXBD_READY))) + break; + + txbdp->lstatus = lstatus & ~BD_LFLAG(TXBD_READY); + bufaddr = txbdp->bufPtr; + dma_unmap_page(priv->dev, bufaddr, txbdp->length, + DMA_TO_DEVICE); + txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); + } + gfar_wmb(); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; } /* Stops the kernel queue, and halts the controller */ @@ -2606,18 +2657,6 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) netdev_tx_completed_queue(txq, howmany, bytes_sent); } -static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, - struct sk_buff *skb) -{ - struct net_device *dev = rx_queue->dev; - struct gfar_private *priv = netdev_priv(dev); - dma_addr_t buf; - - buf = dma_map_single(priv->dev, skb->data, - priv->rx_buffer_size, DMA_FROM_DEVICE); - gfar_init_rxbdp(rx_queue, bdp, buf); -} - static struct sk_buff *gfar_alloc_skb(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); @@ -2632,9 +2671,25 @@ static struct sk_buff *gfar_alloc_skb(struct net_device *dev) return skb; } -struct sk_buff *gfar_new_skb(struct net_device *dev) +struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr) { - return gfar_alloc_skb(dev); + struct gfar_private *priv = netdev_priv(dev); + struct sk_buff *skb; + dma_addr_t addr; + + skb = gfar_alloc_skb(dev); + if (!skb) + return NULL; + + addr = dma_map_single(priv->dev, skb->data, + priv->rx_buffer_size, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, addr))) { + dev_kfree_skb_any(skb); + return NULL; + } + + *bufaddr = addr; + return skb; } static inline void count_errors(unsigned short status, struct net_device *dev) @@ -2805,11 +2860,12 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { struct sk_buff *newskb; + dma_addr_t bufaddr; rmb(); /* Add another skb for the future */ - newskb = gfar_new_skb(dev); + newskb = gfar_new_skb(dev, &bufaddr); skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; @@ -2825,9 +2881,10 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) bdp->status & RXBD_ERR)) { count_errors(bdp->status, dev); - if (unlikely(!newskb)) + if (unlikely(!newskb)) { newskb = skb; - else if (skb) + bufaddr = bdp->bufPtr; + } else if (skb) dev_kfree_skb(skb); } else { /* Increment the number of packets */ @@ -2854,7 +2911,11 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; /* Setup the new bdp */ - gfar_new_rxbdp(rx_queue, bdp, newskb); + gfar_init_rxbdp(rx_queue, bdp, bufaddr); + + /* Update Last Free RxBD pointer for LFC */ + if (unlikely(rx_queue->rfbptr && priv->tx_actual_en)) + gfar_write(rx_queue->rfbptr, (u32)bdp); /* Update to the next pointer */ bdp = next_bd(bdp, base, rx_queue->rx_ring_size); @@ -3370,7 +3431,11 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) if (phydev->asym_pause) rmt_adv |= LPA_PAUSE_ASYM; - lcl_adv = mii_advertise_flowctrl(phydev->advertising); + lcl_adv = 
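The open-coded lcl_adv translation completed just below is needed because phydev->advertising carries ethtool ADVERTISED_* bits, while mii_advertise_flowctrl() expects FLOW_CTRL_* capability flags, so the old one-liner fed it the wrong bitmap. A standalone sketch of the translation plus the usual full-duplex resolution rules (bit values mirror ethtool.h and mii.h):

#include <stdio.h>
#include <stdint.h>

/* ethtool-style advertising bits */
#define ADVERTISED_Pause	(1 << 13)
#define ADVERTISED_Asym_Pause	(1 << 14)
/* MII register bits */
#define ADVERTISE_PAUSE_CAP	0x0400
#define ADVERTISE_PAUSE_ASYM	0x0800
#define FLOW_CTRL_TX		0x01
#define FLOW_CTRL_RX		0x02

static uint16_t ethtool_to_mii_pause(uint32_t advertising)
{
	uint16_t lcl_adv = 0;

	if (advertising & ADVERTISED_Pause)
		lcl_adv |= ADVERTISE_PAUSE_CAP;
	if (advertising & ADVERTISED_Asym_Pause)
		lcl_adv |= ADVERTISE_PAUSE_ASYM;
	return lcl_adv;
}

/* same resolution rules as mii_resolve_flowctrl_fdx() */
static uint8_t resolve_fdx(uint16_t lcl, uint16_t rmt)
{
	if (lcl & rmt & ADVERTISE_PAUSE_CAP)
		return FLOW_CTRL_TX | FLOW_CTRL_RX;
	if (lcl & rmt & ADVERTISE_PAUSE_ASYM) {
		if (lcl & ADVERTISE_PAUSE_CAP)
			return FLOW_CTRL_RX;
		if (rmt & ADVERTISE_PAUSE_CAP)
			return FLOW_CTRL_TX;
	}
	return 0;
}

int main(void)
{
	uint16_t lcl = ethtool_to_mii_pause(ADVERTISED_Pause |
					    ADVERTISED_Asym_Pause);

	printf("flowctrl = %u\n", resolve_fdx(lcl, ADVERTISE_PAUSE_CAP));
	return 0;
}

Here both sides advertise symmetric pause, so the sketch resolves to TX and RX flow control enabled (prints 3).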
0; + if (phydev->advertising & ADVERTISED_Pause) + lcl_adv |= ADVERTISE_PAUSE_CAP; + if (phydev->advertising & ADVERTISED_Asym_Pause) + lcl_adv |= ADVERTISE_PAUSE_ASYM; flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); if (flowctrl & FLOW_CTRL_TX) @@ -3386,6 +3451,9 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) { struct gfar __iomem *regs = priv->gfargrp[0].regs; struct phy_device *phydev = priv->phydev; + struct gfar_priv_rx_q *rx_queue = NULL; + int i; + struct rxbd8 *bdp; if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) return; @@ -3394,6 +3462,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) u32 tempval1 = gfar_read(®s->maccfg1); u32 tempval = gfar_read(®s->maccfg2); u32 ecntrl = gfar_read(®s->ecntrl); + u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW); if (phydev->duplex != priv->oldduplex) { if (!(phydev->duplex)) @@ -3438,6 +3507,26 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); tempval1 |= gfar_get_flowctrl_cfg(priv); + /* Turn last free buffer recording on */ + if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) { + for (i = 0; i < priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; + bdp = rx_queue->cur_rx; + /* skip to previous bd */ + bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1, + rx_queue->rx_bd_base, + rx_queue->rx_ring_size); + + if (rx_queue->rfbptr) + gfar_write(rx_queue->rfbptr, (u32)bdp); + } + + priv->tx_actual_en = 1; + } + + if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval)) + priv->tx_actual_en = 0; + gfar_write(®s->maccfg1, tempval1); gfar_write(®s->maccfg2, tempval); gfar_write(®s->ecntrl, ecntrl); diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 2805cfbf1765147487c2f3da6390858803c5cd33..b581b8823a2a98a0ff8bc82ba71e41da20d1abaf 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -99,6 +99,10 @@ extern const char gfar_driver_version[]; #define GFAR_MAX_FIFO_STARVE 511 #define GFAR_MAX_FIFO_STARVE_OFF 511 +#define FBTHR_SHIFT 24 +#define DEFAULT_RX_LFC_THR 16 +#define DEFAULT_LFC_PTVVAL 4 + #define DEFAULT_RX_BUFFER_SIZE 1536 #define TX_RING_MOD_MASK(size) (size-1) #define RX_RING_MOD_MASK(size) (size-1) @@ -145,9 +149,7 @@ extern const char gfar_driver_version[]; | SUPPORTED_Autoneg \ | SUPPORTED_MII) -#define GFAR_SUPPORTED_GBIT (SUPPORTED_1000baseT_Full \ - | SUPPORTED_Pause \ - | SUPPORTED_Asym_Pause) +#define GFAR_SUPPORTED_GBIT SUPPORTED_1000baseT_Full /* TBI register addresses */ #define MII_TBICON 0x11 @@ -275,6 +277,7 @@ extern const char gfar_driver_version[]; #define RCTRL_TS_ENABLE 0x01000000 #define RCTRL_PAL_MASK 0x001f0000 +#define RCTRL_LFC 0x00004000 #define RCTRL_VLEX 0x00002000 #define RCTRL_FILREN 0x00001000 #define RCTRL_GHTX 0x00000400 @@ -851,7 +854,32 @@ struct gfar { u8 res23c[248]; u32 attr; /* 0x.bf8 - Attributes Register */ u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */ - u8 res24[688]; + u32 rqprm0; /* 0x.c00 - Receive queue parameters register 0 */ + u32 rqprm1; /* 0x.c04 - Receive queue parameters register 1 */ + u32 rqprm2; /* 0x.c08 - Receive queue parameters register 2 */ + u32 rqprm3; /* 0x.c0c - Receive queue parameters register 3 */ + u32 rqprm4; /* 0x.c10 - Receive queue parameters register 4 */ + u32 rqprm5; /* 0x.c14 - Receive queue parameters register 5 */ + u32 rqprm6; /* 0x.c18 - Receive queue parameters register 6 */ + u32 
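When MACCFG1_TX_FLOW flips from off to on above, each ring's rfbptr is seeded with the descriptor one behind cur_rx; on a ring, stepping forward ring_size - 1 entries is the wrap-safe way to step back one. A sketch of that modular arithmetic, assuming skip_bd() is a simple forward-with-wrap helper:

#include <stdio.h>

#define RING_SIZE 8

/* forward-with-wrap step over an array-backed descriptor ring,
 * in the spirit of gianfar's skip_bd() helper */
static int skip_bd(int cur, int stride, int ring_size)
{
	return (cur + stride) % ring_size;
}

int main(void)
{
	int cur;

	for (cur = 0; cur < RING_SIZE; cur++)
		printf("cur_rx=%d -> last free bd=%d\n",
		       cur, skip_bd(cur, RING_SIZE - 1, RING_SIZE));
	return 0;
}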
rqprm7; /* 0x.c1c - Receive queue parameters register 7 */ + u8 res24[36]; + u32 rfbptr0; /* 0x.c44 - Last free RxBD pointer for ring 0 */ + u8 res24a[4]; + u32 rfbptr1; /* 0x.c4c - Last free RxBD pointer for ring 1 */ + u8 res24b[4]; + u32 rfbptr2; /* 0x.c54 - Last free RxBD pointer for ring 2 */ + u8 res24c[4]; + u32 rfbptr3; /* 0x.c5c - Last free RxBD pointer for ring 3 */ + u8 res24d[4]; + u32 rfbptr4; /* 0x.c64 - Last free RxBD pointer for ring 4 */ + u8 res24e[4]; + u32 rfbptr5; /* 0x.c6c - Last free RxBD pointer for ring 5 */ + u8 res24f[4]; + u32 rfbptr6; /* 0x.c74 - Last free RxBD pointer for ring 6 */ + u8 res24g[4]; + u32 rfbptr7; /* 0x.c7c - Last free RxBD pointer for ring 7 */ + u8 res24h[4]; + u8 res24x[556]; u32 isrg0; /* 0x.eb0 - Interrupt steering group 0 register */ u32 isrg1; /* 0x.eb4 - Interrupt steering group 1 register */ u32 isrg2; /* 0x.eb8 - Interrupt steering group 2 register */ @@ -1011,6 +1039,7 @@ struct gfar_priv_rx_q { /* RX Coalescing values */ unsigned char rxcoalescing; unsigned long rxic; + u32 *rfbptr; }; enum gfar_irqinfo_id { @@ -1101,6 +1130,7 @@ struct gfar_private { unsigned int num_tx_queues; unsigned int num_rx_queues; unsigned int num_grps; + int tx_actual_en; /* Network Statistics */ struct gfar_extra_stats extra_stats; diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 76d70708f864af66b4a525e671f021e0d30a2b7d..3e1a9c1a67a95ffdaddbcca9739a9e37dee66eac 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -579,8 +579,13 @@ static int gfar_spauseparam(struct net_device *dev, u32 tempval; tempval = gfar_read(®s->maccfg1); tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); - if (priv->tx_pause_en) + + priv->tx_actual_en = 0; + if (priv->tx_pause_en) { + priv->tx_actual_en = 1; tempval |= MACCFG1_TX_FLOW; + } + if (priv->rx_pause_en) tempval |= MACCFG1_RX_FLOW; gfar_write(®s->maccfg1, tempval); diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c index 76a6e0c77d69e1a01544a388b465e7e82ddbfd87..ae6e30d39f0f6b07ac8b9b2fec78bd9eb250e535 100644 --- a/drivers/net/ethernet/hp/hp100.c +++ b/drivers/net/ethernet/hp/hp100.c @@ -490,7 +490,8 @@ static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus, eid = hp100_read_id(ioaddr); if (eid == NULL) { /* bad checksum? 
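The reserved-byte padding in the register map above is fiddly: res24[36] must carry 0x.c20 to 0x.c44, and res24x[556] must carry 0x.c84 to isrg0 at 0x.eb0. A compile-time sketch that models just this tail of the block (rebased so attr sits at offset 0, and assuming the compiler inserts no padding of its own):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* only the tail of the gianfar register block, rebased so that
 * 'attr' (really at 0x.bf8) sits at offset 0 */
struct gfar_tail {
	uint32_t attr;			/* 0x.bf8 */
	uint32_t attreli;		/* 0x.bfc */
	uint32_t rqprm[8];		/* 0x.c00 .. 0x.c1c */
	uint8_t  res24[36];
	struct {
		uint32_t rfbptr;	/* 0x.c44 + 8*n */
		uint8_t  res[4];
	} rfb[8];
	uint8_t  res24x[556];
	uint32_t isrg0;			/* 0x.eb0 */
};

#define GFAR_OFF(m)	(0xbf8 + offsetof(struct gfar_tail, m))

_Static_assert(GFAR_OFF(rqprm) == 0xc00, "rqprm0 misplaced");
_Static_assert(GFAR_OFF(rfb)   == 0xc44, "rfbptr0 misplaced");
_Static_assert(GFAR_OFF(isrg0) == 0xeb0, "isrg0 misplaced");

int main(void)
{
	printf("layout ok: rfbptr0 at 0x%lx\n", (unsigned long)GFAR_OFF(rfb));
	return 0;
}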
*/ - printk(KERN_WARNING "hp100_probe: bad ID checksum at base port 0x%x\n", ioaddr); + printk(KERN_WARNING "%s: bad ID checksum at base port 0x%x\n", + __func__, ioaddr); goto out2; } @@ -498,7 +499,9 @@ static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus, for (i = uc = 0; i < 7; i++) uc += hp100_inb(LAN_ADDR + i); if (uc != 0xff) { - printk(KERN_WARNING "hp100_probe: bad lan address checksum at port 0x%x)\n", ioaddr); + printk(KERN_WARNING + "%s: bad lan address checksum at port 0x%x)\n", + __func__, ioaddr); err = -EIO; goto out2; } diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 87bd953cc2eeaef7f6af65902841645e98f7eda5..3f3fba9e465076220ba2a3644e8f240bbf5dda0a 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2323,16 +2323,11 @@ static int emac_check_deps(struct emac_instance *dev, static void emac_put_deps(struct emac_instance *dev) { - if (dev->mal_dev) - of_dev_put(dev->mal_dev); - if (dev->zmii_dev) - of_dev_put(dev->zmii_dev); - if (dev->rgmii_dev) - of_dev_put(dev->rgmii_dev); - if (dev->mdio_dev) - of_dev_put(dev->mdio_dev); - if (dev->tah_dev) - of_dev_put(dev->tah_dev); + of_dev_put(dev->mal_dev); + of_dev_put(dev->zmii_dev); + of_dev_put(dev->rgmii_dev); + of_dev_put(dev->mdio_dev); + of_dev_put(dev->tah_dev); } static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action, @@ -2371,8 +2366,7 @@ static int emac_wait_deps(struct emac_instance *dev) bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier); err = emac_check_deps(dev, deps) ? 0 : -ENODEV; for (i = 0; i < EMAC_DEP_COUNT; i++) { - if (deps[i].node) - of_node_put(deps[i].node); + of_node_put(deps[i].node); if (err && deps[i].ofdev) of_dev_put(deps[i].ofdev); } @@ -2383,8 +2377,7 @@ static int emac_wait_deps(struct emac_instance *dev) dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev; dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev; } - if (deps[EMAC_DEP_PREV_IDX].ofdev) - of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev); + of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev); return err; } @@ -3113,8 +3106,7 @@ static void __exit emac_exit(void) /* Destroy EMAC boot list */ for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++) - if (emac_boot_list[i]) - of_node_put(emac_boot_list[i]); + of_node_put(emac_boot_list[i]); } module_init(emac_init); diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 24f3986cfae2950f582d3a5e4644c4e1b666391e..83140cbb5f0119d6ab478e3491a44c4813f152f7 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -3136,12 +3136,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, * packets may get corrupted during padding by HW. * To WA this issue, pad all small packets manually. 
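The emac cleanups above work because of_dev_put() and of_node_put(), like most kernel put-style helpers, accept NULL and do nothing, so the callers' explicit checks were noise. A sketch of the same NULL-tolerant pattern in plain C:

#include <stdio.h>
#include <stdlib.h>

struct dev { int refs; };

/* NULL-tolerant put, in the style of of_dev_put()/of_node_put():
 * callers never need an "if (ptr)" guard */
static void dev_put(struct dev *d)
{
	if (!d)
		return;
	if (--d->refs == 0)
		free(d);
}

int main(void)
{
	struct dev *d = calloc(1, sizeof(*d));

	d->refs = 1;
	dev_put(d);	/* releases */
	dev_put(NULL);	/* harmless no-op */
	puts("ok");
	return 0;
}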
*/ - if (skb->len < ETH_ZLEN) { - if (skb_pad(skb, ETH_ZLEN - skb->len)) - return NETDEV_TX_OK; - skb->len = ETH_ZLEN; - skb_set_tail_pointer(skb, ETH_ZLEN); - } + if (eth_skb_pad(skb)) + return NETDEV_TX_OK; mss = skb_shinfo(skb)->gso_size; /* The controller does a simple calculation to @@ -4104,7 +4100,7 @@ static bool e1000_tbi_should_accept(struct e1000_adapter *adapter, static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter, unsigned int bufsz) { - struct sk_buff *skb = netdev_alloc_skb_ip_align(adapter->netdev, bufsz); + struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz); if (unlikely(!skb)) adapter->alloc_rx_buff_failed++; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 952ef7c434e814a81f7b016f4b88abc090ff5e26..e14fd85f64eb4b9e2817f7fb9dc9d360ad093e36 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1016,7 +1016,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, */ if (length < copybreak) { struct sk_buff *new_skb = - netdev_alloc_skb_ip_align(netdev, length); + napi_alloc_skb(&adapter->napi, length); if (new_skb) { skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN, @@ -3449,15 +3449,12 @@ static void e1000e_setup_rss_hash(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 mrqc, rxcsum; + u32 rss_key[10]; int i; - static const u32 rsskey[10] = { - 0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0, - 0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe - }; - /* Fill out hash function seed */ + netdev_rss_key_fill(rss_key, sizeof(rss_key)); for (i = 0; i < 10; i++) - ew32(RSSRK(i), rsskey[i]); + ew32(RSSRK(i), rss_key[i]); /* Direct all traffic to queue 0 */ for (i = 0; i < 32; i++) @@ -5557,12 +5554,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, /* The minimum packet size with TCTL.PSP set is 17 bytes so * pad skb in order to meet this minimum size requirement */ - if (unlikely(skb->len < 17)) { - if (skb_pad(skb, 17 - skb->len)) - return NETDEV_TX_OK; - skb->len = 17; - skb_set_tail_pointer(skb, 17); - } + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; mss = skb_shinfo(skb)->gso_size; if (mss) { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 2d04464e6aa32f0bed0f491c621376a4f52afd80..651f53bc737686118e1f2fc457f452de59260998 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -916,11 +916,15 @@ static u32 fm10k_get_rssrk_size(struct net_device *netdev) return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG; } -static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key) +static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) { struct fm10k_intfc *interface = netdev_priv(netdev); int i, err; + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + err = fm10k_get_reta(netdev, indir); if (err || !key) return err; @@ -932,12 +936,16 @@ static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key) } static int fm10k_set_rssh(struct net_device *netdev, const u32 *indir, - const u8 *key) + const u8 *key, const u8 hfunc) { struct fm10k_intfc *interface = netdev_priv(netdev); struct fm10k_hw *hw = &interface->hw; int i, err; + /* We do not allow change in unsupported parameters */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + err = 
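eth_skb_pad() above replaces the hand-rolled pad: it zero-extends the frame to ETH_ZLEN (60 bytes) and, like skb_put_padto() in the e1000e hunk below, frees the skb itself on failure, which is why the caller can simply return NETDEV_TX_OK. A userspace sketch of the pad-to-minimum logic:

#include <stdio.h>
#include <string.h>

#define ETH_ZLEN 60	/* minimum Ethernet frame length, sans FCS */

/* pad a frame buffer up to 'min' with zeroes, as eth_skb_pad()/
 * skb_put_padto() do for skbs; returns the new length, or -1 if the
 * buffer cannot hold the padded frame */
static int pad_to(unsigned char *buf, size_t cap, size_t len, size_t min)
{
	if (len >= min)
		return (int)len;
	if (cap < min)
		return -1;	/* the kernel helpers free the skb here */
	memset(buf + len, 0, min - len);
	return (int)min;
}

int main(void)
{
	unsigned char frame[128] = "short payload";
	int n = pad_to(frame, sizeof(frame), 13, ETH_ZLEN);

	printf("padded length = %d\n", n);
	return 0;
}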
fm10k_set_reta(netdev, indir); if (err || !key) return err; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index e645af412e76bda4622f09bd946f98003e181c45..ee1ecb146df7de40e126cbffca7c80528254ef8e 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -83,7 +83,7 @@ static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring, return true; /* alloc new page for storage */ - page = alloc_page(GFP_ATOMIC | __GFP_COLD); + page = dev_alloc_page(); if (unlikely(!page)) { rx_ring->rx_stats.alloc_failed++; return false; @@ -308,8 +308,8 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring, #endif /* allocate a skb to store the frags */ - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - FM10K_RX_HDR_LEN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, + FM10K_RX_HDR_LEN); if (unlikely(!skb)) { rx_ring->rx_stats.alloc_failed++; return NULL; @@ -578,14 +578,9 @@ static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring, if (skb_is_nonlinear(skb)) fm10k_pull_tail(rx_ring, rx_desc, skb); - /* if skb_pad returns an error the skb was freed */ - if (unlikely(skb->len < 60)) { - int pad_len = 60 - skb->len; - - if (skb_pad(skb, pad_len)) - return true; - __skb_put(skb, pad_len); - } + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; return false; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index a0cb74ab3dc617d0262ebaf4df9c52bc0b159093..4f5892cc32d70c15b7911e0975eb6f39a5f5b6fb 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1551,15 +1551,11 @@ void fm10k_down(struct fm10k_intfc *interface) static int fm10k_sw_init(struct fm10k_intfc *interface, const struct pci_device_id *ent) { - static const u32 seed[FM10K_RSSRK_SIZE] = { 0xda565a6d, 0xc20e5b25, - 0x3d256741, 0xb08fa343, - 0xcb2bcad0, 0xb4307bae, - 0xa32dcb77, 0x0cf23080, - 0x3bb7426a, 0xfa01acbe }; const struct fm10k_info *fi = fm10k_info_tbl[ent->driver_data]; struct fm10k_hw *hw = &interface->hw; struct pci_dev *pdev = interface->pdev; struct net_device *netdev = interface->netdev; + u32 rss_key[FM10K_RSSRK_SIZE]; unsigned int rss; int err; @@ -1673,8 +1669,8 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, /* initialize vxlan_port list */ INIT_LIST_HEAD(&interface->vxlan_port); - /* initialize RSS key */ - memcpy(interface->rssrk, seed, sizeof(seed)); + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + memcpy(interface->rssrk, rss_key, sizeof(rss_key)); /* Start off interface as being down */ set_bit(__FM10K_DOWN, &interface->state); diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index f1e33f896439b1d6ccbc5f2428d9aef05fcbcee3..fc50f6461b13e3bd2633eecf7b3a71538a432dc2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -87,7 +87,7 @@ #define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */ #endif /* I40E_FCOE */ #define I40E_MAX_AQ_BUF_SIZE 4096 -#define I40E_AQ_LEN 32 +#define I40E_AQ_LEN 128 #define I40E_AQ_WORK_LIMIT 16 #define I40E_MAX_USER_PRIORITY 8 #define I40E_DEFAULT_MSG_ENABLE 4 @@ -146,6 +146,7 @@ enum i40e_state_t { __I40E_DOWN_REQUESTED, __I40E_FD_FLUSH_REQUESTED, __I40E_RESET_FAILED, + __I40E_PORT_TX_SUSPENDED, }; enum i40e_interrupt_policy { @@ -269,7 +270,8 @@ struct i40e_pf { u16 msg_enable; char 
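Both RSS hunks around here retire hard-coded seeds in favor of netdev_rss_key_fill(), which copies a key randomized once at boot so receive flow placement is no longer predictable across machines. A userspace sketch of filling such a key, with getrandom(2) standing in for the kernel's boot-time key (glibc >= 2.25 assumed):

#include <stdio.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/random.h>

#define RSSRK_SIZE 10	/* ten 32-bit key registers, as in fm10k/e1000e */

int main(void)
{
	uint32_t rss_key[RSSRK_SIZE];
	int i;

	/* netdev_rss_key_fill() copies a key randomized once at boot;
	 * fresh randomness per run is close enough for a sketch */
	if (getrandom(rss_key, sizeof(rss_key), 0) !=
	    (ssize_t)sizeof(rss_key))
		return 1;

	for (i = 0; i < RSSRK_SIZE; i++)
		printf("RSSRK(%d) = 0x%08x\n", i, rss_key[i]);
	return 0;
}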
misc_int_name[IFNAMSIZ + 9]; u16 adminq_work_limit; /* num of admin receive queue desc to process */ - int service_timer_period; + unsigned long service_timer_period; + unsigned long service_timer_previous; struct timer_list service_timer; struct work_struct service_task; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 72f5d25a222f0769ea514d5789407a543e66c3c2..77f6254a89ac6078136e7cd92af1f910981c3f6f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -51,7 +51,7 @@ static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) static void i40e_adminq_init_regs(struct i40e_hw *hw) { /* set head and tail registers in our local struct */ - if (hw->mac.type == I40E_MAC_VF) { + if (i40e_is_vf(hw)) { hw->aq.asq.tail = I40E_VF_ATQT1; hw->aq.asq.head = I40E_VF_ATQH1; hw->aq.asq.len = I40E_VF_ATQLEN1; @@ -617,7 +617,8 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) /* pre-emptive resource lock release */ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); - hw->aq.nvm_busy = false; + hw->aq.nvm_release_on_done = false; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; ret_code = i40e_aq_set_hmc_resource_profile(hw, I40E_HMC_PROFILE_DEFAULT, @@ -754,12 +755,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, goto asq_send_command_exit; } - if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n"); - status = I40E_ERR_NVM; - goto asq_send_command_exit; - } - details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); if (cmd_details) { *details = *cmd_details; @@ -853,7 +848,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, */ if (!details->async && !details->postpone) { u32 total_delay = 0; - u32 delay_len = 10; do { /* AQ designers suggest use of head for better @@ -861,9 +855,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, */ if (i40e_asq_done(hw)) break; - /* ugh! 
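The admin-queue polling rewrite completed just below swaps a udelay(10) busy-wait for usleep_range(1000, 2000) and recounts the timeout in milliseconds (I40E_ASQ_CMD_TIMEOUT becomes 100 msecs in the header hunk further down), yielding the CPU while firmware works. A sketch of the same bounded sleep-poll loop, with a stand-in for i40e_asq_done():

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

#define CMD_TIMEOUT_MS 100	/* mirrors the new I40E_ASQ_CMD_TIMEOUT unit */

/* stand-in for i40e_asq_done(): pretend the HW finishes on pass 3 */
static bool hw_done(int iter)
{
	return iter >= 3;
}

int main(void)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 1 * 1000 * 1000 };
	int total_delay = 0;

	/* sleep-based poll: one bounded pass per millisecond instead of
	 * busy-waiting, so the CPU is released while we wait */
	do {
		if (hw_done(total_delay))
			break;
		nanosleep(&ts, NULL);
		total_delay++;		/* now counts msecs, not usecs */
	} while (total_delay < CMD_TIMEOUT_MS);

	if (total_delay < CMD_TIMEOUT_MS)
		printf("done after %d ms\n", total_delay);
	else
		printf("timed out at %d ms\n", total_delay);
	return 0;
}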
delay while spin_lock */ - udelay(delay_len); - total_delay += delay_len; + usleep_range(1000, 2000); + total_delay++; } while (total_delay < hw->aq.asq_cmd_timeout); } @@ -903,9 +896,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; } - if (!status && i40e_is_nvm_update_op(desc)) - hw->aq.nvm_busy = true; - asq_send_command_error: mutex_unlock(&hw->aq.asq_mutex); asq_send_command_exit: @@ -958,9 +948,6 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, - "AQRX: Queue is empty.\n"); ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; goto clean_arq_element_out; } @@ -982,10 +969,10 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, e->desc = *desc; datalen = le16_to_cpu(desc->datalen); - e->msg_size = min(datalen, e->msg_size); - if (e->msg_buf != NULL && (e->msg_size != 0)) + e->msg_len = min(datalen, e->buf_len); + if (e->msg_buf != NULL && (e->msg_len != 0)) memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, - e->msg_size); + e->msg_len); i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, @@ -1021,7 +1008,6 @@ clean_arq_element_out: mutex_unlock(&hw->aq.arq_mutex); if (i40e_is_nvm_update_op(&e->desc)) { - hw->aq.nvm_busy = false; if (hw->aq.nvm_release_on_done) { i40e_release_nvm(hw); hw->aq.nvm_release_on_done = false; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index ba38a89c79d6e4ffc10d6d8c1132ca8517a62ebb..564d0b0192f789aad0b4af4ceed04fe9fd15cd06 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -28,6 +28,7 @@ #define _I40E_ADMINQ_H_ #include "i40e_osdep.h" +#include "i40e_status.h" #include "i40e_adminq_cmd.h" #define I40E_ADMINQ_DESC(R, i) \ @@ -76,7 +77,8 @@ struct i40e_asq_cmd_details { /* ARQ event information */ struct i40e_arq_event_info { struct i40e_aq_desc desc; - u16 msg_size; + u16 msg_len; + u16 buf_len; u8 *msg_buf; }; @@ -93,7 +95,6 @@ struct i40e_adminq_info { u16 fw_min_ver; /* firmware minor version */ u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ - bool nvm_busy; bool nvm_release_on_done; struct mutex asq_mutex; /* Send queue lock */ @@ -108,7 +109,7 @@ struct i40e_adminq_info { * i40e_aq_rc_to_posix - convert errors to user-land codes * aq_rc: AdminQ error code to convert **/ -static inline int i40e_aq_rc_to_posix(u16 aq_rc) +static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc) { int aq_to_posix[] = { 0, /* I40E_AQ_RC_OK */ @@ -136,12 +137,18 @@ static inline int i40e_aq_rc_to_posix(u16 aq_rc) -EFBIG, /* I40E_AQ_RC_EFBIG */ }; + /* aq_rc is invalid if AQ timed out */ + if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT) + return -EAGAIN; + + if (aq_rc >= ARRAY_SIZE(aq_to_posix)) + return -ERANGE; return aq_to_posix[aq_rc]; } /* general information */ #define I40E_AQ_LARGE_BUF 512 -#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */ +#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode); diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 15f289f2917f70bb313501164f9c5fd8ef4cae05..8835aeeff23e13f1d1d0c78f73ed46442b05c40c 100644 --- 
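The hardened i40e_aq_rc_to_posix() above adds two guards: a timed-out AQ leaves aq_rc undefined, so that case maps straight to -EAGAIN, and any firmware code beyond the table maps to -ERANGE instead of indexing off the end. A runnable sketch with a truncated copy of the mapping table:

#include <stdio.h>
#include <errno.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))
#define ERR_TIMEOUT	(-100)	/* stand-in for I40E_ERR_ADMIN_QUEUE_TIMEOUT */

static int aq_rc_to_posix(int aq_ret, unsigned int aq_rc)
{
	/* table truncated for the sketch; the driver maps all 23 codes */
	static const int aq_to_posix[] = {
		0, -EPERM, -ENOENT, -ESRCH, -EINTR, -EIO, -ENXIO, -E2BIG,
		-EAGAIN, -ENOMEM, -EACCES, -EFAULT, -EBUSY, -EEXIST, -EINVAL,
	};

	/* aq_rc is garbage if the queue timed out: don't index with it */
	if (aq_ret == ERR_TIMEOUT)
		return -EAGAIN;
	/* firmware may grow new codes; refuse to walk off the table */
	if (aq_rc >= ARRAY_SIZE(aq_to_posix))
		return -ERANGE;
	return aq_to_posix[aq_rc];
}

int main(void)
{
	printf("%d %d %d\n", aq_rc_to_posix(0, 5),
	       aq_rc_to_posix(ERR_TIMEOUT, 0), aq_rc_to_posix(0, 99));
	return 0;
}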
a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -33,8 +33,8 @@ * This file needs to comply with the Linux Kernel coding style. */ -#define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0002 +#define I40E_FW_API_VERSION_MAJOR 0x0001 +#define I40E_FW_API_VERSION_MINOR 0x0002 struct i40e_aq_desc { __le16 flags; @@ -66,216 +66,217 @@ struct i40e_aq_desc { */ /* command flags and offsets*/ -#define I40E_AQ_FLAG_DD_SHIFT 0 -#define I40E_AQ_FLAG_CMP_SHIFT 1 -#define I40E_AQ_FLAG_ERR_SHIFT 2 -#define I40E_AQ_FLAG_VFE_SHIFT 3 -#define I40E_AQ_FLAG_LB_SHIFT 9 -#define I40E_AQ_FLAG_RD_SHIFT 10 -#define I40E_AQ_FLAG_VFC_SHIFT 11 -#define I40E_AQ_FLAG_BUF_SHIFT 12 -#define I40E_AQ_FLAG_SI_SHIFT 13 -#define I40E_AQ_FLAG_EI_SHIFT 14 -#define I40E_AQ_FLAG_FE_SHIFT 15 - -#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ -#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ -#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ -#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ -#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ -#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ -#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ +#define I40E_AQ_FLAG_DD_SHIFT 0 +#define I40E_AQ_FLAG_CMP_SHIFT 1 +#define I40E_AQ_FLAG_ERR_SHIFT 2 +#define I40E_AQ_FLAG_VFE_SHIFT 3 +#define I40E_AQ_FLAG_LB_SHIFT 9 +#define I40E_AQ_FLAG_RD_SHIFT 10 +#define I40E_AQ_FLAG_VFC_SHIFT 11 +#define I40E_AQ_FLAG_BUF_SHIFT 12 +#define I40E_AQ_FLAG_SI_SHIFT 13 +#define I40E_AQ_FLAG_EI_SHIFT 14 +#define I40E_AQ_FLAG_FE_SHIFT 15 + +#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ /* error codes */ enum i40e_admin_queue_err { - I40E_AQ_RC_OK = 0, /* success */ - I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ - I40E_AQ_RC_ENOENT = 2, /* No such element */ - I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ - I40E_AQ_RC_EINTR = 4, /* operation interrupted */ - I40E_AQ_RC_EIO = 5, /* I/O error */ - I40E_AQ_RC_ENXIO = 6, /* No such resource */ - I40E_AQ_RC_E2BIG = 7, /* Arg too long */ - I40E_AQ_RC_EAGAIN = 8, /* Try again */ - I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ - I40E_AQ_RC_EACCES = 10, /* Permission denied */ - I40E_AQ_RC_EFAULT = 11, /* Bad address */ - I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ - I40E_AQ_RC_EEXIST = 13, /* object already exists */ - I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ - I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ - I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ - 
I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ - I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ - I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev cmd error */ - I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ - I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ - I40E_AQ_RC_EFBIG = 22, /* File too large */ + I40E_AQ_RC_OK = 0, /* success */ + I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ + I40E_AQ_RC_ENOENT = 2, /* No such element */ + I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ + I40E_AQ_RC_EINTR = 4, /* operation interrupted */ + I40E_AQ_RC_EIO = 5, /* I/O error */ + I40E_AQ_RC_ENXIO = 6, /* No such resource */ + I40E_AQ_RC_E2BIG = 7, /* Arg too long */ + I40E_AQ_RC_EAGAIN = 8, /* Try again */ + I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ + I40E_AQ_RC_EACCES = 10, /* Permission denied */ + I40E_AQ_RC_EFAULT = 11, /* Bad address */ + I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ + I40E_AQ_RC_EEXIST = 13, /* object already exists */ + I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ + I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ + I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ + I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ + I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ + I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ + I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ + I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ + I40E_AQ_RC_EFBIG = 22, /* File too large */ }; /* Admin Queue command opcodes */ enum i40e_admin_queue_opc { /* aq commands */ - i40e_aqc_opc_get_version = 0x0001, - i40e_aqc_opc_driver_version = 0x0002, - i40e_aqc_opc_queue_shutdown = 0x0003, - i40e_aqc_opc_set_pf_context = 0x0004, + i40e_aqc_opc_get_version = 0x0001, + i40e_aqc_opc_driver_version = 0x0002, + i40e_aqc_opc_queue_shutdown = 0x0003, + i40e_aqc_opc_set_pf_context = 0x0004, /* resource ownership */ - i40e_aqc_opc_request_resource = 0x0008, - i40e_aqc_opc_release_resource = 0x0009, + i40e_aqc_opc_request_resource = 0x0008, + i40e_aqc_opc_release_resource = 0x0009, - i40e_aqc_opc_list_func_capabilities = 0x000A, - i40e_aqc_opc_list_dev_capabilities = 0x000B, + i40e_aqc_opc_list_func_capabilities = 0x000A, + i40e_aqc_opc_list_dev_capabilities = 0x000B, - i40e_aqc_opc_set_cppm_configuration = 0x0103, - i40e_aqc_opc_set_arp_proxy_entry = 0x0104, - i40e_aqc_opc_set_ns_proxy_entry = 0x0105, + i40e_aqc_opc_set_cppm_configuration = 0x0103, + i40e_aqc_opc_set_arp_proxy_entry = 0x0104, + i40e_aqc_opc_set_ns_proxy_entry = 0x0105, /* LAA */ - i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ - i40e_aqc_opc_mac_address_read = 0x0107, - i40e_aqc_opc_mac_address_write = 0x0108, + i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ + i40e_aqc_opc_mac_address_read = 0x0107, + i40e_aqc_opc_mac_address_write = 0x0108, /* PXE */ - i40e_aqc_opc_clear_pxe_mode = 0x0110, + i40e_aqc_opc_clear_pxe_mode = 0x0110, /* internal switch commands */ - i40e_aqc_opc_get_switch_config = 0x0200, - i40e_aqc_opc_add_statistics = 0x0201, - i40e_aqc_opc_remove_statistics = 0x0202, - i40e_aqc_opc_set_port_parameters = 0x0203, - i40e_aqc_opc_get_switch_resource_alloc = 0x0204, - - i40e_aqc_opc_add_vsi = 0x0210, - i40e_aqc_opc_update_vsi_parameters = 0x0211, - i40e_aqc_opc_get_vsi_parameters = 0x0212, - - i40e_aqc_opc_add_pv = 0x0220, - i40e_aqc_opc_update_pv_parameters = 0x0221, - i40e_aqc_opc_get_pv_parameters = 0x0222, - - i40e_aqc_opc_add_veb = 0x0230, - i40e_aqc_opc_update_veb_parameters = 0x0231, - 
i40e_aqc_opc_get_veb_parameters = 0x0232, - - i40e_aqc_opc_delete_element = 0x0243, - - i40e_aqc_opc_add_macvlan = 0x0250, - i40e_aqc_opc_remove_macvlan = 0x0251, - i40e_aqc_opc_add_vlan = 0x0252, - i40e_aqc_opc_remove_vlan = 0x0253, - i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, - i40e_aqc_opc_add_tag = 0x0255, - i40e_aqc_opc_remove_tag = 0x0256, - i40e_aqc_opc_add_multicast_etag = 0x0257, - i40e_aqc_opc_remove_multicast_etag = 0x0258, - i40e_aqc_opc_update_tag = 0x0259, - i40e_aqc_opc_add_control_packet_filter = 0x025A, - i40e_aqc_opc_remove_control_packet_filter = 0x025B, - i40e_aqc_opc_add_cloud_filters = 0x025C, - i40e_aqc_opc_remove_cloud_filters = 0x025D, - - i40e_aqc_opc_add_mirror_rule = 0x0260, - i40e_aqc_opc_delete_mirror_rule = 0x0261, + i40e_aqc_opc_get_switch_config = 0x0200, + i40e_aqc_opc_add_statistics = 0x0201, + i40e_aqc_opc_remove_statistics = 0x0202, + i40e_aqc_opc_set_port_parameters = 0x0203, + i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + + i40e_aqc_opc_add_vsi = 0x0210, + i40e_aqc_opc_update_vsi_parameters = 0x0211, + i40e_aqc_opc_get_vsi_parameters = 0x0212, + + i40e_aqc_opc_add_pv = 0x0220, + i40e_aqc_opc_update_pv_parameters = 0x0221, + i40e_aqc_opc_get_pv_parameters = 0x0222, + + i40e_aqc_opc_add_veb = 0x0230, + i40e_aqc_opc_update_veb_parameters = 0x0231, + i40e_aqc_opc_get_veb_parameters = 0x0232, + + i40e_aqc_opc_delete_element = 0x0243, + + i40e_aqc_opc_add_macvlan = 0x0250, + i40e_aqc_opc_remove_macvlan = 0x0251, + i40e_aqc_opc_add_vlan = 0x0252, + i40e_aqc_opc_remove_vlan = 0x0253, + i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, + i40e_aqc_opc_add_tag = 0x0255, + i40e_aqc_opc_remove_tag = 0x0256, + i40e_aqc_opc_add_multicast_etag = 0x0257, + i40e_aqc_opc_remove_multicast_etag = 0x0258, + i40e_aqc_opc_update_tag = 0x0259, + i40e_aqc_opc_add_control_packet_filter = 0x025A, + i40e_aqc_opc_remove_control_packet_filter = 0x025B, + i40e_aqc_opc_add_cloud_filters = 0x025C, + i40e_aqc_opc_remove_cloud_filters = 0x025D, + + i40e_aqc_opc_add_mirror_rule = 0x0260, + i40e_aqc_opc_delete_mirror_rule = 0x0261, /* DCB commands */ - i40e_aqc_opc_dcb_ignore_pfc = 0x0301, - i40e_aqc_opc_dcb_updated = 0x0302, + i40e_aqc_opc_dcb_ignore_pfc = 0x0301, + i40e_aqc_opc_dcb_updated = 0x0302, /* TX scheduler */ - i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, - i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, - i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, - i40e_aqc_opc_query_vsi_bw_config = 0x0408, - i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, - i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, - - i40e_aqc_opc_enable_switching_comp_ets = 0x0413, - i40e_aqc_opc_modify_switching_comp_ets = 0x0414, - i40e_aqc_opc_disable_switching_comp_ets = 0x0415, - i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, - i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, - i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, - i40e_aqc_opc_query_port_ets_config = 0x0419, - i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, - i40e_aqc_opc_suspend_port_tx = 0x041B, - i40e_aqc_opc_resume_port_tx = 0x041C, - i40e_aqc_opc_configure_partition_bw = 0x041D, + i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, + i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, + i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, + i40e_aqc_opc_query_vsi_bw_config = 0x0408, + i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, + i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, + + i40e_aqc_opc_enable_switching_comp_ets = 0x0413, + i40e_aqc_opc_modify_switching_comp_ets = 0x0414, + 
i40e_aqc_opc_disable_switching_comp_ets = 0x0415, + i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, + i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, + i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, + i40e_aqc_opc_query_port_ets_config = 0x0419, + i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, + i40e_aqc_opc_suspend_port_tx = 0x041B, + i40e_aqc_opc_resume_port_tx = 0x041C, + i40e_aqc_opc_configure_partition_bw = 0x041D, /* hmc */ - i40e_aqc_opc_query_hmc_resource_profile = 0x0500, - i40e_aqc_opc_set_hmc_resource_profile = 0x0501, + i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + i40e_aqc_opc_set_hmc_resource_profile = 0x0501, /* phy commands*/ - i40e_aqc_opc_get_phy_abilities = 0x0600, - i40e_aqc_opc_set_phy_config = 0x0601, - i40e_aqc_opc_set_mac_config = 0x0603, - i40e_aqc_opc_set_link_restart_an = 0x0605, - i40e_aqc_opc_get_link_status = 0x0607, - i40e_aqc_opc_set_phy_int_mask = 0x0613, - i40e_aqc_opc_get_local_advt_reg = 0x0614, - i40e_aqc_opc_set_local_advt_reg = 0x0615, - i40e_aqc_opc_get_partner_advt = 0x0616, - i40e_aqc_opc_set_lb_modes = 0x0618, - i40e_aqc_opc_get_phy_wol_caps = 0x0621, - i40e_aqc_opc_set_phy_debug = 0x0622, - i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + i40e_aqc_opc_get_phy_abilities = 0x0600, + i40e_aqc_opc_set_phy_config = 0x0601, + i40e_aqc_opc_set_mac_config = 0x0603, + i40e_aqc_opc_set_link_restart_an = 0x0605, + i40e_aqc_opc_get_link_status = 0x0607, + i40e_aqc_opc_set_phy_int_mask = 0x0613, + i40e_aqc_opc_get_local_advt_reg = 0x0614, + i40e_aqc_opc_set_local_advt_reg = 0x0615, + i40e_aqc_opc_get_partner_advt = 0x0616, + i40e_aqc_opc_set_lb_modes = 0x0618, + i40e_aqc_opc_get_phy_wol_caps = 0x0621, + i40e_aqc_opc_set_phy_debug = 0x0622, + i40e_aqc_opc_upload_ext_phy_fm = 0x0625, /* NVM commands */ - i40e_aqc_opc_nvm_read = 0x0701, - i40e_aqc_opc_nvm_erase = 0x0702, - i40e_aqc_opc_nvm_update = 0x0703, - i40e_aqc_opc_nvm_config_read = 0x0704, - i40e_aqc_opc_nvm_config_write = 0x0705, + i40e_aqc_opc_nvm_read = 0x0701, + i40e_aqc_opc_nvm_erase = 0x0702, + i40e_aqc_opc_nvm_update = 0x0703, + i40e_aqc_opc_nvm_config_read = 0x0704, + i40e_aqc_opc_nvm_config_write = 0x0705, /* virtualization commands */ - i40e_aqc_opc_send_msg_to_pf = 0x0801, - i40e_aqc_opc_send_msg_to_vf = 0x0802, - i40e_aqc_opc_send_msg_to_peer = 0x0803, + i40e_aqc_opc_send_msg_to_pf = 0x0801, + i40e_aqc_opc_send_msg_to_vf = 0x0802, + i40e_aqc_opc_send_msg_to_peer = 0x0803, /* alternate structure */ - i40e_aqc_opc_alternate_write = 0x0900, - i40e_aqc_opc_alternate_write_indirect = 0x0901, - i40e_aqc_opc_alternate_read = 0x0902, - i40e_aqc_opc_alternate_read_indirect = 0x0903, - i40e_aqc_opc_alternate_write_done = 0x0904, - i40e_aqc_opc_alternate_set_mode = 0x0905, - i40e_aqc_opc_alternate_clear_port = 0x0906, + i40e_aqc_opc_alternate_write = 0x0900, + i40e_aqc_opc_alternate_write_indirect = 0x0901, + i40e_aqc_opc_alternate_read = 0x0902, + i40e_aqc_opc_alternate_read_indirect = 0x0903, + i40e_aqc_opc_alternate_write_done = 0x0904, + i40e_aqc_opc_alternate_set_mode = 0x0905, + i40e_aqc_opc_alternate_clear_port = 0x0906, /* LLDP commands */ - i40e_aqc_opc_lldp_get_mib = 0x0A00, - i40e_aqc_opc_lldp_update_mib = 0x0A01, - i40e_aqc_opc_lldp_add_tlv = 0x0A02, - i40e_aqc_opc_lldp_update_tlv = 0x0A03, - i40e_aqc_opc_lldp_delete_tlv = 0x0A04, - i40e_aqc_opc_lldp_stop = 0x0A05, - i40e_aqc_opc_lldp_start = 0x0A06, + i40e_aqc_opc_lldp_get_mib = 0x0A00, + i40e_aqc_opc_lldp_update_mib = 0x0A01, + i40e_aqc_opc_lldp_add_tlv = 0x0A02, + i40e_aqc_opc_lldp_update_tlv = 0x0A03, 
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04, + i40e_aqc_opc_lldp_stop = 0x0A05, + i40e_aqc_opc_lldp_start = 0x0A06, + i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07, /* Tunnel commands */ - i40e_aqc_opc_add_udp_tunnel = 0x0B00, - i40e_aqc_opc_del_udp_tunnel = 0x0B01, - i40e_aqc_opc_tunnel_key_structure = 0x0B10, + i40e_aqc_opc_add_udp_tunnel = 0x0B00, + i40e_aqc_opc_del_udp_tunnel = 0x0B01, + i40e_aqc_opc_tunnel_key_structure = 0x0B10, /* Async Events */ - i40e_aqc_opc_event_lan_overflow = 0x1001, + i40e_aqc_opc_event_lan_overflow = 0x1001, /* OEM commands */ - i40e_aqc_opc_oem_parameter_change = 0xFE00, - i40e_aqc_opc_oem_device_status_change = 0xFE01, + i40e_aqc_opc_oem_parameter_change = 0xFE00, + i40e_aqc_opc_oem_device_status_change = 0xFE01, /* debug commands */ - i40e_aqc_opc_debug_get_deviceid = 0xFF00, - i40e_aqc_opc_debug_set_mode = 0xFF01, - i40e_aqc_opc_debug_read_reg = 0xFF03, - i40e_aqc_opc_debug_write_reg = 0xFF04, - i40e_aqc_opc_debug_modify_reg = 0xFF07, - i40e_aqc_opc_debug_dump_internals = 0xFF08, - i40e_aqc_opc_debug_modify_internals = 0xFF09, + i40e_aqc_opc_debug_get_deviceid = 0xFF00, + i40e_aqc_opc_debug_set_mode = 0xFF01, + i40e_aqc_opc_debug_read_reg = 0xFF03, + i40e_aqc_opc_debug_write_reg = 0xFF04, + i40e_aqc_opc_debug_modify_reg = 0xFF07, + i40e_aqc_opc_debug_dump_internals = 0xFF08, + i40e_aqc_opc_debug_modify_internals = 0xFF09, }; /* command structures and indirect data structures */ @@ -302,7 +303,7 @@ enum i40e_admin_queue_opc { /* This macro is used extensively to ensure that command structures are 16 * bytes in length as they have to map to the raw array of that size. */ -#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) +#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) /* internal (0x00XX) commands */ @@ -320,22 +321,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); /* Send driver version (indirect 0x0002) */ struct i40e_aqc_driver_version { - u8 driver_major_ver; - u8 driver_minor_ver; - u8 driver_build_ver; - u8 driver_subbuild_ver; - u8 reserved[4]; - __le32 address_high; - __le32 address_low; + u8 driver_major_ver; + u8 driver_minor_ver; + u8 driver_build_ver; + u8 driver_subbuild_ver; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); /* Queue Shutdown (direct 0x0003) */ struct i40e_aqc_queue_shutdown { - __le32 driver_unloading; -#define I40E_AQ_DRIVER_UNLOADING 0x1 - u8 reserved[12]; + __le32 driver_unloading; +#define I40E_AQ_DRIVER_UNLOADING 0x1 + u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); @@ -351,19 +352,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); /* Request resource ownership (direct 0x0008) * Release resource ownership (direct 0x0009) */ -#define I40E_AQ_RESOURCE_NVM 1 -#define I40E_AQ_RESOURCE_SDP 2 -#define I40E_AQ_RESOURCE_ACCESS_READ 1 -#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 -#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 -#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 +#define I40E_AQ_RESOURCE_NVM 1 +#define I40E_AQ_RESOURCE_SDP 2 +#define I40E_AQ_RESOURCE_ACCESS_READ 1 +#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 +#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 +#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 struct i40e_aqc_request_resource { - __le16 resource_id; - __le16 access_type; - __le32 timeout; - __le32 resource_number; - u8 reserved[4]; + __le16 resource_id; + __le16 access_type; + __le32 timeout; + __le32 resource_number; + u8 reserved[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); @@ -373,7 +374,7 @@ 
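The I40E_CHECK_CMD_LENGTH() macro referenced above pins every direct AQ command to 16 bytes, since commands are overlaid on the raw parameter area of an i40e_aq_desc. A sketch of the same compile-time size check using C11 _Static_assert and an illustrative struct:

#include <stdint.h>
#include <stdio.h>

/* every direct AQ command must map onto the 16-byte raw parameter
 * area of the descriptor, so pin the size at compile time */
#define CHECK_STRUCT_LEN(n, X) \
	_Static_assert(sizeof(struct X) == (n), #X " must be " #n " bytes")

struct aqc_queue_shutdown {		/* shape mirrors the real command */
	uint32_t driver_unloading;	/* little-endian on the wire */
	uint8_t  reserved[12];
};
CHECK_STRUCT_LEN(16, aqc_queue_shutdown);

int main(void)
{
	printf("sizeof = %zu\n", sizeof(struct aqc_queue_shutdown));
	return 0;
}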
I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); */ struct i40e_aqc_list_capabilites { u8 command_flags; -#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 +#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 u8 pf_index; u8 reserved[2]; __le32 count; @@ -384,123 +385,123 @@ struct i40e_aqc_list_capabilites { I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); struct i40e_aqc_list_capabilities_element_resp { - __le16 id; - u8 major_rev; - u8 minor_rev; - __le32 number; - __le32 logical_id; - __le32 phys_id; - u8 reserved[16]; + __le16 id; + u8 major_rev; + u8 minor_rev; + __le32 number; + __le32 logical_id; + __le32 phys_id; + u8 reserved[16]; }; /* list of caps */ -#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 -#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 -#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 -#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 -#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 -#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 -#define I40E_AQ_CAP_ID_SRIOV 0x0012 -#define I40E_AQ_CAP_ID_VF 0x0013 -#define I40E_AQ_CAP_ID_VMDQ 0x0014 -#define I40E_AQ_CAP_ID_8021QBG 0x0015 -#define I40E_AQ_CAP_ID_8021QBR 0x0016 -#define I40E_AQ_CAP_ID_VSI 0x0017 -#define I40E_AQ_CAP_ID_DCB 0x0018 -#define I40E_AQ_CAP_ID_FCOE 0x0021 -#define I40E_AQ_CAP_ID_RSS 0x0040 -#define I40E_AQ_CAP_ID_RXQ 0x0041 -#define I40E_AQ_CAP_ID_TXQ 0x0042 -#define I40E_AQ_CAP_ID_MSIX 0x0043 -#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 -#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 -#define I40E_AQ_CAP_ID_1588 0x0046 -#define I40E_AQ_CAP_ID_IWARP 0x0051 -#define I40E_AQ_CAP_ID_LED 0x0061 -#define I40E_AQ_CAP_ID_SDP 0x0062 -#define I40E_AQ_CAP_ID_MDIO 0x0063 -#define I40E_AQ_CAP_ID_FLEX10 0x00F1 -#define I40E_AQ_CAP_ID_CEM 0x00F2 +#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 +#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 +#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 +#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 +#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 +#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 +#define I40E_AQ_CAP_ID_SRIOV 0x0012 +#define I40E_AQ_CAP_ID_VF 0x0013 +#define I40E_AQ_CAP_ID_VMDQ 0x0014 +#define I40E_AQ_CAP_ID_8021QBG 0x0015 +#define I40E_AQ_CAP_ID_8021QBR 0x0016 +#define I40E_AQ_CAP_ID_VSI 0x0017 +#define I40E_AQ_CAP_ID_DCB 0x0018 +#define I40E_AQ_CAP_ID_FCOE 0x0021 +#define I40E_AQ_CAP_ID_RSS 0x0040 +#define I40E_AQ_CAP_ID_RXQ 0x0041 +#define I40E_AQ_CAP_ID_TXQ 0x0042 +#define I40E_AQ_CAP_ID_MSIX 0x0043 +#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 +#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 +#define I40E_AQ_CAP_ID_1588 0x0046 +#define I40E_AQ_CAP_ID_IWARP 0x0051 +#define I40E_AQ_CAP_ID_LED 0x0061 +#define I40E_AQ_CAP_ID_SDP 0x0062 +#define I40E_AQ_CAP_ID_MDIO 0x0063 +#define I40E_AQ_CAP_ID_FLEX10 0x00F1 +#define I40E_AQ_CAP_ID_CEM 0x00F2 /* Set CPPM Configuration (direct 0x0103) */ struct i40e_aqc_cppm_configuration { - __le16 command_flags; -#define I40E_AQ_CPPM_EN_LTRC 0x0800 -#define I40E_AQ_CPPM_EN_DMCTH 0x1000 -#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 -#define I40E_AQ_CPPM_EN_HPTC 0x4000 -#define I40E_AQ_CPPM_EN_DMARC 0x8000 - __le16 ttlx; - __le32 dmacr; - __le16 dmcth; - u8 hptc; - u8 reserved; - __le32 pfltrc; + __le16 command_flags; +#define I40E_AQ_CPPM_EN_LTRC 0x0800 +#define I40E_AQ_CPPM_EN_DMCTH 0x1000 +#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 +#define I40E_AQ_CPPM_EN_HPTC 0x4000 +#define I40E_AQ_CPPM_EN_DMARC 0x8000 + __le16 ttlx; + __le32 dmacr; + __le16 dmcth; + u8 hptc; + u8 reserved; + __le32 pfltrc; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); /* Set ARP Proxy command / response (indirect 0x0104) */ struct i40e_aqc_arp_proxy_data { - __le16 
command_flags; -#define I40E_AQ_ARP_INIT_IPV4 0x0008 -#define I40E_AQ_ARP_UNSUP_CTL 0x0010 -#define I40E_AQ_ARP_ENA 0x0020 -#define I40E_AQ_ARP_ADD_IPV4 0x0040 -#define I40E_AQ_ARP_DEL_IPV4 0x0080 - __le16 table_id; - __le32 pfpm_proxyfc; - __le32 ip_addr; - u8 mac_addr[6]; + __le16 command_flags; +#define I40E_AQ_ARP_INIT_IPV4 0x0008 +#define I40E_AQ_ARP_UNSUP_CTL 0x0010 +#define I40E_AQ_ARP_ENA 0x0020 +#define I40E_AQ_ARP_ADD_IPV4 0x0040 +#define I40E_AQ_ARP_DEL_IPV4 0x0080 + __le16 table_id; + __le32 pfpm_proxyfc; + __le32 ip_addr; + u8 mac_addr[6]; }; /* Set NS Proxy Table Entry Command (indirect 0x0105) */ struct i40e_aqc_ns_proxy_data { - __le16 table_idx_mac_addr_0; - __le16 table_idx_mac_addr_1; - __le16 table_idx_ipv6_0; - __le16 table_idx_ipv6_1; - __le16 control; -#define I40E_AQ_NS_PROXY_ADD_0 0x0100 -#define I40E_AQ_NS_PROXY_DEL_0 0x0200 -#define I40E_AQ_NS_PROXY_ADD_1 0x0400 -#define I40E_AQ_NS_PROXY_DEL_1 0x0800 -#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 -#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 -#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 -#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 -#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 - u8 mac_addr_0[6]; - u8 mac_addr_1[6]; - u8 local_mac_addr[6]; - u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */ - u8 ipv6_addr_1[16]; + __le16 table_idx_mac_addr_0; + __le16 table_idx_mac_addr_1; + __le16 table_idx_ipv6_0; + __le16 table_idx_ipv6_1; + __le16 control; +#define I40E_AQ_NS_PROXY_ADD_0 0x0100 +#define I40E_AQ_NS_PROXY_DEL_0 0x0200 +#define I40E_AQ_NS_PROXY_ADD_1 0x0400 +#define I40E_AQ_NS_PROXY_DEL_1 0x0800 +#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 +#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 +#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 +#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 +#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 + u8 mac_addr_0[6]; + u8 mac_addr_1[6]; + u8 local_mac_addr[6]; + u8 ipv6_addr_0[16]; /* Warning! 
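All the __le16/__le32 members in these command structs are little-endian on the wire regardless of host order (the ipv6_addr arrays being the flagged big-endian exception), so every access goes through a byte-order accessor. A userspace sketch with glibc's <endian.h> helpers standing in for le16_to_cpu(); the struct shape is illustrative:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct mac_address_read {	/* shape mirrors i40e_aqc_mac_address_read */
	uint16_t command_flags;	/* __le16 on the wire */
	uint8_t  reserved[6];
	uint32_t addr_high;	/* __le32 */
	uint32_t addr_low;	/* __le32 */
};

int main(void)
{
	/* raw 16-byte descriptor body as it would arrive from the device */
	uint8_t raw[16] = { 0x10, 0x00 };  /* LAN_ADDR_VALID, LE byte order */
	struct mac_address_read cmd;

	memcpy(&cmd, raw, sizeof(cmd));
	/* le16toh() plays the role of le16_to_cpu(): a no-op on LE hosts,
	 * a byte swap on BE ones */
	printf("flags = 0x%04x\n", le16toh(cmd.command_flags));
	return 0;
}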
spec specifies BE byte order */ + u8 ipv6_addr_1[16]; }; /* Manage LAA Command (0x0106) - obsolete */ struct i40e_aqc_mng_laa { __le16 command_flags; -#define I40E_AQ_LAA_FLAG_WR 0x8000 - u8 reserved[2]; - __le32 sal; - __le16 sah; - u8 reserved2[6]; +#define I40E_AQ_LAA_FLAG_WR 0x8000 + u8 reserved[2]; + __le32 sal; + __le16 sah; + u8 reserved2[6]; }; /* Manage MAC Address Read Command (indirect 0x0107) */ struct i40e_aqc_mac_address_read { __le16 command_flags; -#define I40E_AQC_LAN_ADDR_VALID 0x10 -#define I40E_AQC_SAN_ADDR_VALID 0x20 -#define I40E_AQC_PORT_ADDR_VALID 0x40 -#define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_ADDR_VALID_MASK 0xf0 - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; +#define I40E_AQC_LAN_ADDR_VALID 0x10 +#define I40E_AQC_SAN_ADDR_VALID 0x20 +#define I40E_AQC_PORT_ADDR_VALID 0x40 +#define I40E_AQC_WOL_ADDR_VALID 0x80 +#define I40E_AQC_ADDR_VALID_MASK 0xf0 + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); @@ -516,14 +517,14 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); /* Manage MAC Address Write Command (0x0108) */ struct i40e_aqc_mac_address_write { - __le16 command_flags; -#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 -#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 -#define I40E_AQC_WRITE_TYPE_PORT 0x8000 -#define I40E_AQC_WRITE_TYPE_MASK 0xc000 - __le16 mac_sah; - __le32 mac_sal; - u8 reserved[8]; + __le16 command_flags; +#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 +#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 +#define I40E_AQC_WRITE_TYPE_PORT 0x8000 +#define I40E_AQC_WRITE_TYPE_MASK 0xc000 + __le16 mac_sah; + __le32 mac_sal; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); @@ -544,10 +545,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); * command */ struct i40e_aqc_switch_seid { - __le16 seid; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; + __le16 seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); @@ -556,34 +557,34 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); * uses i40e_aqc_switch_seid for the descriptor */ struct i40e_aqc_get_switch_config_header_resp { - __le16 num_reported; - __le16 num_total; - u8 reserved[12]; + __le16 num_reported; + __le16 num_total; + u8 reserved[12]; }; struct i40e_aqc_switch_config_element_resp { - u8 element_type; -#define I40E_AQ_SW_ELEM_TYPE_MAC 1 -#define I40E_AQ_SW_ELEM_TYPE_PF 2 -#define I40E_AQ_SW_ELEM_TYPE_VF 3 -#define I40E_AQ_SW_ELEM_TYPE_EMP 4 -#define I40E_AQ_SW_ELEM_TYPE_BMC 5 -#define I40E_AQ_SW_ELEM_TYPE_PV 16 -#define I40E_AQ_SW_ELEM_TYPE_VEB 17 -#define I40E_AQ_SW_ELEM_TYPE_PA 18 -#define I40E_AQ_SW_ELEM_TYPE_VSI 19 - u8 revision; -#define I40E_AQ_SW_ELEM_REV_1 1 - __le16 seid; - __le16 uplink_seid; - __le16 downlink_seid; - u8 reserved[3]; - u8 connection_type; -#define I40E_AQ_CONN_TYPE_REGULAR 0x1 -#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_CONN_TYPE_CASCADED 0x3 - __le16 scheduler_id; - __le16 element_info; + u8 element_type; +#define I40E_AQ_SW_ELEM_TYPE_MAC 1 +#define I40E_AQ_SW_ELEM_TYPE_PF 2 +#define I40E_AQ_SW_ELEM_TYPE_VF 3 +#define I40E_AQ_SW_ELEM_TYPE_EMP 4 +#define I40E_AQ_SW_ELEM_TYPE_BMC 5 +#define I40E_AQ_SW_ELEM_TYPE_PV 16 +#define I40E_AQ_SW_ELEM_TYPE_VEB 17 +#define I40E_AQ_SW_ELEM_TYPE_PA 18 +#define I40E_AQ_SW_ELEM_TYPE_VSI 19 + u8 revision; +#define I40E_AQ_SW_ELEM_REV_1 1 + __le16 seid; + __le16 uplink_seid; + __le16 downlink_seid; + u8 reserved[3]; + u8 connection_type; +#define 
I40E_AQ_CONN_TYPE_REGULAR 0x1 +#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_CONN_TYPE_CASCADED 0x3 + __le16 scheduler_id; + __le16 element_info; }; /* Get Switch Configuration (indirect 0x0200) @@ -591,73 +592,73 @@ struct i40e_aqc_switch_config_element_resp { * the first in the array is the header, remainder are elements */ struct i40e_aqc_get_switch_config_resp { - struct i40e_aqc_get_switch_config_header_resp header; - struct i40e_aqc_switch_config_element_resp element[1]; + struct i40e_aqc_get_switch_config_header_resp header; + struct i40e_aqc_switch_config_element_resp element[1]; }; /* Add Statistics (direct 0x0201) * Remove Statistics (direct 0x0202) */ struct i40e_aqc_add_remove_statistics { - __le16 seid; - __le16 vlan; - __le16 stat_index; - u8 reserved[10]; + __le16 seid; + __le16 vlan; + __le16 stat_index; + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); /* Set Port Parameters command (direct 0x0203) */ struct i40e_aqc_set_port_parameters { - __le16 command_flags; -#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 -#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ -#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 - __le16 bad_frame_vsi; - __le16 default_seid; /* reserved for command */ - u8 reserved[10]; + __le16 command_flags; +#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 +#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ +#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 + __le16 bad_frame_vsi; + __le16 default_seid; /* reserved for command */ + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); /* Get Switch Resource Allocation (indirect 0x0204) */ struct i40e_aqc_get_switch_resource_alloc { - u8 num_entries; /* reserved for command */ - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; + u8 num_entries; /* reserved for command */ + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); /* expect an array of these structs in the response buffer */ struct i40e_aqc_switch_resource_alloc_element_resp { - u8 resource_type; -#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 -#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 -#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 -#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 -#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 -#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 -#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 -#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 -#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 -#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 -#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA -#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB -#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC -#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD -#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF -#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 -#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 -#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 -#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 - u8 reserved1; - __le16 guaranteed; - __le16 total; - __le16 used; - __le16 total_unalloced; - u8 reserved2[6]; + u8 resource_type; +#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 +#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 +#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 +#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 +#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 +#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 +#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 +#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 +#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 +#define 
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
+ u8 reserved1;
+ __le16 guaranteed;
+ __le16 total;
+ __le16 used;
+ __le16 total_unalloced;
+ u8 reserved2[6];
 };
 /* Add VSI (indirect 0x0210)
@@ -671,24 +672,24 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
  * uses the same completion and data structure as Add VSI
  */
 struct i40e_aqc_add_get_update_vsi {
- __le16 uplink_seid;
- u8 connection_type;
-#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
-#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
-#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
- u8 reserved1;
- u8 vf_id;
- u8 reserved2;
- __le16 vsi_flags;
-#define I40E_AQ_VSI_TYPE_SHIFT 0x0
-#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
-#define I40E_AQ_VSI_TYPE_VF 0x0
-#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
-#define I40E_AQ_VSI_TYPE_PF 0x2
-#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
-#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
- __le32 addr_high;
- __le32 addr_low;
+ __le16 uplink_seid;
+ u8 connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
+ u8 reserved1;
+ u8 vf_id;
+ u8 reserved2;
+ __le16 vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT 0x0
+#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF 0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
+#define I40E_AQ_VSI_TYPE_PF 0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
+ __le32 addr_high;
+ __le32 addr_low;
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
@@ -706,121 +707,121 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
 struct i40e_aqc_vsi_properties_data {
 /* first 96 byte are written by SW */
- __le16 valid_sections;
-#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
-#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
-#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
-#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
-#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
-#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
-#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
-#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
-#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
-#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
+ __le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
 /* switch section */
- __le16 switch_id; /* 12bit id combined with flags below */
-#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
-#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
-#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
-#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
-#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
- u8 sw_reserved[2];
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
+#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
 /* security section */
- u8 sec_flags;
-#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
- u8 sec_reserved;
+ u8 sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
 /* VLAN section */
- __le16 pvid; /* VLANS include priority bits */
- __le16 fcoe_pvid;
- u8 port_vlan_flags;
-#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
-#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
- I40E_AQ_VSI_PVLAN_MODE_SHIFT)
-#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
-#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
-#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
-#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
-#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
-#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
- I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
-#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
-#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
- u8 pvlan_reserved[3];
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
 /* ingress egress up sections */
- __le32 ingress_table; /* bitmap, 3 bits per up */
-#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
-#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
-#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
-#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
-#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
-#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
-#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
-#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
-#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
- __le32 egress_table; /* same defines as for ingress table */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
 /* cascaded PV section */
- __le16 cas_pv_tag;
- u8 cas_pv_flags;
-#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
- I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
-#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
-#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
-#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
-#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
-#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
- u8 cas_pv_reserved;
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
 /* queue mapping section */
- __le16 mapping_flags;
-#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
-#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
- __le16 queue_mapping[16];
-#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
-#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
- __le16 tc_mapping[8];
-#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
-#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
- I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
-#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
-#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
- I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ __le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
+#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 /* queueing option section */
- u8 queueing_opt_flags;
-#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
-#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
- u8 queueing_opt_reserved[3];
+ u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+ u8 queueing_opt_reserved[3];
 /* scheduler section */
- u8 up_enable_bits;
- u8 sched_reserved;
+ u8 up_enable_bits;
+ u8 sched_reserved;
 /* outer up section */
- __le32 outer_up_table; /* same structure and defines as ingress table */
- u8 cmd_reserved[8];
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
+ u8 cmd_reserved[8];
 /* last 32 bytes are written by FW */
- __le16 qs_handle[8];
+ __le16 qs_handle[8];
 #define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
- __le16 stat_counter_idx;
- __le16 sched_id;
- u8 resp_reserved[12];
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
 };
 I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
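/*
 * Illustrative sketch (not part of the patch; the helper name below is
 * hypothetical): the ingress_table/egress_table words above hold one 3-bit
 * traffic class per user priority, UP0 at bit 0 through UP7 at bit 21,
 * exactly as the I40E_AQ_VSI_UP_TABLE_UP*_SHIFT/_MASK macros describe.
 * Packing a UP-to-TC map therefore reduces to:
 */
static u32 example_pack_up_table(const u8 up2tc[8])
{
	u32 table = 0;
	int i;

	/* UPn occupies the 3 bits starting at shift 3 * n */
	for (i = 0; i < 8; i++)
		table |= (u32)(up2tc[i] & 0x7) << (3 * i);
	return table;
}
/* An all-zero table maps every priority to TC0, the common default. */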
@@ -830,26 +831,26 @@ I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
  * (IS_CTRL_PORT only works on add PV)
  */
 struct i40e_aqc_add_update_pv {
- __le16 command_flags;
-#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
-#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
- __le16 uplink_seid;
- __le16 connected_seid;
- u8 reserved[10];
+ __le16 command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
+ __le16 uplink_seid;
+ __le16 connected_seid;
+ u8 reserved[10];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
 struct i40e_aqc_add_update_pv_completion {
 /* reserved for update; for add also encodes error if rc == ENOSPC */
- __le16 pv_seid;
-#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
-#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
-#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
-#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
- u8 reserved[14];
+ __le16 pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
+ u8 reserved[14];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
@@ -859,48 +860,48 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
  */
 struct i40e_aqc_get_pv_params_completion {
- __le16 seid;
- __le16 default_stag;
- __le16 pv_flags; /* same flags as add_pv */
-#define I40E_AQC_GET_PV_PV_TYPE 0x1
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
- u8 reserved[8];
- __le16 default_port_seid;
+ __le16 seid;
+ __le16 default_stag;
+ __le16 pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE 0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
+ u8 reserved[8];
+ __le16 default_port_seid;
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
 /* Add VEB (direct 0x0230) */
 struct i40e_aqc_add_veb {
- __le16 uplink_seid;
- __le16 downlink_seid;
- __le16 veb_flags;
-#define I40E_AQC_ADD_VEB_FLOATING 0x1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ __le16 veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING 0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
 I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
-#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
- u8 enable_tcs;
- u8 reserved[9];
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
+ u8 enable_tcs;
+ u8 reserved[9];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
 struct i40e_aqc_add_veb_completion {
- u8 reserved[6];
- __le16 switch_seid;
+ u8 reserved[6];
+ __le16 switch_seid;
 /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
- __le16 veb_seid;
-#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
-#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
-#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
-#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
- __le16 statistic_index;
- __le16 vebs_used;
- __le16 vebs_free;
+ __le16 veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
@@ -909,13 +910,13 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
  * uses i40e_aqc_switch_seid for the descriptor
  */
 struct i40e_aqc_get_veb_parameters_completion {
- __le16 seid;
- __le16 switch_id;
- __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */
- __le16 statistic_index;
- __le16 vebs_used;
- __le16 vebs_free;
- u8 reserved[4];
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
@@ -928,37 +929,37 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
 /* used for the command for most vlan commands */
 struct i40e_aqc_macvlan {
- __le16 num_addresses;
- __le16 seid[3];
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
+ __le16 num_addresses;
+ __le16 seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
 I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
-#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
- __le32 addr_high;
- __le32 addr_low;
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
+ __le32 addr_high;
+ __le32 addr_low;
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
 /* indirect data for command and response */
 struct i40e_aqc_add_macvlan_element_data {
- u8 mac_addr[6];
- __le16 vlan_tag;
- __le16 flags;
-#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
-#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
-#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
-#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
- __le16 queue_number;
-#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
-#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ __le16 flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+ __le16 queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
 I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
 /* response section */
- u8 match_method;
-#define I40E_AQC_MM_PERFECT_MATCH 0x01
-#define I40E_AQC_MM_HASH_MATCH 0x02
-#define I40E_AQC_MM_ERR_NO_RES 0xFF
- u8 reserved1[3];
+ u8 match_method;
+#define I40E_AQC_MM_PERFECT_MATCH 0x01
+#define I40E_AQC_MM_HASH_MATCH 0x02
+#define I40E_AQC_MM_ERR_NO_RES 0xFF
+ u8 reserved1[3];
 };
 struct i40e_aqc_add_remove_macvlan_completion {
@@ -978,19 +979,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
  */
 struct i40e_aqc_remove_macvlan_element_data {
- u8 mac_addr[6];
- __le16 vlan_tag;
- u8 flags;
-#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
-#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
-#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
-#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
- u8 reserved[3];
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ u8 flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
+ u8 reserved[3];
 /* reply section */
- u8 error_code;
-#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
-#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
- u8 reply_reserved[3];
+ u8 error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
+ u8 reply_reserved[3];
 };
 /* Add VLAN (indirect 0x0252)
@@ -998,59 +999,58 @@ struct i40e_aqc_remove_macvlan_element_data {
  * use the generic i40e_aqc_macvlan for the command
  */
 struct i40e_aqc_add_remove_vlan_element_data {
- __le16 vlan_tag;
- u8 vlan_flags;
+ __le16 vlan_tag;
+ u8 vlan_flags;
 /* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_LOCAL 0x1
-#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
-#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \
- I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
-#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
-#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
-#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
-#define I40E_AQC_VLAN_PTYPE_SHIFT 3
-#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
-#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
-#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
-#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
-#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
+#define I40E_AQC_ADD_VLAN_LOCAL 0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT 3
+#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
 /* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_ALL 0x1
- u8 reserved;
- u8 result;
+#define I40E_AQC_REMOVE_VLAN_ALL 0x1
+ u8 reserved;
+ u8 result;
 /* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
-#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
-#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
 /* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
-#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
- u8 reserved1[3];
+#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
+ u8 reserved1[3];
 };
 struct i40e_aqc_add_remove_vlan_completion {
- u8 reserved[4];
- __le16 vlans_used;
- __le16 vlans_free;
- __le32 addr_high;
- __le32 addr_low;
+ u8 reserved[4];
+ __le16 vlans_used;
+ __le16 vlans_free;
+ __le32 addr_high;
+ __le32 addr_low;
 };
 /* Set VSI Promiscuous Modes (direct 0x0254) */
 struct i40e_aqc_set_vsi_promiscuous_modes {
- __le16 promiscuous_flags;
- __le16 valid_flags;
+ __le16 promiscuous_flags;
+ __le16 valid_flags;
 /* flags used for both fields above */
-#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
-#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
-#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
-#define I40E_AQC_SET_VSI_DEFAULT 0x08
-#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
- __le16 seid;
-#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
- __le16 vlan_tag;
-#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
- u8 reserved[8];
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+#define I40E_AQC_SET_VSI_DEFAULT 0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+ __le16 seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ __le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
+ u8 reserved[8];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
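/*
 * Illustrative sketch (hypothetical helper, not from the driver): for the
 * Set VSI Promiscuous Modes command above, promiscuous_flags carries the
 * requested on/off state while valid_flags tells the firmware which of
 * those flag bits to apply; the SEID is masked to its 10 valid bits.
 */
static void example_request_unicast_promisc(
		struct i40e_aqc_set_vsi_promiscuous_modes *cmd,
		u16 seid, bool enable)
{
	u16 flags = enable ? I40E_AQC_SET_VSI_PROMISC_UNICAST : 0;

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
	cmd->seid = cpu_to_le16(seid & I40E_AQC_VSI_PROM_CMD_SEID_MASK);
}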
@@ -1059,23 +1059,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
  * Uses generic i40e_aqc_add_remove_tag_completion for completion
  */
 struct i40e_aqc_add_tag {
- __le16 flags;
-#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
- __le16 seid;
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ __le16 flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
+ __le16 seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
 I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
- __le16 tag;
- __le16 queue_number;
- u8 reserved[8];
+ __le16 tag;
+ __le16 queue_number;
+ u8 reserved[8];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
 struct i40e_aqc_add_remove_tag_completion {
- u8 reserved[12];
- __le16 tags_used;
- __le16 tags_free;
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
@@ -1084,12 +1084,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
  * Uses generic i40e_aqc_add_remove_tag_completion for completion
  */
 struct i40e_aqc_remove_tag {
- __le16 seid;
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ __le16 seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
 I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
- __le16 tag;
- u8 reserved[12];
+ __le16 tag;
+ u8 reserved[12];
 };
 /* Add multicast E-Tag (direct 0x0257)
@@ -1097,22 +1097,22 @@ struct i40e_aqc_remove_tag {
  * and no external data
  */
 struct i40e_aqc_add_remove_mcast_etag {
- __le16 pv_seid;
- __le16 etag;
- u8 num_unicast_etags;
- u8 reserved[3];
- __le32 addr_high; /* address of array of 2-byte s-tags */
- __le32 addr_low;
+ __le16 pv_seid;
+ __le16 etag;
+ u8 num_unicast_etags;
+ u8 reserved[3];
+ __le32 addr_high; /* address of array of 2-byte s-tags */
+ __le32 addr_low;
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
 struct i40e_aqc_add_remove_mcast_etag_completion {
- u8 reserved[4];
- __le16 mcast_etags_used;
- __le16 mcast_etags_free;
- __le32 addr_high;
- __le32 addr_low;
+ u8 reserved[4];
+ __le16 mcast_etags_used;
+ __le16 mcast_etags_free;
+ __le32 addr_high;
+ __le32 addr_low;
 };
@@ -1120,21 +1120,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
 /* Update S/E-Tag (direct 0x0259) */
 struct i40e_aqc_update_tag {
- __le16 seid;
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ __le16 seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
 I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
- __le16 old_tag;
- __le16 new_tag;
- u8 reserved[10];
+ __le16 old_tag;
+ __le16 new_tag;
+ u8 reserved[10];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
 struct i40e_aqc_update_tag_completion {
- u8 reserved[12];
- __le16 tags_used;
- __le16 tags_free;
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
@@ -1145,30 +1145,30 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
  * and the generic direct completion structure
  */
 struct i40e_aqc_add_remove_control_packet_filter {
- u8 mac[6];
- __le16 etype;
- __le16 flags;
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
- __le16 seid;
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
+ u8 mac[6];
+ __le16 etype;
+ __le16 flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
+ __le16 seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
 I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
- __le16 queue;
- u8 reserved[2];
+ __le16 queue;
+ u8 reserved[2];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
 struct i40e_aqc_add_remove_control_packet_filter_completion {
- __le16 mac_etype_used;
- __le16 etype_used;
- __le16 mac_etype_free;
- __le16 etype_free;
- u8 reserved[8];
+ __le16 mac_etype_used;
+ __le16 etype_used;
+ __le16 mac_etype_free;
+ __le16 etype_free;
+ u8 reserved[8];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
@@ -1179,23 +1179,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
  * and the generic indirect completion structure
  */
 struct i40e_aqc_add_remove_cloud_filters {
- u8 num_filters;
- u8 reserved;
- __le16 seid;
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
+ u8 num_filters;
+ u8 reserved;
+ __le16 seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
 I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
- u8 reserved2[4];
- __le32 addr_high;
- __le32 addr_low;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
 struct i40e_aqc_add_remove_cloud_filters_element_data {
- u8 outer_mac[6];
- u8 inner_mac[6];
- __le16 inner_vlan;
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
 union {
 struct {
 u8 reserved[12];
@@ -1205,49 +1205,49 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
 u8 data[16];
 } v6;
 } ipaddr;
- __le16 flags;
-#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+ __le16 flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
 I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
 /* 0x0000 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
 /* 0x0002 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
 /* 0x0005 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
 /* 0x0007 reserved */
 /* 0x0008 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
-#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
-
-#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
-#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
-#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
-
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
-
- __le32 tenant_id;
- u8 reserved[4];
- __le16 queue_number;
-#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
- I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
- u8 reserved2[14];
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+
+ __le32 tenant_id;
+ u8 reserved[4];
+ __le16 queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+ u8 reserved2[14];
 /* response section */
- u8 allocation_result;
-#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
-#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
- u8 response_reserved[7];
+ u8 allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
+ u8 response_reserved[7];
 };
 struct i40e_aqc_remove_cloud_filters_completion {
@@ -1269,14 +1269,14 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
 struct i40e_aqc_add_delete_mirror_rule {
 __le16 seid;
 __le16 rule_type;
-#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
-#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
 I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
-#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
 __le16 num_entries;
 __le16 destination; /* VSI for add, rule id for delete */
 __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
@@ -1286,12 +1286,12 @@ struct i40e_aqc_add_delete_mirror_rule {
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
 struct i40e_aqc_add_delete_mirror_rule_completion {
- u8 reserved[2];
- __le16 rule_id; /* only used on add */
- __le16 mirror_rules_used;
- __le16 mirror_rules_free;
- __le32 addr_high;
- __le32 addr_low;
+ u8 reserved[2];
+ __le16 rule_id; /* only used on add */
+ __le16 mirror_rules_used;
+ __le16 mirror_rules_free;
+ __le32 addr_high;
+ __le32 addr_low;
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
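/*
 * Illustrative sketch (hypothetical, not from the driver): filling the Add
 * Mirror Rule descriptor above for a VLAN-based rule. The rule type is
 * placed with the TYPE shift/mask macros, destination names the target VSI
 * on add, and the DMA buffer referenced by addr_high/addr_low holds
 * num_entries 2-byte VLAN ids.
 */
static void example_fill_vlan_mirror_rule(
		struct i40e_aqc_add_delete_mirror_rule *cmd,
		u16 sw_seid, u16 dest_vsi_seid, u16 count, u64 dma)
{
	cmd->seid = cpu_to_le16(sw_seid);
	cmd->rule_type = cpu_to_le16((I40E_AQC_MIRROR_RULE_TYPE_VLAN <<
				      I40E_AQC_MIRROR_RULE_TYPE_SHIFT) &
				     I40E_AQC_MIRROR_RULE_TYPE_MASK);
	cmd->num_entries = cpu_to_le16(count);
	cmd->destination = cpu_to_le16(dest_vsi_seid);
	cmd->addr_high = cpu_to_le32(upper_32_bits(dma));
	cmd->addr_low = cpu_to_le32(lower_32_bits(dma));
}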
@@ -1302,11 +1302,11 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
  * the command and response use the same descriptor structure
  */
 struct i40e_aqc_pfc_ignore {
- u8 tc_bitmap;
- u8 command_flags; /* unused on response */
-#define I40E_AQC_PFC_IGNORE_SET 0x80
-#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
- u8 reserved[14];
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET 0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
+ u8 reserved[14];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
@@ -1321,10 +1321,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
  * this generic struct to pass the SEID in param0
  */
 struct i40e_aqc_tx_sched_ind {
- __le16 vsi_seid;
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
@@ -1336,12 +1336,12 @@ struct i40e_aqc_qs_handles_resp {
 /* Configure VSI BW limits (direct 0x0400) */
 struct i40e_aqc_configure_vsi_bw_limit {
- __le16 vsi_seid;
- u8 reserved[2];
- __le16 credit;
- u8 reserved1[2];
- u8 max_credit; /* 0-3, limit = 2^max */
- u8 reserved2[7];
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
@@ -1350,58 +1350,58 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
  * responds with i40e_aqc_qs_handles_resp
  */
 struct i40e_aqc_configure_vsi_ets_sla_bw_data {
- u8 tc_valid_bits;
- u8 reserved[15];
- __le16 tc_bw_credits[8]; /* FW writesback QS handles here */
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credits[8]; /* FW writesback QS handles here */
 /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved1[28];
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
 };
 /* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
  * responds with i40e_aqc_qs_handles_resp
  */
 struct i40e_aqc_configure_vsi_tc_bw_data {
- u8 tc_valid_bits;
- u8 reserved[3];
- u8 tc_bw_credits[8];
- u8 reserved1[4];
- __le16 qs_handles[8];
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
 };
 /* Query vsi bw configuration (indirect 0x0408) */
 struct i40e_aqc_query_vsi_bw_config_resp {
- u8 tc_valid_bits;
- u8 tc_suspended_bits;
- u8 reserved[14];
- __le16 qs_handles[8];
- u8 reserved1[4];
- __le16 port_bw_limit;
- u8 reserved2[2];
- u8 max_bw; /* 0-3, limit = 2^max */
- u8 reserved3[23];
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
 };
 /* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
 struct i40e_aqc_query_vsi_ets_sla_config_resp {
- u8 tc_valid_bits;
- u8 reserved[3];
- u8 share_credits[8];
- __le16 credits[8];
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
 /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
+ __le16 tc_bw_max[2];
 };
 /* Configure Switching Component Bandwidth Limit (direct 0x0410) */
 struct i40e_aqc_configure_switching_comp_bw_limit {
- __le16 seid;
- u8 reserved[2];
- __le16 credit;
- u8 reserved1[2];
- u8 max_bw; /* 0-3, limit = 2^max */
- u8 reserved2[7];
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
@@ -1411,75 +1411,75 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
  * Disable Physical Port ETS (indirect 0x0415)
  */
 struct i40e_aqc_configure_switching_comp_ets_data {
- u8 reserved[4];
- u8 tc_valid_bits;
- u8 seepage;
-#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
- u8 tc_strict_priority_flags;
- u8 reserved1[17];
- u8 tc_bw_share_credits[8];
- u8 reserved2[96];
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
+ u8 tc_strict_priority_flags;
+ u8 reserved1[17];
+ u8 tc_bw_share_credits[8];
+ u8 reserved2[96];
 };
 /* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
 struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
- u8 tc_valid_bits;
- u8 reserved[15];
- __le16 tc_bw_credit[8];
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credit[8];
 /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved1[28];
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
 };
 /* Configure Switching Component Bandwidth Allocation per Tc
  * (indirect 0x0417)
  */
 struct i40e_aqc_configure_switching_comp_bw_config_data {
- u8 tc_valid_bits;
- u8 reserved[2];
- u8 absolute_credits; /* bool */
- u8 tc_bw_share_credits[8];
- u8 reserved1[20];
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits; /* bool */
+ u8 tc_bw_share_credits[8];
+ u8 reserved1[20];
 };
 /* Query Switching Component Configuration (indirect 0x0418) */
 struct i40e_aqc_query_switching_comp_ets_config_resp {
- u8 tc_valid_bits;
- u8 reserved[35];
- __le16 port_bw_limit;
- u8 reserved1[2];
- u8 tc_bw_max; /* 0-3, limit = 2^max */
- u8 reserved2[23];
+ u8 tc_valid_bits;
+ u8 reserved[35];
+ __le16 port_bw_limit;
+ u8 reserved1[2];
+ u8 tc_bw_max; /* 0-3, limit = 2^max */
+ u8 reserved2[23];
 };
 /* Query PhysicalPort ETS Configuration (indirect 0x0419) */
 struct i40e_aqc_query_port_ets_config_resp {
- u8 reserved[4];
- u8 tc_valid_bits;
- u8 reserved1;
- u8 tc_strict_priority_bits;
- u8 reserved2;
- u8 tc_bw_share_credits[8];
- __le16 tc_bw_limits[8];
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_bits;
+ u8 reserved2;
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
 /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved3[32];
+ __le16 tc_bw_max[2];
+ u8 reserved3[32];
 };
 /* Query Switching Component Bandwidth Allocation per Traffic Type
  * (indirect 0x041A)
  */
 struct i40e_aqc_query_switching_comp_bw_config_resp {
- u8 tc_valid_bits;
- u8 reserved[2];
- u8 absolute_credits_enable; /* bool */
- u8 tc_bw_share_credits[8];
- __le16 tc_bw_limits[8];
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits_enable; /* bool */
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
 /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
+ __le16 tc_bw_max[2];
 };
 /* Suspend/resume port TX traffic
@@ -1490,37 +1490,37 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
  * (indirect 0x041D)
  */
 struct i40e_aqc_configure_partition_bw_data {
- __le16 pf_valid_bits;
- u8 min_bw[16]; /* guaranteed bandwidth */
- u8 max_bw[16]; /* bandwidth limit */
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
 };
 /* Get and set the active HMC resource profile and status.
  * (direct 0x0500) and (direct 0x0501)
  */
 struct i40e_aq_get_set_hmc_resource_profile {
- u8 pm_profile;
- u8 pe_vf_enabled;
- u8 reserved[14];
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
 enum i40e_aq_hmc_profile {
 /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
- I40E_HMC_PROFILE_DEFAULT = 1,
- I40E_HMC_PROFILE_FAVOR_VF = 2,
- I40E_HMC_PROFILE_EQUAL = 3,
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
 };
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
 /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
 /* set in param0 for get phy abilities to report qualified modules */
-#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
-#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
 enum i40e_aq_phy_type {
 I40E_PHY_TYPE_SGMII = 0x0,
@@ -1578,147 +1578,147 @@ struct i40e_aqc_module_desc {
 };
 struct i40e_aq_get_phy_abilities_resp {
- __le32 phy_type; /* bitmap using the above enum for offsets */
- u8 link_speed; /* bitmap using the above enum bit patterns */
- u8 abilities;
-#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
-#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
-#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
-#define I40E_AQ_PHY_LINK_ENABLED 0x08
-#define I40E_AQ_PHY_AN_ENABLED 0x10
-#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
- __le16 eee_capability;
-#define I40E_AQ_EEE_100BASE_TX 0x0002
-#define I40E_AQ_EEE_1000BASE_T 0x0004
-#define I40E_AQ_EEE_10GBASE_T 0x0008
-#define I40E_AQ_EEE_1000BASE_KX 0x0010
-#define I40E_AQ_EEE_10GBASE_KX4 0x0020
-#define I40E_AQ_EEE_10GBASE_KR 0x0040
- __le32 eeer_val;
- u8 d3_lpan;
-#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
- u8 reserved[3];
- u8 phy_id[4];
- u8 module_type[3];
- u8 qualified_module_count;
-#define I40E_AQ_PHY_MAX_QMS 16
- struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
+ __le32 phy_type; /* bitmap using the above enum for offsets */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
+ u8 abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+ __le16 eee_capability;
+#define I40E_AQ_EEE_100BASE_TX 0x0002
+#define I40E_AQ_EEE_1000BASE_T 0x0004
+#define I40E_AQ_EEE_10GBASE_T 0x0008
+#define I40E_AQ_EEE_1000BASE_KX 0x0010
+#define I40E_AQ_EEE_10GBASE_KX4 0x0020
+#define I40E_AQ_EEE_10GBASE_KR 0x0040
+ __le32 eeer_val;
+ u8 d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
+ u8 reserved[3];
+ u8 phy_id[4];
+ u8 module_type[3];
+ u8 qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS 16
+ struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
 };
 /* Set PHY Config (direct 0x0601) */
 struct i40e_aq_set_phy_config { /* same bits as above in all */
- __le32 phy_type;
- u8 link_speed;
- u8 abilities;
+ __le32 phy_type;
+ u8 link_speed;
+ u8 abilities;
 /* bits 0-2 use the values from get_phy_abilities_resp */
 #define I40E_AQ_PHY_ENABLE_LINK 0x08
 #define I40E_AQ_PHY_ENABLE_AN 0x10
 #define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
- __le16 eee_capability;
- __le32 eeer;
- u8 low_power_ctrl;
- u8 reserved[3];
+ __le16 eee_capability;
+ __le32 eeer;
+ u8 low_power_ctrl;
+ u8 reserved[3];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
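/*
 * Illustrative sketch (hypothetical, not from the driver): Set PHY Config
 * deliberately mirrors the Get PHY Abilities response, so the usual
 * pattern is to echo the reported fields back and modify only the bits of
 * interest before sending the 0x0601 command.
 */
static void example_abilities_to_phy_config(
		const struct i40e_aq_get_phy_abilities_resp *abilities,
		struct i40e_aq_set_phy_config *config)
{
	config->phy_type = abilities->phy_type;
	config->link_speed = abilities->link_speed;
	config->eee_capability = abilities->eee_capability;
	config->eeer = abilities->eeer_val;
	config->low_power_ctrl = abilities->d3_lpan;
	/* keep the reported pause/AN bits, but force the link enabled */
	config->abilities = abilities->abilities | I40E_AQ_PHY_ENABLE_LINK;
}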
 /* Set MAC Config command data structure (direct 0x0603) */
 struct i40e_aq_set_mac_config {
- __le16 max_frame_size;
- u8 params;
-#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
-#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
-#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
-#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
-#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
- u8 tx_timer_priority; /* bitmap */
- __le16 tx_timer_value;
- __le16 fc_refresh_threshold;
- u8 reserved[8];
+ __le16 max_frame_size;
+ u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+ u8 tx_timer_priority; /* bitmap */
+ __le16 tx_timer_value;
+ __le16 fc_refresh_threshold;
+ u8 reserved[8];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
 /* Restart Auto-Negotiation (direct 0x605) */
 struct i40e_aqc_set_link_restart_an {
- u8 command;
-#define I40E_AQ_PHY_RESTART_AN 0x02
-#define I40E_AQ_PHY_LINK_ENABLE 0x04
- u8 reserved[15];
+ u8 command;
+#define I40E_AQ_PHY_RESTART_AN 0x02
+#define I40E_AQ_PHY_LINK_ENABLE 0x04
+ u8 reserved[15];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
 /* Get Link Status cmd & response data structure (direct 0x0607) */
 struct i40e_aqc_get_link_status {
- __le16 command_flags; /* only field set on command */
-#define I40E_AQ_LSE_MASK 0x3
-#define I40E_AQ_LSE_NOP 0x0
-#define I40E_AQ_LSE_DISABLE 0x2
-#define I40E_AQ_LSE_ENABLE 0x3
+ __le16 command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK 0x3
+#define I40E_AQ_LSE_NOP 0x0
+#define I40E_AQ_LSE_DISABLE 0x2
+#define I40E_AQ_LSE_ENABLE 0x3
 /* only response uses this flag */
-#define I40E_AQ_LSE_IS_ENABLED 0x1
- u8 phy_type; /* i40e_aq_phy_type */
- u8 link_speed; /* i40e_aq_link_speed */
- u8 link_info;
-#define I40E_AQ_LINK_UP 0x01
-#define I40E_AQ_LINK_FAULT 0x02
-#define I40E_AQ_LINK_FAULT_TX 0x04
-#define I40E_AQ_LINK_FAULT_RX 0x08
-#define I40E_AQ_LINK_FAULT_REMOTE 0x10
-#define I40E_AQ_MEDIA_AVAILABLE 0x40
-#define I40E_AQ_SIGNAL_DETECT 0x80
- u8 an_info;
-#define I40E_AQ_AN_COMPLETED 0x01
-#define I40E_AQ_LP_AN_ABILITY 0x02
-#define I40E_AQ_PD_FAULT 0x04
-#define I40E_AQ_FEC_EN 0x08
-#define I40E_AQ_PHY_LOW_POWER 0x10
-#define I40E_AQ_LINK_PAUSE_TX 0x20
-#define I40E_AQ_LINK_PAUSE_RX 0x40
-#define I40E_AQ_QUALIFIED_MODULE 0x80
- u8 ext_info;
-#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
-#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
-#define I40E_AQ_LINK_TX_SHIFT 0x02
-#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
-#define I40E_AQ_LINK_TX_ACTIVE 0x00
-#define I40E_AQ_LINK_TX_DRAINED 0x01
-#define I40E_AQ_LINK_TX_FLUSHED 0x03
-#define I40E_AQ_LINK_FORCED_40G 0x10
- u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
- __le16 max_frame_size;
- u8 config;
-#define I40E_AQ_CONFIG_CRC_ENA 0x04
-#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 reserved[5];
+#define I40E_AQ_LSE_IS_ENABLED 0x1
+ u8 phy_type; /* i40e_aq_phy_type */
+ u8 link_speed; /* i40e_aq_link_speed */
+ u8 link_info;
+#define I40E_AQ_LINK_UP 0x01
+#define I40E_AQ_LINK_FAULT 0x02
+#define I40E_AQ_LINK_FAULT_TX 0x04
+#define I40E_AQ_LINK_FAULT_RX 0x08
+#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_MEDIA_AVAILABLE 0x40
+#define I40E_AQ_SIGNAL_DETECT 0x80
+ u8 an_info;
+#define I40E_AQ_AN_COMPLETED 0x01
+#define I40E_AQ_LP_AN_ABILITY 0x02
+#define I40E_AQ_PD_FAULT 0x04
+#define I40E_AQ_FEC_EN 0x08
+#define I40E_AQ_PHY_LOW_POWER 0x10
+#define I40E_AQ_LINK_PAUSE_TX 0x20
+#define I40E_AQ_LINK_PAUSE_RX 0x40
+#define I40E_AQ_QUALIFIED_MODULE 0x80
+ u8 ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define I40E_AQ_LINK_TX_SHIFT 0x02
+#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE 0x00
+#define I40E_AQ_LINK_TX_DRAINED 0x01
+#define I40E_AQ_LINK_TX_FLUSHED 0x03
+#define I40E_AQ_LINK_FORCED_40G 0x10
+ u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+ __le16 max_frame_size;
+ u8 config;
+#define I40E_AQ_CONFIG_CRC_ENA 0x04
+#define I40E_AQ_CONFIG_PACING_MASK 0x78
+ u8 reserved[5];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
 /* Set event mask command (direct 0x613) */
 struct i40e_aqc_set_phy_int_mask {
- u8 reserved[8];
- __le16 event_mask;
-#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
-#define I40E_AQ_EVENT_MEDIA_NA 0x0004
-#define I40E_AQ_EVENT_LINK_FAULT 0x0008
-#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
-#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
-#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
-#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
-#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
-#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
- u8 reserved1[6];
+ u8 reserved[8];
+ __le16 event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
+#define I40E_AQ_EVENT_MEDIA_NA 0x0004
+#define I40E_AQ_EVENT_LINK_FAULT 0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+ u8 reserved1[6];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
@@ -1728,27 +1728,27 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
  * Get Link Partner AN advt register (direct 0x0616)
  */
 struct i40e_aqc_an_advt_reg {
- __le32 local_an_reg0;
- __le16 local_an_reg1;
- u8 reserved[10];
+ __le32 local_an_reg0;
+ __le16 local_an_reg1;
+ u8 reserved[10];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
 /* Set Loopback mode (0x0618) */
 struct i40e_aqc_set_lb_mode {
- __le16 lb_mode;
-#define I40E_AQ_LB_PHY_LOCAL 0x01
-#define I40E_AQ_LB_PHY_REMOTE 0x02
-#define I40E_AQ_LB_MAC_LOCAL 0x04
- u8 reserved[14];
+ __le16 lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL 0x01
+#define I40E_AQ_LB_PHY_REMOTE 0x02
+#define I40E_AQ_LB_MAC_LOCAL 0x04
+ u8 reserved[14];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
 /* Set PHY Debug command (0x0622) */
 struct i40e_aqc_set_phy_debug {
- u8 command_flags;
+ u8 command_flags;
 #define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
@@ -1757,15 +1757,15 @@ struct i40e_aqc_set_phy_debug {
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
 #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
- u8 reserved[15];
+ u8 reserved[15];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
 enum i40e_aq_phy_reg_type {
- I40E_AQC_PHY_REG_INTERNAL = 0x1,
- I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
- I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
+ I40E_AQC_PHY_REG_INTERNAL = 0x1,
+ I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
+ I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
 };
 /* NVM Read command (indirect 0x0701)
@@ -1773,40 +1773,40 @@ enum i40e_aq_phy_reg_type {
  * NVM Update commands (indirect 0x0703)
  */
 struct i40e_aqc_nvm_update {
- u8 command_flags;
-#define I40E_AQ_NVM_LAST_CMD 0x01
-#define I40E_AQ_NVM_FLASH_ONLY 0x80
- u8 module_pointer;
- __le16 length;
- __le32 offset;
- __le32 addr_high;
- __le32 addr_low;
+ u8 command_flags;
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+ u8 module_pointer;
+ __le16 length;
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
 /* NVM Config Read (indirect 0x0704) */
 struct i40e_aqc_nvm_config_read {
- __le16 cmd_flags;
+ __le16 cmd_flags;
 #define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
 #define ANVM_READ_SINGLE_FEATURE 0
 #define ANVM_READ_MULTIPLE_FEATURES 1
- __le16 element_count;
- __le16 element_id; /* Feature/field ID */
- u8 reserved[2];
- __le32 address_high;
- __le32 address_low;
+ __le16 element_count;
+ __le16 element_id; /* Feature/field ID */
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
 /* NVM Config Write (indirect 0x0705) */
 struct i40e_aqc_nvm_config_write {
- __le16 cmd_flags;
- __le16 element_count;
- u8 reserved[4];
- __le32 address_high;
- __le32 address_low;
+ __le16 cmd_flags;
+ __le16 element_count;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
@@ -1831,10 +1831,10 @@ struct i40e_aqc_nvm_config_data_immediate_field {
  * Send to Peer PF command (indirect 0x0803)
  */
 struct i40e_aqc_pf_vf_message {
- __le32 id;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
@@ -1870,22 +1870,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
  * uses i40e_aq_desc
  */
 struct i40e_aqc_alternate_write_done {
- __le16 cmd_flags;
+ __le16 cmd_flags;
 #define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
 #define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
 #define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
 #define I40E_AQ_ALTERNATE_RESET_NEEDED 2
- u8 reserved[14];
+ u8 reserved[14];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
 /* Set OEM mode (direct 0x0905) */
 struct i40e_aqc_alternate_set_mode {
- __le32 mode;
+ __le32 mode;
 #define I40E_AQ_ALTERNATE_MODE_NONE 0
 #define I40E_AQ_ALTERNATE_MODE_OEM 1
- u8 reserved[12];
+ u8 reserved[12];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
@@ -1896,33 +1896,33 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
 /* Lan Queue Overflow Event (direct, 0x1001) */
 struct i40e_aqc_lan_overflow {
- __le32 prtdcb_rupto;
- __le32 otx_ctl;
- u8 reserved[8];
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
 /* Get LLDP MIB (indirect 0x0A00) */
 struct i40e_aqc_lldp_get_mib {
- u8 type;
- u8 reserved1;
-#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
-#define I40E_AQ_LLDP_MIB_LOCAL 0x0
-#define I40E_AQ_LLDP_MIB_REMOTE 0x1
-#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
-#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
-#define I40E_AQ_LLDP_TX_SHIFT 0x4
-#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+ u8 type;
+ u8 reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define I40E_AQ_LLDP_MIB_LOCAL 0x0
+#define I40E_AQ_LLDP_MIB_REMOTE 0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
+#define I40E_AQ_LLDP_TX_SHIFT 0x4
+#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
 /* TX pause flags use I40E_AQ_LINK_TX_* above */
- __le16 local_len;
- __le16 remote_len;
- u8 reserved2[2];
- __le32 addr_high;
- __le32 addr_low;
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
@@ -1931,12 +1931,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
  * also used for the event (with type in the command field)
  */
 struct i40e_aqc_lldp_update_mib {
- u8 command;
-#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
-#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
- u8 reserved[7];
- __le32 addr_high;
- __le32 addr_low;
+ u8 command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
@@ -1945,35 +1945,35 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
  * Delete LLDP TLV (indirect 0x0A04)
  */
 struct i40e_aqc_lldp_add_tlv {
- u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
- u8 reserved1[1];
- __le16 len;
- u8 reserved2[4];
- __le32 addr_high;
- __le32 addr_low;
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
 /* Update LLDP TLV (indirect 0x0A03) */
 struct i40e_aqc_lldp_update_tlv {
- u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
- u8 reserved;
- __le16 old_len;
- __le16 new_offset;
- __le16 new_len;
- __le32 addr_high;
- __le32 addr_low;
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
 /* Stop LLDP (direct 0x0A05) */
 struct i40e_aqc_lldp_stop {
- u8 command;
-#define I40E_AQ_LLDP_AGENT_STOP 0x0
-#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
- u8 reserved[15];
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+ u8 reserved[15];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
@@ -1981,57 +1981,97 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
 /* Start LLDP (direct 0x0A06) */
 struct i40e_aqc_lldp_start {
- u8 command;
-#define I40E_AQ_LLDP_AGENT_START 0x1
- u8 reserved[15];
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_START 0x1
+ u8 reserved[15];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
-/* Apply MIB changes (0x0A07)
- * uses the generic struc as it contains no data
+/* Get CEE DCBX Oper Config (0x0A07)
+ * uses the generic descriptor struct
+ * returns below as indirect response
  */
+#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0
+#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT)
+#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3
+#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
+#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8
+#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
+#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0
+#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
+#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3
+#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
+#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
+struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
+ u8 reserved1;
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 reserved2;
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ u8 reserved3;
+ __le16 oper_app_prio;
+ u8 reserved4;
+ __le16 tlv_status;
+};
+
+I40E_CHECK_STRUCT_LEN(0x18, i40e_aqc_get_cee_dcb_cfg_v1_resp);
+
+struct i40e_aqc_get_cee_dcb_cfg_resp {
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ __le16 oper_app_prio;
+ __le32 tlv_status;
+ u8 reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
+
 /* Add Udp Tunnel command and completion (direct 0x0B00) */
 struct i40e_aqc_add_udp_tunnel {
- __le16 udp_port;
- u8 reserved0[3];
- u8 protocol_type;
+ __le16 udp_port;
+ u8 reserved0[3];
+ u8 protocol_type;
 #define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
 #define I40E_AQC_TUNNEL_TYPE_NGE 0x01
 #define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
- u8 reserved1[10];
+ u8 reserved1[10];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
 struct i40e_aqc_add_udp_tunnel_completion {
- __le16 udp_port;
- u8 filter_entry_index;
- u8 multiple_pfs;
-#define I40E_AQC_SINGLE_PF 0x0
-#define I40E_AQC_MULTIPLE_PFS 0x1
- u8 total_filters;
- u8 reserved[11];
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
 /* remove UDP Tunnel command (0x0B01) */
 struct i40e_aqc_remove_udp_tunnel {
- u8 reserved[2];
- u8 index; /* 0 to 15 */
- u8 reserved2[13];
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 reserved2[13];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
 struct i40e_aqc_del_udp_tunnel_completion {
- __le16 udp_port;
- u8 index; /* 0 to 15 */
- u8 multiple_pfs;
- u8 total_filters_used;
- u8 reserved1[11];
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved1[11];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
@@ -2044,11 +2084,11 @@ struct i40e_aqc_tunnel_key_structure {
 u8 key1_len; /* 0 to 15 */
 u8 key2_len; /* 0 to 15 */
 u8 flags;
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
 /* response flags */
-#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
-#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
 u8 network_key_index;
 #define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
 #define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
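/*
 * Illustrative sketch (hypothetical, not from the driver): the tlv_status
 * word of the new CEE DCBX response is decoded with the *_STATUS_SHIFT and
 * *_STATUS_MASK macros defined above.
 */
static void example_parse_cee_tlv_status(
		const struct i40e_aqc_get_cee_dcb_cfg_resp *resp)
{
	u32 status = le32_to_cpu(resp->tlv_status);
	u32 pg = (status & I40E_AQC_CEE_PG_STATUS_MASK) >>
		 I40E_AQC_CEE_PG_STATUS_SHIFT;
	u32 pfc = (status & I40E_AQC_CEE_PFC_STATUS_MASK) >>
		  I40E_AQC_CEE_PFC_STATUS_SHIFT;
	u32 app = (status & I40E_AQC_CEE_APP_STATUS_MASK) >>
		  I40E_AQC_CEE_APP_STATUS_SHIFT;

	pr_debug("CEE oper status: pg=%u pfc=%u app=%u\n", pg, pfc, app);
}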
-2061,21 +2101,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); /* OEM mode commands (direct 0xFE0x) */ struct i40e_aqc_oem_param_change { - __le32 param_type; -#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 -#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 -#define I40E_AQ_OEM_PARAM_MAC 2 - __le32 param_value1; - u8 param_value2[8]; + __le32 param_type; +#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 +#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 +#define I40E_AQ_OEM_PARAM_MAC 2 + __le32 param_value1; + u8 param_value2[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); struct i40e_aqc_oem_state_change { - __le32 state; -#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 -#define I40E_AQ_OEM_STATE_LINK_UP 0x1 - u8 reserved[12]; + __le32 state; +#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 +#define I40E_AQ_OEM_STATE_LINK_UP 0x1 + u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); @@ -2087,18 +2127,18 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); /* set test more (0xFF01, internal) */ struct i40e_acq_set_test_mode { - u8 mode; -#define I40E_AQ_TEST_PARTIAL 0 -#define I40E_AQ_TEST_FULL 1 -#define I40E_AQ_TEST_NVM 2 - u8 reserved[3]; - u8 command; -#define I40E_AQ_TEST_OPEN 0 -#define I40E_AQ_TEST_CLOSE 1 -#define I40E_AQ_TEST_INC 2 - u8 reserved2[3]; - __le32 address_high; - __le32 address_low; + u8 mode; +#define I40E_AQ_TEST_PARTIAL 0 +#define I40E_AQ_TEST_FULL 1 +#define I40E_AQ_TEST_NVM 2 + u8 reserved[3]; + u8 command; +#define I40E_AQ_TEST_OPEN 0 +#define I40E_AQ_TEST_CLOSE 1 +#define I40E_AQ_TEST_INC 2 + u8 reserved2[3]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); @@ -2151,21 +2191,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); #define I40E_AQ_CLUSTER_ID_ALTRAM 11 struct i40e_aqc_debug_dump_internals { - u8 cluster_id; - u8 table_id; - __le16 data_size; - __le32 idx; - __le32 address_high; - __le32 address_low; + u8 cluster_id; + u8 table_id; + __le16 data_size; + __le32 idx; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); struct i40e_aqc_debug_modify_internals { - u8 cluster_id; - u8 cluster_specific_params[7]; - __le32 address_high; - __le32 address_low; + u8 cluster_id; + u8 cluster_specific_params[7]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 30056b25d94e6c331d9456c7171e3b51b5b7e754..3d741ee99a2cdea92c96efee39fda62c541c9b3e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -50,6 +50,7 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_QSFP_A: case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_C: + case I40E_DEV_ID_10G_BASE_T: hw->mac.type = I40E_MAC_XL710; break; case I40E_DEV_ID_VF: @@ -549,7 +550,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { i40e_status i40e_init_shared_code(struct i40e_hw *hw) { i40e_status status = 0; - u32 reg; + u32 port, ari, func_rid; i40e_set_mac_type(hw); @@ -562,18 +563,17 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw) hw->phy.get_link_info = true; - /* Determine port number */ - reg = rd32(hw, I40E_PFGEN_PORTNUM); - reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >> - I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT); - hw->port = (u8)reg; - - /* Determine the PF number based on the PCI fn */ - reg = rd32(hw, I40E_GLPCI_CAPSUP); - if (reg & I40E_GLPCI_CAPSUP_ARI_EN_MASK) - 
hw->pf_id = (u8)((hw->bus.device << 3) | hw->bus.func); + /* Determine port number and PF number */ + port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) + >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT; + hw->port = (u8)port; + ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >> + I40E_GLPCI_CAPSUP_ARI_EN_SHIFT; + func_rid = rd32(hw, I40E_PF_FUNC_RID); + if (ari) + hw->pf_id = (u8)(func_rid & 0xff); else - hw->pf_id = (u8)hw->bus.func; + hw->pf_id = (u8)(func_rid & 0x7); status = i40e_init_nvm(hw); return status; @@ -790,7 +790,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) } #define I40E_PF_RESET_WAIT_COUNT_A0 200 -#define I40E_PF_RESET_WAIT_COUNT 100 +#define I40E_PF_RESET_WAIT_COUNT 110 /** * i40e_pf_reset - Reset the PF * @hw: pointer to the hardware structure @@ -1419,6 +1419,33 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse) return status; } +/** + * i40e_aq_set_phy_int_mask + * @hw: pointer to the hw struct + * @mask: interrupt mask to be set + * @cmd_details: pointer to command details structure or NULL + * + * Set link interrupt mask. + **/ +i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, + u16 mask, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_phy_int_mask *cmd = + (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_phy_int_mask); + + cmd->event_mask = cpu_to_le16(mask); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + /** * i40e_aq_add_vsi * @hw: pointer to the hw struct @@ -2631,6 +2658,34 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, return status; } +/** + * i40e_aq_get_cee_dcb_config + * @hw: pointer to the hw struct + * @buff: response buffer that stores CEE operational configuration + * @buff_size: size of the buffer passed + * @cmd_details: pointer to command details structure or NULL + * + * Get CEE DCBX mode operational configuration from firmware + **/ +i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + i40e_status status; + + if (buff_size == 0 || !buff) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); + + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, + cmd_details); + + return status; +} + /** * i40e_aq_add_udp_tunnel * @hw: pointer to the hw struct @@ -3188,6 +3243,26 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, return status; } +/** + * i40e_aq_resume_port_tx + * @hw: pointer to the hardware structure + * @cmd_details: pointer to command details structure or NULL + * + * Resume port's Tx traffic + **/ +i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + /** * i40e_set_pci_config_data - store PCI bus info * @hw: pointer to hardware structure diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 036570d76176cd211ef2ee22328aeecd96b7174a..3ce43588592d99c7c5449b6cd0fb84a31177d84f 100644 --- 
a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -59,7 +59,7 @@ i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status) static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { - struct i40e_ieee_ets_config *etscfg; + struct i40e_dcb_ets_config *etscfg; u8 *buf = tlv->tlvinfo; u16 offset = 0; u8 priority; @@ -406,6 +406,166 @@ free_mem: return ret; } +/** + * i40e_cee_to_dcb_v1_config + * @cee_cfg: pointer to CEE v1 response configuration struct + * @dcbcfg: DCB configuration struct + * + * Convert CEE v1 configuration from firmware to DCB configuration + **/ +static void i40e_cee_to_dcb_v1_config( + struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg, + struct i40e_dcbx_config *dcbcfg) +{ + u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status); + u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); + u8 i, tc, err, sync, oper; + + /* CEE PG data to ETS config */ + dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; + + for (i = 0; i < 4; i++) { + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_1_MASK) >> + I40E_CEE_PGID_PRIO_1_SHIFT); + dcbcfg->etscfg.prioritytable[i*2] = tc; + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_0_MASK) >> + I40E_CEE_PGID_PRIO_0_SHIFT); + dcbcfg->etscfg.prioritytable[i*2 + 1] = tc; + } + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i]; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) { + /* Map it to next empty TC */ + dcbcfg->etscfg.prioritytable[i] = + cee_cfg->oper_num_tc - 1; + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT; + } else { + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; + } + } + + /* CEE PFC data to ETS config */ + dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en; + dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + + status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >> + I40E_AQC_CEE_APP_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; + sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; + oper = (status & I40E_TLV_STATUS_OPER) ? 
1 : 0; + /* Add APPs if Error is False and Oper/Sync is True */ + if (!err && sync && oper) { + /* CEE operating configuration supports FCoE/iSCSI/FIP only */ + dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS; + + /* FCoE APP */ + dcbcfg->app[0].priority = + (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> + I40E_AQC_CEE_APP_FCOE_SHIFT; + dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; + + /* iSCSI APP */ + dcbcfg->app[1].priority = + (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> + I40E_AQC_CEE_APP_ISCSI_SHIFT; + dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP; + dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI; + + /* FIP APP */ + dcbcfg->app[2].priority = + (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> + I40E_AQC_CEE_APP_FIP_SHIFT; + dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP; + } +} + +/** + * i40e_cee_to_dcb_config + * @cee_cfg: pointer to CEE configuration struct + * @dcbcfg: DCB configuration struct + * + * Convert CEE configuration from firmware to DCB configuration + **/ +static void i40e_cee_to_dcb_config( + struct i40e_aqc_get_cee_dcb_cfg_resp *cee_cfg, + struct i40e_dcbx_config *dcbcfg) +{ + u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status); + u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); + u8 i, tc, err, sync, oper; + + /* CEE PG data to ETS config */ + dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; + + for (i = 0; i < 4; i++) { + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_1_MASK) >> + I40E_CEE_PGID_PRIO_1_SHIFT); + dcbcfg->etscfg.prioritytable[i*2] = tc; + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_0_MASK) >> + I40E_CEE_PGID_PRIO_0_SHIFT); + dcbcfg->etscfg.prioritytable[i*2 + 1] = tc; + } + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i]; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) { + /* Map it to next empty TC */ + dcbcfg->etscfg.prioritytable[i] = + cee_cfg->oper_num_tc - 1; + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT; + } else { + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; + } + } + + /* CEE PFC data to ETS config */ + dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en; + dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + + status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >> + I40E_AQC_CEE_APP_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; + sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; + oper = (status & I40E_TLV_STATUS_OPER) ? 
1 : 0; + /* Add APPs if Error is False and Oper/Sync is True */ + if (!err && sync && oper) { + /* CEE operating configuration supports FCoE/iSCSI/FIP only */ + dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS; + + /* FCoE APP */ + dcbcfg->app[0].priority = + (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> + I40E_AQC_CEE_APP_FCOE_SHIFT; + dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; + + /* iSCSI APP */ + dcbcfg->app[1].priority = + (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> + I40E_AQC_CEE_APP_ISCSI_SHIFT; + dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP; + dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI; + + /* FIP APP */ + dcbcfg->app[2].priority = + (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> + I40E_AQC_CEE_APP_FIP_SHIFT; + dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP; + } +} + /** * i40e_get_dcb_config * @hw: pointer to the hw struct @@ -415,7 +575,44 @@ free_mem: i40e_status i40e_get_dcb_config(struct i40e_hw *hw) { i40e_status ret = 0; + struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg; + struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg; + + /* If Firmware version < v4.33 IEEE only */ + if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || + (hw->aq.fw_maj_ver < 4)) + goto ieee; + + /* If Firmware version == v4.33 use old CEE struct */ + if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) { + ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg, + sizeof(cee_v1_cfg), NULL); + if (!ret) { + /* CEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; + i40e_cee_to_dcb_v1_config(&cee_v1_cfg, + &hw->local_dcbx_config); + } + } else { + ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg, + sizeof(cee_cfg), NULL); + if (!ret) { + /* CEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; + i40e_cee_to_dcb_config(&cee_cfg, + &hw->local_dcbx_config); + } + } + + /* CEE mode not enabled try querying IEEE data */ + if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) + goto ieee; + else + goto out; +ieee: + /* IEEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE; /* Get Local DCB Config */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, &hw->local_dcbx_config); @@ -426,6 +623,10 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw) ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, &hw->remote_dcbx_config); + /* Don't treat ENOENT as an error for Remote MIBs */ + if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) + ret = 0; + out: return ret; } @@ -439,10 +640,27 @@ out: i40e_status i40e_init_dcb(struct i40e_hw *hw) { i40e_status ret = 0; + struct i40e_lldp_variables lldp_cfg; + u8 adminstatus = 0; if (!hw->func_caps.dcb) return ret; + /* Read LLDP NVM area */ + ret = i40e_read_lldp_cfg(hw, &lldp_cfg); + if (ret) + return ret; + + /* Get the LLDP AdminStatus for the current port */ + adminstatus = lldp_cfg.adminstatus >> (hw->port * 4); + adminstatus &= 0xF; + + /* LLDP agent disabled */ + if (!adminstatus) { + hw->dcbx_status = I40E_DCBX_STATUS_DISABLED; + return ret; + } + /* Get DCBX status */ ret = i40e_get_dcbx_status(hw, &hw->dcbx_status); if (ret) @@ -454,6 +672,8 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw) case I40E_DCBX_STATUS_IN_PROGRESS: /* Get current DCBX configuration */ ret = i40e_get_dcb_config(hw); + if (ret) + return ret; break; case I40E_DCBX_STATUS_DISABLED: return ret; @@ -470,3 +690,33 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw) return ret; } + +/** + * i40e_read_lldp_cfg 
- read LLDP Configuration data from NVM + * @hw: pointer to the HW structure + * @lldp_cfg: pointer to hold lldp configuration variables + * + * Reads the LLDP configuration data from NVM + **/ +i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw, + struct i40e_lldp_variables *lldp_cfg) +{ + i40e_status ret = 0; + u32 offset = (2 * I40E_NVM_LLDP_CFG_PTR); + + if (!lldp_cfg) + return I40E_ERR_PARAM; + + ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret) + goto err_lldp_cfg; + + ret = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, offset, + sizeof(struct i40e_lldp_variables), + (u8 *)lldp_cfg, + true, NULL); + i40e_release_nvm(hw); + +err_lldp_cfg: + return ret; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index 34cf1c30c7ffdff1210cea04e0c2cbeb160ef399..e137e3fac8ee2fd280ffaac6ac5924365e3abbdc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -65,6 +65,11 @@ #define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT) #define I40E_IEEE_ETS_PRIO_1_SHIFT 4 #define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT) +#define I40E_CEE_PGID_PRIO_0_SHIFT 0 +#define I40E_CEE_PGID_PRIO_0_MASK (0xF << I40E_CEE_PGID_PRIO_0_SHIFT) +#define I40E_CEE_PGID_PRIO_1_SHIFT 4 +#define I40E_CEE_PGID_PRIO_1_MASK (0xF << I40E_CEE_PGID_PRIO_1_SHIFT) +#define I40E_CEE_PGID_STRICT 15 /* Defines for IEEE TSA types */ #define I40E_IEEE_TSA_STRICT 0 diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index 00bc0cdb3a0304b87ec9935b148a81c683927f51..183dcb63ce98e14e5b504bed9874911b35ae18d3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -207,7 +207,7 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi) * VSI **/ static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi, - struct i40e_ieee_app_priority_table *app) + struct i40e_dcb_app_priority_table *app) { struct net_device *dev = vsi->netdev; struct dcb_app sapp; @@ -229,7 +229,7 @@ static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi, * Delete given APP from all the VSIs for given PF **/ static void i40e_dcbnl_del_app(struct i40e_pf *pf, - struct i40e_ieee_app_priority_table *app) + struct i40e_dcb_app_priority_table *app) { int v, err; for (v = 0; v < pf->num_alloc_vsi; v++) { @@ -252,7 +252,7 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf, * Find given APP in the DCB configuration **/ static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg, - struct i40e_ieee_app_priority_table *app) + struct i40e_dcb_app_priority_table *app) { int i; @@ -277,7 +277,7 @@ static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg, void i40e_dcbnl_flush_apps(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg) { - struct i40e_ieee_app_priority_table app; + struct i40e_dcb_app_priority_table app; struct i40e_dcbx_config *dcbxcfg; struct i40e_hw *hw = &pf->hw; int i; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 7067f4b9159c99e9720484ca2ea5d7fe574fed5f..433a55886ad29bfb1b357d47ad4953f9ddac6145 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -773,7 +773,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, { struct i40e_tx_desc *txd; union i40e_rx_desc *rxd; - struct i40e_ring ring; + struct i40e_ring *ring; struct i40e_vsi *vsi; int i; @@ -792,29 +792,32 @@ 
static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, vsi_seid); return; } - if (is_rx_ring) - ring = *vsi->rx_rings[ring_id]; - else - ring = *vsi->tx_rings[ring_id]; + + ring = kmemdup(is_rx_ring + ? vsi->rx_rings[ring_id] : vsi->tx_rings[ring_id], + sizeof(*ring), GFP_KERNEL); + if (!ring) + return; + if (cnt == 2) { dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n", vsi_seid, is_rx_ring ? "rx" : "tx", ring_id); - for (i = 0; i < ring.count; i++) { + for (i = 0; i < ring->count; i++) { if (!is_rx_ring) { - txd = I40E_TX_DESC(&ring, i); + txd = I40E_TX_DESC(ring, i); dev_info(&pf->pdev->dev, " d[%03i] = 0x%016llx 0x%016llx\n", i, txd->buffer_addr, txd->cmd_type_offset_bsz); } else if (sizeof(union i40e_rx_desc) == sizeof(union i40e_16byte_rx_desc)) { - rxd = I40E_RX_DESC(&ring, i); + rxd = I40E_RX_DESC(ring, i); dev_info(&pf->pdev->dev, " d[%03i] = 0x%016llx 0x%016llx\n", i, rxd->read.pkt_addr, rxd->read.hdr_addr); } else { - rxd = I40E_RX_DESC(&ring, i); + rxd = I40E_RX_DESC(ring, i); dev_info(&pf->pdev->dev, " d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", i, rxd->read.pkt_addr, @@ -823,26 +826,26 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, } } } else if (cnt == 3) { - if (desc_n >= ring.count || desc_n < 0) { + if (desc_n >= ring->count || desc_n < 0) { dev_info(&pf->pdev->dev, "descriptor %d not found\n", desc_n); return; } if (!is_rx_ring) { - txd = I40E_TX_DESC(&ring, desc_n); + txd = I40E_TX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, "vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, txd->buffer_addr, txd->cmd_type_offset_bsz); } else if (sizeof(union i40e_rx_desc) == sizeof(union i40e_16byte_rx_desc)) { - rxd = I40E_RX_DESC(&ring, desc_n); + rxd = I40E_RX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, rxd->read.pkt_addr, rxd->read.hdr_addr); } else { - rxd = I40E_RX_DESC(&ring, desc_n); + rxd = I40E_RX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, @@ -852,6 +855,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, } else { dev_info(&pf->pdev->dev, "dump desc rx/tx []\n"); } + kfree(ring); } /** @@ -894,90 +898,6 @@ static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf, estats->tx_discards, estats->tx_errors); } -/** - * i40e_dbg_dump_stats - handles dump stats write into command datum - * @pf: the i40e_pf created in command write - * @stats: the stats structure to be dumped - **/ -static void i40e_dbg_dump_stats(struct i40e_pf *pf, - struct i40e_hw_port_stats *stats) -{ - int i; - - dev_info(&pf->pdev->dev, " stats:\n"); - dev_info(&pf->pdev->dev, - " crc_errors = \t\t%lld \tillegal_bytes = \t%lld \terror_bytes = \t\t%lld\n", - stats->crc_errors, stats->illegal_bytes, stats->error_bytes); - dev_info(&pf->pdev->dev, - " mac_local_faults = \t%lld \tmac_remote_faults = \t%lld \trx_length_errors = \t%lld\n", - stats->mac_local_faults, stats->mac_remote_faults, - stats->rx_length_errors); - dev_info(&pf->pdev->dev, - " link_xon_rx = \t\t%lld \tlink_xoff_rx = \t\t%lld \tlink_xon_tx = \t\t%lld\n", - stats->link_xon_rx, stats->link_xoff_rx, stats->link_xon_tx); - dev_info(&pf->pdev->dev, - " link_xoff_tx = \t\t%lld \trx_size_64 = \t\t%lld \trx_size_127 = \t\t%lld\n", - stats->link_xoff_tx, stats->rx_size_64, stats->rx_size_127); - dev_info(&pf->pdev->dev, 
- " rx_size_255 = \t\t%lld \trx_size_511 = \t\t%lld \trx_size_1023 = \t\t%lld\n", - stats->rx_size_255, stats->rx_size_511, stats->rx_size_1023); - dev_info(&pf->pdev->dev, - " rx_size_big = \t\t%lld \trx_undersize = \t\t%lld \trx_jabber = \t\t%lld\n", - stats->rx_size_big, stats->rx_undersize, stats->rx_jabber); - dev_info(&pf->pdev->dev, - " rx_fragments = \t\t%lld \trx_oversize = \t\t%lld \ttx_size_64 = \t\t%lld\n", - stats->rx_fragments, stats->rx_oversize, stats->tx_size_64); - dev_info(&pf->pdev->dev, - " tx_size_127 = \t\t%lld \ttx_size_255 = \t\t%lld \ttx_size_511 = \t\t%lld\n", - stats->tx_size_127, stats->tx_size_255, stats->tx_size_511); - dev_info(&pf->pdev->dev, - " tx_size_1023 = \t\t%lld \ttx_size_big = \t\t%lld \tmac_short_packet_dropped = \t%lld\n", - stats->tx_size_1023, stats->tx_size_big, - stats->mac_short_packet_dropped); - for (i = 0; i < 8; i += 4) { - dev_info(&pf->pdev->dev, - " priority_xon_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n", - i, stats->priority_xon_rx[i], - i+1, stats->priority_xon_rx[i+1], - i+2, stats->priority_xon_rx[i+2], - i+3, stats->priority_xon_rx[i+3]); - } - for (i = 0; i < 8; i += 4) { - dev_info(&pf->pdev->dev, - " priority_xoff_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n", - i, stats->priority_xoff_rx[i], - i+1, stats->priority_xoff_rx[i+1], - i+2, stats->priority_xoff_rx[i+2], - i+3, stats->priority_xoff_rx[i+3]); - } - for (i = 0; i < 8; i += 4) { - dev_info(&pf->pdev->dev, - " priority_xon_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n", - i, stats->priority_xon_tx[i], - i+1, stats->priority_xon_tx[i+1], - i+2, stats->priority_xon_tx[i+2], - i+3, stats->priority_xon_rx[i+3]); - } - for (i = 0; i < 8; i += 4) { - dev_info(&pf->pdev->dev, - " priority_xoff_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n", - i, stats->priority_xoff_tx[i], - i+1, stats->priority_xoff_tx[i+1], - i+2, stats->priority_xoff_tx[i+2], - i+3, stats->priority_xoff_tx[i+3]); - } - for (i = 0; i < 8; i += 4) { - dev_info(&pf->pdev->dev, - " priority_xon_2_xoff[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n", - i, stats->priority_xon_2_xoff[i], - i+1, stats->priority_xon_2_xoff[i+1], - i+2, stats->priority_xon_2_xoff[i+2], - i+3, stats->priority_xon_2_xoff[i+3]); - } - - i40e_dbg_dump_eth_stats(pf, &stats->eth); -} - /** * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb * @pf: the i40e_pf created in command write @@ -1342,11 +1262,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp, "dump desc rx []\n"); dev_info(&pf->pdev->dev, "dump desc aq\n"); } - } else if (strncmp(&cmd_buf[5], "stats", 5) == 0) { - dev_info(&pf->pdev->dev, "pf stats:\n"); - i40e_dbg_dump_stats(pf, &pf->stats); - dev_info(&pf->pdev->dev, "pf stats_offsets:\n"); - i40e_dbg_dump_stats(pf, &pf->stats_offsets); } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) { dev_info(&pf->pdev->dev, "core reset count: %d\n", pf->corer_count); @@ -1401,6 +1316,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp, kfree(bw_data); bw_data = NULL; + dev_info(&pf->pdev->dev, + "port dcbx_mode=%d\n", cfg->dcbx_mode); dev_info(&pf->pdev->dev, "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n", cfg->etscfg.willing, cfg->etscfg.cbs, @@ -1464,8 +1381,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } else { dev_info(&pf->pdev->dev, "dump desc tx [], dump desc rx [],\n"); - dev_info(&pf->pdev->dev, "dump switch, dump vsi [seid] or\n"); - dev_info(&pf->pdev->dev, "dump 
stats\n"); + dev_info(&pf->pdev->dev, "dump switch\n"); + dev_info(&pf->pdev->dev, "dump vsi [seid]\n"); dev_info(&pf->pdev->dev, "dump reset stats\n"); dev_info(&pf->pdev->dev, "dump port\n"); dev_info(&pf->pdev->dev, @@ -1580,7 +1497,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, if (!desc) goto command_write_done; cnt = sscanf(&cmd_buf[11], - "%hx %hx %hx %hx %x %x %x %x %x %x", + "%hi %hi %hi %hi %i %i %i %i %i %i", &desc->flags, &desc->opcode, &desc->datalen, &desc->retval, &desc->cookie_high, &desc->cookie_low, @@ -1628,7 +1545,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, if (!desc) goto command_write_done; cnt = sscanf(&cmd_buf[20], - "%hx %hx %hx %hx %x %x %x %x %x %x %hd", + "%hi %hi %hi %hi %i %i %i %i %i %i %hi", &desc->flags, &desc->opcode, &desc->datalen, &desc->retval, &desc->cookie_high, &desc->cookie_low, diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 1dda467ae1ac62bdc5428e4c6086c322fdac597d..951e8767fc502e57e91b40412dba09fa27f0e8c6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -40,8 +40,9 @@ struct i40e_stats { .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ .stat_offset = offsetof(_type, _stat) \ } + #define I40E_NETDEV_STAT(_net_stat) \ - I40E_STAT(struct net_device_stats, #_net_stat, _net_stat) + I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) #define I40E_PF_STAT(_name, _stat) \ I40E_STAT(struct i40e_pf, _name, _stat) #define I40E_VSI_STAT(_name, _stat) \ @@ -264,6 +265,14 @@ static int i40e_get_settings(struct net_device *netdev, ecmd->supported = SUPPORTED_10000baseKR_Full; ecmd->advertising = ADVERTISED_10000baseKR_Full; break; + case I40E_DEV_ID_10G_BASE_T: + ecmd->supported = SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; + ecmd->advertising = ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Full; + break; default: /* all the rest are 10G/1G */ ecmd->supported = SUPPORTED_10000baseT_Full | @@ -322,9 +331,13 @@ static int i40e_get_settings(struct net_device *netdev, case I40E_PHY_TYPE_10GBASE_CR1: case I40E_PHY_TYPE_10GBASE_T: ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_10000baseT_Full; + SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_10000baseT_Full; + ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Full; break; case I40E_PHY_TYPE_XAUI: case I40E_PHY_TYPE_XFI: @@ -335,14 +348,22 @@ static int i40e_get_settings(struct net_device *netdev, case I40E_PHY_TYPE_1000BASE_KX: case I40E_PHY_TYPE_1000BASE_T: ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_1000baseT_Full; + SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_1000baseT_Full; + ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Full; break; case I40E_PHY_TYPE_100BASE_TX: ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full; ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full | ADVERTISED_100baseT_Full; break; case I40E_PHY_TYPE_SGMII: @@ -426,6 +447,9 @@ no_valid_phy_type: case I40E_LINK_SPEED_1GB: ethtool_cmd_speed_set(ecmd, SPEED_1000); break; + case I40E_LINK_SPEED_100MB: + ethtool_cmd_speed_set(ecmd, 
SPEED_100); + break; default: break; } @@ -528,7 +552,7 @@ static int i40e_set_settings(struct net_device *netdev, } /* If autoneg is currently enabled */ if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) { - config.abilities = abilities.abilities | + config.abilities = abilities.abilities & ~I40E_AQ_PHY_ENABLE_AN; change = true; } @@ -621,11 +645,19 @@ static void i40e_get_pauseparam(struct net_device *netdev, struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_link_status *hw_link_info = &hw->phy.link_info; + struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; pause->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? AUTONEG_ENABLE : AUTONEG_DISABLE); + /* PFC enabled so report LFC as off */ + if (dcbx_cfg->pfc.pfcenable) { + pause->rx_pause = 0; + pause->tx_pause = 0; + return; + } + if (hw->fc.current_mode == I40E_FC_RX_PAUSE) { pause->rx_pause = 1; } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) { @@ -649,6 +681,7 @@ static int i40e_set_pauseparam(struct net_device *netdev, struct i40e_vsi *vsi = np->vsi; struct i40e_hw *hw = &pf->hw; struct i40e_link_status *hw_link_info = &hw->phy.link_info; + struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; i40e_status status; u8 aq_failures; @@ -670,8 +703,9 @@ static int i40e_set_pauseparam(struct net_device *netdev, netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n"); } - if (hw->fc.current_mode == I40E_FC_PFC) { - netdev_info(netdev, "Priority flow control enabled. Cannot set link flow control.\n"); + if (dcbx_cfg->pfc.pfcenable) { + netdev_info(netdev, + "Priority flow control enabled. Cannot set link flow control.\n"); return -EOPNOTSUPP; } @@ -788,7 +822,7 @@ static int i40e_get_eeprom(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; struct i40e_pf *pf = np->vsi->back; - int ret_val = 0, len; + int ret_val = 0, len, offset; u8 *eeprom_buff; u16 i, sectors; bool last; @@ -801,19 +835,21 @@ static int i40e_get_eeprom(struct net_device *netdev, /* check for NVMUpdate access method */ magic = hw->vendor_id | (hw->device_id << 16); if (eeprom->magic && eeprom->magic != magic) { + struct i40e_nvm_access *cmd; int errno; /* make sure it is the right magic for NVMUpdate */ if ((eeprom->magic >> 16) != hw->device_id) return -EINVAL; - ret_val = i40e_nvmupd_command(hw, - (struct i40e_nvm_access *)eeprom, - bytes, &errno); + cmd = (struct i40e_nvm_access *)eeprom; + ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); if (ret_val) dev_info(&pf->pdev->dev, - "NVMUpdate read failed err=%d status=0x%x\n", - ret_val, hw->aq.asq_last_status); + "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", + ret_val, hw->aq.asq_last_status, errno, + (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK), + cmd->offset, cmd->data_size); return errno; } @@ -842,20 +878,29 @@ static int i40e_get_eeprom(struct net_device *netdev, len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i); last = true; } - ret_val = i40e_aq_read_nvm(hw, 0x0, - eeprom->offset + (I40E_NVM_SECTOR_SIZE * i), - len, + offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i); + ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len, (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i), last, NULL); - if (ret_val) { + if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) { + dev_info(&pf->pdev->dev, + "read NVM failed, invalid offset 0x%x\n", + offset); + 
break; + } else if (ret_val && + hw->aq.asq_last_status == I40E_AQ_RC_EACCES) { dev_info(&pf->pdev->dev, - "read NVM failed err=%d status=0x%x\n", - ret_val, hw->aq.asq_last_status); - goto release_nvm; + "read NVM failed, access, offset 0x%x\n", + offset); + break; + } else if (ret_val) { + dev_info(&pf->pdev->dev, + "read NVM failed offset %d err=%d status=0x%x\n", + offset, ret_val, hw->aq.asq_last_status); + break; } } -release_nvm: i40e_release_nvm(hw); memcpy(bytes, (u8 *)eeprom_buff, eeprom->len); free_buff: @@ -883,6 +928,7 @@ static int i40e_set_eeprom(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; struct i40e_pf *pf = np->vsi->back; + struct i40e_nvm_access *cmd; int ret_val = 0; int errno; u32 magic; @@ -900,12 +946,14 @@ static int i40e_set_eeprom(struct net_device *netdev, test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) return -EBUSY; - ret_val = i40e_nvmupd_command(hw, (struct i40e_nvm_access *)eeprom, - bytes, &errno); - if (ret_val) + cmd = (struct i40e_nvm_access *)eeprom; + ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); + if (ret_val && hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) dev_info(&pf->pdev->dev, - "NVMUpdate write failed err=%d status=0x%x\n", - ret_val, hw->aq.asq_last_status); + "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", + ret_val, hw->aq.asq_last_status, errno, + (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK), + cmd->offset, cmd->data_size); return errno; } @@ -1292,6 +1340,10 @@ static int i40e_get_ts_info(struct net_device *dev, { struct i40e_pf *pf = i40e_netdev_to_pf(dev); + /* only report HW timestamping if PTP is enabled */ + if (!(pf->flags & I40E_FLAG_PTP)) + return ethtool_op_get_ts_info(dev, info); + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE | @@ -1355,6 +1407,9 @@ static int i40e_eeprom_test(struct net_device *netdev, u64 *data) netif_info(pf, hw, netdev, "eeprom test\n"); *data = i40e_diag_eeprom_test(&pf->hw); + /* forcibly clear the NVM Update state machine */ + pf->hw.nvmupd_state = I40E_NVMUPD_STATE_INIT; + return *data; } @@ -1367,7 +1422,10 @@ static int i40e_intr_test(struct net_device *netdev, u64 *data) netif_info(pf, hw, netdev, "interrupt test\n"); wr32(&pf->hw, I40E_PFINT_DYN_CTL0, (I40E_PFINT_DYN_CTL0_INTENA_MASK | - I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK)); + I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK)); usleep_range(1000, 2000); *data = (swc_old == pf->sw_int_count); @@ -1551,13 +1609,10 @@ static int i40e_set_coalesce(struct net_device *netdev, vsi->rx_itr_setting = ec->rx_coalesce_usecs; } else if (ec->rx_coalesce_usecs == 0) { vsi->rx_itr_setting = ec->rx_coalesce_usecs; - i40e_irq_dynamic_disable(vsi, vector); if (ec->use_adaptive_rx_coalesce) - netif_info(pf, drv, netdev, - "Rx-secs=0, need to disable adaptive-Rx for a complete disable\n"); + netif_info(pf, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); } else { - netif_info(pf, drv, netdev, - "Invalid value, Rx-usecs range is 0, 8-8160\n"); + netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); return -EINVAL; } @@ -1566,13 +1621,11 @@ static int i40e_set_coalesce(struct net_device *netdev, vsi->tx_itr_setting = ec->tx_coalesce_usecs; } else if (ec->tx_coalesce_usecs == 0) { vsi->tx_itr_setting = ec->tx_coalesce_usecs; - 
i40e_irq_dynamic_disable(vsi, vector); if (ec->use_adaptive_tx_coalesce) - netif_info(pf, drv, netdev, - "Tx-secs=0, need to disable adaptive-Tx for a complete disable\n"); + netif_info(pf, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); } else { netif_info(pf, drv, netdev, - "Invalid value, Tx-usecs range is 0, 8-8160\n"); + "Invalid value, tx-usecs range is 0-8160\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 5d01db1d789b0c64ddf4871a06fee3a34a19af8f..a8b8bd95108dd587bd961d144af334fb175da7f7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -343,7 +343,7 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf) **/ u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf) { - struct i40e_ieee_app_priority_table app; + struct i40e_dcb_app_priority_table app; struct i40e_hw *hw = &pf->hw; u8 enabled_tc = 0; u8 tc, i; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index c3a7f4a4b7757a35afa4a23b944a64e0ac4d673b..0a7ea4c5f9d3f7c79f585ddfe8696f6051556bee 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -38,8 +38,8 @@ static const char i40e_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 0 -#define DRV_VERSION_BUILD 11 +#define DRV_VERSION_MINOR 2 +#define DRV_VERSION_BUILD 2 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -74,6 +74,7 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, /* required last entry */ {0, } }; @@ -812,7 +813,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */ u32 tx_restart, tx_busy; + struct i40e_ring *p; u32 rx_page, rx_buf; + u64 bytes, packets; + unsigned int start; u64 rx_p, rx_b; u64 tx_p, tx_b; u16 q; @@ -836,10 +840,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) rx_buf = 0; rcu_read_lock(); for (q = 0; q < vsi->num_queue_pairs; q++) { - struct i40e_ring *p; - u64 bytes, packets; - unsigned int start; - /* locate Tx ring */ p = ACCESS_ONCE(vsi->tx_rings[q]); @@ -2381,6 +2381,35 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) #endif } +/** + * i40e_config_xps_tx_ring - Configure XPS for a Tx ring + * @ring: The Tx ring to configure + * + * This enables/disables XPS for a given Tx descriptor ring + * based on the TCs enabled for the VSI that ring belongs to. 
+ **/ +static void i40e_config_xps_tx_ring(struct i40e_ring *ring) +{ + struct i40e_vsi *vsi = ring->vsi; + cpumask_var_t mask; + + if (ring->q_vector && ring->netdev) { + /* Single TC mode enable XPS */ + if (vsi->tc_config.numtc <= 1 && + !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) { + netif_set_xps_queue(ring->netdev, + &ring->q_vector->affinity_mask, + ring->queue_index); + } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) { + /* Disable XPS to allow selection based on TC */ + bitmap_zero(cpumask_bits(mask), nr_cpumask_bits); + netif_set_xps_queue(ring->netdev, mask, + ring->queue_index); + free_cpumask_var(mask); + } + } +} + /** * i40e_configure_tx_ring - Configure a transmit ring context and rest * @ring: The Tx ring to configure @@ -2404,13 +2433,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) ring->atr_sample_rate = 0; } - /* initialize XPS */ - if (ring->q_vector && ring->netdev && - vsi->tc_config.numtc <= 1 && - !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) - netif_set_xps_queue(ring->netdev, - &ring->q_vector->affinity_mask, - ring->queue_index); + /* configure XPS */ + i40e_config_xps_tx_ring(ring); /* clear the context structure first */ memset(&tx_ctx, 0, sizeof(tx_ctx)); @@ -2462,10 +2486,14 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) } /* Now associate this queue with this PCI function */ - if (vsi->type == I40E_VSI_VMDQ2) + if (vsi->type == I40E_VSI_VMDQ2) { qtx_ctl = I40E_QTX_CTL_VM_QUEUE; - else + qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) & + I40E_QTX_CTL_VFVM_INDX_MASK; + } else { qtx_ctl = I40E_QTX_CTL_PF_QUEUE; + } + qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & I40E_QTX_CTL_PF_INDX_MASK); wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); @@ -3440,7 +3468,7 @@ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) break; - udelay(10); + usleep_range(10, 20); } if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) return -ETIMEDOUT; @@ -3466,7 +3494,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) /* warn the TX unit of coming changes */ i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); if (!enable) - udelay(10); + usleep_range(10, 20); for (j = 0; j < 50; j++) { tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); @@ -3488,6 +3516,9 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) } wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); + /* No waiting for the Tx queue to disable */ + if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state)) + continue; /* wait for the change to finish */ ret = i40e_pf_txq_wait(pf, pf_q, enable); @@ -3526,7 +3557,7 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable) if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) break; - udelay(10); + usleep_range(10, 20); } if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) return -ETIMEDOUT; @@ -3855,6 +3886,15 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi) if (test_bit(__I40E_DOWN, &vsi->state)) return; + /* No need to disable FCoE VSI when Tx suspended */ + if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) && + vsi->type == I40E_VSI_FCOE) { + dev_dbg(&vsi->back->pdev->dev, + "%s: VSI seid %d skipping FCoE VSI disable\n", + __func__, vsi->seid); + return; + } + set_bit(__I40E_NEEDS_RESTART, &vsi->state); if (vsi->netdev && netif_running(vsi->netdev)) { vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); @@ -3907,6 +3947,57 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) } } +#ifdef CONFIG_I40E_DCB +/** + * 
i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled + * @vsi: the VSI being configured + * + * This function waits for the given VSI's Tx queues to be disabled. + **/ +static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + int i, pf_q, ret; + + pf_q = vsi->base_queue; + for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { + /* Check and wait for the disable status of the queue */ + ret = i40e_pf_txq_wait(pf, pf_q, false); + if (ret) { + dev_info(&pf->pdev->dev, + "%s: VSI seid %d Tx ring %d disable timeout\n", + __func__, vsi->seid, pf_q); + return ret; + } + } + + return 0; +} + +/** + * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled + * @pf: the PF + * + * This function waits for the Tx queues to be in disabled state for all the + * VSIs that are managed by this PF. + **/ +static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf) +{ + int v, ret = 0; + + for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { + /* No need to wait for FCoE VSI queues */ + if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) { + ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]); + if (ret) + break; + } + } + + return ret; +} + +#endif /** * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config * @dcbcfg: the corresponding DCBx configuration structure @@ -4377,6 +4468,31 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf) } } +/** + * i40e_resume_port_tx - Resume port Tx + * @pf: PF struct + * + * Resume a port's Tx and issue a PF reset in case of failure to + * resume. + **/ +static int i40e_resume_port_tx(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + int ret; + + ret = i40e_aq_resume_port_tx(hw, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "AQ command Resume Port Tx failed = %d\n", + pf->hw.aq.asq_last_status); + /* Schedule PF reset to recover */ + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + i40e_service_event_schedule(pf); + } + + return ret; +} + /** * i40e_init_pf_dcb - Initialize DCB configuration * @pf: PF being configured @@ -4413,6 +4529,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) /* Enable DCB tagging only when more than one TC */ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) pf->flags |= I40E_FLAG_DCB_ENABLED; + dev_dbg(&pf->pdev->dev, + "DCBX offload is supported for this PF.\n"); } } else { dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n", @@ -4449,6 +4567,9 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) case I40E_LINK_SPEED_1GB: strlcpy(speed, "1000 Mbps", SPEED_SIZE); break; + case I40E_LINK_SPEED_100MB: + strncpy(speed, "100 Mbps", SPEED_SIZE); + break; default: break; } @@ -4479,12 +4600,8 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) static int i40e_up_complete(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; - u8 set_fc_aq_fail = 0; int err; - /* force flow control off */ - i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); - if (pf->flags & I40E_FLAG_MSIX_ENABLED) i40e_vsi_configure_msix(vsi); else @@ -4753,9 +4870,11 @@ int i40e_vsi_open(struct i40e_vsi *vsi) goto err_set_queues; } else if (vsi->type == I40E_VSI_FDIR) { - snprintf(int_name, sizeof(int_name) - 1, "%s-fdir", - dev_driver_string(&pf->pdev->dev)); + snprintf(int_name, sizeof(int_name) - 1, "%s-%s-fdir", + dev_driver_string(&pf->pdev->dev), + dev_name(&pf->pdev->dev)); err = i40e_vsi_request_irq(vsi, int_name); + } else { err = -EINVAL; goto err_setup_rx; @@ -4995,6 +5114,8 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf, 
dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); } + dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__, + need_reconfig); return need_reconfig; } @@ -5022,11 +5143,16 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, /* Ignore if event is not for Nearest Bridge */ type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); + dev_dbg(&pf->pdev->dev, + "%s: LLDP event mib bridge type 0x%x\n", __func__, type); if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE) return ret; /* Check MIB Type and return if event for Remote MIB update */ type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK; + dev_dbg(&pf->pdev->dev, + "%s: LLDP event mib type %s\n", __func__, + type ? "remote" : "local"); if (type == I40E_AQ_LLDP_MIB_REMOTE) { /* Update the remote cached instance and return */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, @@ -5035,12 +5161,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, goto exit; } - /* Convert/store the DCBX data from LLDPDU temporarily */ memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg)); - ret = i40e_lldp_to_dcb_config(e->msg_buf, &tmp_dcbx_cfg); + /* Store the old configuration */ + tmp_dcbx_cfg = *dcbx_cfg; + + /* Get updated DCBX data from firmware */ + ret = i40e_get_dcb_config(&pf->hw); if (ret) { - /* Error in LLDPDU parsing return */ - dev_info(&pf->pdev->dev, "Failed parsing LLDPDU from event buffer\n"); + dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n"); goto exit; } @@ -5050,12 +5178,9 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, goto exit; } - need_reconfig = i40e_dcb_need_reconfig(pf, dcbx_cfg, &tmp_dcbx_cfg); + need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, dcbx_cfg); - i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg); - - /* Overwrite the new configuration */ - *dcbx_cfg = tmp_dcbx_cfg; + i40e_dcbnl_flush_apps(pf, dcbx_cfg); if (!need_reconfig) goto exit; @@ -5066,13 +5191,24 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, else pf->flags &= ~I40E_FLAG_DCB_ENABLED; + set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); /* Reconfiguration needed quiesce all VSIs */ i40e_pf_quiesce_all_vsi(pf); /* Changes in configuration update VEB/VSI */ i40e_dcb_reconfigure(pf); - i40e_pf_unquiesce_all_vsi(pf); + ret = i40e_resume_port_tx(pf); + + clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); + /* In case of error no point in resuming VSIs */ + if (ret) + goto exit; + + /* Wait for the PF's Tx queues to be disabled */ + ret = i40e_pf_wait_txq_disabled(pf); + if (!ret) + i40e_pf_unquiesce_all_vsi(pf); exit: return ret; } @@ -5212,6 +5348,9 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) int flush_wait_retry = 50; int reg; + if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) + return; + if (time_after(jiffies, pf->fd_flush_timestamp + (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) { set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); @@ -5273,6 +5412,9 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) if (test_bit(__I40E_DOWN, &pf->state)) return; + if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) + return; + if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) && (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) && (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count)) @@ -5310,8 +5452,6 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) break; case I40E_VSI_SRIOV: - break; - case I40E_VSI_VMDQ2: case I40E_VSI_CTRL: case I40E_VSI_MIRROR: @@ -5353,14 +5493,21 @@ static void 
i40e_veb_link_event(struct i40e_veb *veb, bool link_up) static void i40e_link_event(struct i40e_pf *pf) { bool new_link, old_link; + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + + /* set this to force the get_link_status call to refresh state */ + pf->hw.phy.get_link_info = true; - new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP); old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); + new_link = i40e_get_link_status(&pf->hw); - if (new_link == old_link) + if (new_link == old_link && + (test_bit(__I40E_DOWN, &vsi->state) || + new_link == netif_carrier_ok(vsi->netdev))) return; - if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state)) - i40e_print_link_message(pf->vsi[pf->lan_vsi], new_link); + + if (!test_bit(__I40E_DOWN, &vsi->state)) + i40e_print_link_message(vsi, new_link); /* Notify the base of the switch tree connected to * the link. Floating VEBs are not notified. @@ -5368,7 +5515,7 @@ static void i40e_link_event(struct i40e_pf *pf) if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); else - i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link); + i40e_vsi_link_event(vsi, new_link); if (pf->vf) i40e_vc_notify_link_state(pf); @@ -5418,11 +5565,17 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf) if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) { wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, (I40E_PFINT_DYN_CTL0_INTENA_MASK | - I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK)); + I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK)); } else { u16 vec = vsi->base_vector - 1; u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK); + I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | + I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK | + I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK); for (i = 0; i < vsi->num_q_vectors; i++, vec++) wr32(&vsi->back->hw, I40E_PFINT_DYN_CTLN(vec), val); @@ -5433,7 +5586,7 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf) } /** - * i40e_watchdog_subtask - Check and bring link up + * i40e_watchdog_subtask - periodic checks not using event driven response * @pf: board private structure **/ static void i40e_watchdog_subtask(struct i40e_pf *pf) @@ -5445,6 +5598,15 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) test_bit(__I40E_CONFIG_BUSY, &pf->state)) return; + /* make sure we don't do these things too often */ + if (time_before(jiffies, (pf->service_timer_previous + + pf->service_timer_period))) + return; + pf->service_timer_previous = jiffies; + + i40e_check_hang_subtask(pf); + i40e_link_event(pf); + /* Update the stats for active netdevs so the network stack * can look at updated numbers whenever it cares to */ @@ -5525,33 +5687,20 @@ static void i40e_handle_link_event(struct i40e_pf *pf, memcpy(&pf->hw.phy.link_info_old, hw_link_info, sizeof(pf->hw.phy.link_info_old)); + /* Do a new status request to re-enable LSE reporting + * and load new status information into the hw struct + * This completely ignores any state information + * in the ARQ event info, instead choosing to always + * issue the AQ update link status command. 
+ */ + i40e_link_event(pf); + /* check for unqualified module, if link is down */ if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && (!(status->link_info & I40E_AQ_LINK_UP))) dev_err(&pf->pdev->dev, "The driver failed to link because an unqualified module was detected.\n"); - - /* update link status */ - hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type; - hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed; - hw_link_info->link_info = status->link_info; - hw_link_info->an_info = status->an_info; - hw_link_info->ext_info = status->ext_info; - hw_link_info->lse_enable = - le16_to_cpu(status->command_flags) & - I40E_AQ_LSE_ENABLE; - - /* process the event */ - i40e_link_event(pf); - - /* Do a new status request to re-enable LSE reporting - * and load new status information into the hw struct, - * then see if the status changed while processing the - * initial event. - */ - i40e_update_link_info(&pf->hw, true); - i40e_link_event(pf); } /** @@ -5607,13 +5756,12 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) if (oldval != val) wr32(&pf->hw, pf->hw.aq.asq.len, val); - event.msg_size = I40E_MAX_AQ_BUF_SIZE; - event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); + event.buf_len = I40E_MAX_AQ_BUF_SIZE; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) return; do { - event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */ ret = i40e_clean_arq_element(hw, &event, &pending); if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) break; @@ -5634,7 +5782,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) le32_to_cpu(event.desc.cookie_high), le32_to_cpu(event.desc.cookie_low), event.msg_buf, - event.msg_size); + event.msg_len); break; case i40e_aqc_opc_lldp_update_mib: dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); @@ -5740,6 +5888,9 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) if (ret) goto end_reconstitute; + /* Enable LB mode for the main VSI now that it is on a VEB */ + i40e_enable_pf_switch_lb(pf); + /* create the remaining VSIs attached to this VEB */ for (v = 0; v < pf->num_alloc_vsi; v++) { if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) @@ -5967,6 +6118,7 @@ static void i40e_send_version(struct i40e_pf *pf) static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) { struct i40e_hw *hw = &pf->hw; + u8 set_fc_aq_fail = 0; i40e_status ret; u32 v; @@ -6038,6 +6190,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) if (ret) goto end_core_reset; + /* driver is only interested in link up/down and module qualification + * reports from firmware + */ + ret = i40e_aq_set_phy_int_mask(&pf->hw, + I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); + if (ret) + dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret); + + /* make sure our flow control settings are restored */ + ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); + if (ret) + dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret); + /* Rebuild the VSIs and VEBs that existed before reset. * They are still in our local switch element arrays, so only * need to rebuild the switch model in the HW. 
@@ -6092,6 +6258,13 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) } } + msleep(75); + ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); + if (ret) { + dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); + } + /* reinit the misc interrupt */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) ret = i40e_setup_misc_vector(pf); @@ -6149,12 +6322,13 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) if (reg & I40E_GL_MDET_TX_VALID_MASK) { u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> I40E_GL_MDET_TX_PF_NUM_SHIFT; - u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> + u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> I40E_GL_MDET_TX_VF_NUM_SHIFT; u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> I40E_GL_MDET_TX_EVENT_SHIFT; - u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> - I40E_GL_MDET_TX_QUEUE_SHIFT; + u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >> + I40E_GL_MDET_TX_QUEUE_SHIFT) - + pf->hw.func_caps.base_queue; if (netif_msg_tx_err(pf)) dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n", event, queue, pf_num, vf_num); @@ -6167,8 +6341,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) I40E_GL_MDET_RX_FUNCTION_SHIFT; u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> I40E_GL_MDET_RX_EVENT_SHIFT; - u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> - I40E_GL_MDET_RX_QUEUE_SHIFT; + u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >> + I40E_GL_MDET_RX_QUEUE_SHIFT) - + pf->hw.func_caps.base_queue; if (netif_msg_rx_err(pf)) dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", event, queue, func); @@ -6298,7 +6473,6 @@ static void i40e_service_task(struct work_struct *work) i40e_vc_process_vflr_event(pf); i40e_watchdog_subtask(pf); i40e_fdir_reinit_subtask(pf); - i40e_check_hang_subtask(pf); i40e_sync_filters_subtask(pf); #ifdef CONFIG_I40E_VXLAN i40e_sync_vxlan_filters_subtask(pf); @@ -6676,6 +6850,7 @@ static int i40e_init_msix(struct i40e_pf *pf) { i40e_status err = 0; struct i40e_hw *hw = &pf->hw; + int other_vecs = 0; int v_budget, i; int vec; @@ -6701,10 +6876,10 @@ static int i40e_init_msix(struct i40e_pf *pf) */ pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size); pf->num_vmdq_msix = pf->num_vmdq_qps; - v_budget = 1 + pf->num_lan_msix; - v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix); + other_vecs = 1; + other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix); if (pf->flags & I40E_FLAG_FD_SB_ENABLED) - v_budget++; + other_vecs++; #ifdef I40E_FCOE if (pf->flags & I40E_FLAG_FCOE_ENABLED) { @@ -6714,7 +6889,9 @@ static int i40e_init_msix(struct i40e_pf *pf) #endif /* Scale down if necessary, and the rings will share vectors */ - v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors); + pf->num_lan_msix = min_t(int, pf->num_lan_msix, + (hw->func_caps.num_msix_vectors - other_vecs)); + v_budget = pf->num_lan_msix + other_vecs; pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); @@ -6964,20 +7141,16 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) **/ static int i40e_config_rss(struct i40e_pf *pf) { - /* Set of random keys generated using kernel random number generator */ - static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687, - 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377, - 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d, - 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be}; + u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1]; struct 
i40e_hw *hw = &pf->hw; u32 lut = 0; int i, j; u64 hena; u32 reg_val; - /* Fill out hash function seed */ + netdev_rss_key_fill(rss_key, sizeof(rss_key)); for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - wr32(hw, I40E_PFQF_HKEY(i), seed[i]); + wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]); /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | @@ -7341,7 +7514,7 @@ static void i40e_del_vxlan_port(struct net_device *netdev, #endif static int i40e_get_phys_port_id(struct net_device *netdev, - struct netdev_phys_port_id *ppid) + struct netdev_phys_item_id *ppid) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; @@ -7356,18 +7529,18 @@ static int i40e_get_phys_port_id(struct net_device *netdev, return 0; } -#ifdef HAVE_FDB_OPS -#ifdef USE_CONST_DEV_UC_CHAR +/** + * i40e_ndo_fdb_add - add an entry to the hardware database + * @ndm: the input from the stack + * @tb: pointer to array of nladdr (unused) + * @dev: the net device pointer + * @addr: the MAC address entry being added + * @flags: instructions from stack about fdb operation + */ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr, - u16 flags) -#else -static int i40e_ndo_fdb_add(struct ndmsg *ndm, - struct net_device *dev, - unsigned char *addr, + const unsigned char *addr, u16 vid, u16 flags) -#endif { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_pf *pf = np->vsi->back; @@ -7398,55 +7571,6 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, return err; } -#ifndef USE_DEFAULT_FDB_DEL_DUMP -#ifdef USE_CONST_DEV_UC_CHAR -static int i40e_ndo_fdb_del(struct ndmsg *ndm, - struct net_device *dev, - const unsigned char *addr) -#else -static int i40e_ndo_fdb_del(struct ndmsg *ndm, - struct net_device *dev, - unsigned char *addr) -#endif -{ - struct i40e_netdev_priv *np = netdev_priv(dev); - struct i40e_pf *pf = np->vsi->back; - int err = -EOPNOTSUPP; - - if (ndm->ndm_state & NUD_PERMANENT) { - netdev_info(dev, "FDB only supports static addresses\n"); - return -EINVAL; - } - - if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { - if (is_unicast_ether_addr(addr)) - err = dev_uc_del(dev, addr); - else if (is_multicast_ether_addr(addr)) - err = dev_mc_del(dev, addr); - else - err = -EINVAL; - } - - return err; -} - -static int i40e_ndo_fdb_dump(struct sk_buff *skb, - struct netlink_callback *cb, - struct net_device *dev, - struct net_device *filter_dev, - int idx) -{ - struct i40e_netdev_priv *np = netdev_priv(dev); - struct i40e_pf *pf = np->vsi->back; - - if (pf->flags & I40E_FLAG_SRIOV_ENABLED) - idx = ndo_dflt_fdb_dump(skb, cb, dev, filter_dev, idx); - - return idx; -} - -#endif /* USE_DEFAULT_FDB_DEL_DUMP */ -#endif /* HAVE_FDB_OPS */ static const struct net_device_ops i40e_netdev_ops = { .ndo_open = i40e_open, .ndo_stop = i40e_close, @@ -7480,13 +7604,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_del_vxlan_port = i40e_del_vxlan_port, #endif .ndo_get_phys_port_id = i40e_get_phys_port_id, -#ifdef HAVE_FDB_OPS .ndo_fdb_add = i40e_ndo_fdb_add, -#ifndef USE_DEFAULT_FDB_DEL_DUMP - .ndo_fdb_del = i40e_ndo_fdb_del, - .ndo_fdb_dump = i40e_ndo_fdb_dump, -#endif -#endif }; /** @@ -7682,6 +7800,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.uplink_seid = vsi->uplink_seid; ctxt.connection_type = 0x1; /* regular data port */ ctxt.flags = I40E_AQ_VSI_TYPE_PF; + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + 
cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); break; @@ -7931,8 +8053,8 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) vsi->num_q_vectors, vsi->idx); if (vsi->base_vector < 0) { dev_info(&pf->pdev->dev, - "failed to get queue tracking for VSI %d, err=%d\n", - vsi->seid, vsi->base_vector); + "failed to get tracking for %d vectors for VSI %d, err=%d\n", + vsi->num_q_vectors, vsi->seid, vsi->base_vector); i40e_vsi_free_q_vectors(vsi); ret = -ENOENT; goto vector_setup_out; @@ -7968,8 +8090,9 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); if (ret < 0) { - dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n", - vsi->seid, ret); + dev_info(&pf->pdev->dev, + "failed to get tracking for %d queues for VSI %d err=%d\n", + vsi->alloc_queue_pairs, vsi->seid, ret); goto err_vsi; } vsi->base_queue = ret; @@ -8066,7 +8189,15 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, vsi->tc_config.enabled_tc); - + if (veb) { + if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { + dev_info(&vsi->back->pdev->dev, + "%s: New VSI creation error, uplink seid of LAN VSI expected.\n", + __func__); + return NULL; + } + i40e_enable_pf_switch_lb(pf); + } for (i = 0; i < I40E_MAX_VEB && !veb; i++) { if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) veb = pf->veb[i]; @@ -8098,8 +8229,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); if (ret < 0) { - dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n", - vsi->seid, ret); + dev_info(&pf->pdev->dev, + "failed to get tracking for %d queues for VSI %d err=%d\n", + vsi->alloc_queue_pairs, vsi->seid, ret); goto err_vsi; } vsi->base_queue = ret; @@ -8206,6 +8338,7 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb) veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit); veb->bw_max_quanta = ets_data.tc_bw_max; veb->is_abs_credits = bw_data.absolute_credits_enable; + veb->enabled_tc = ets_data.tc_valid_bits; tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) | (le16_to_cpu(bw_data.tc_bw_max[1]) << 16); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { @@ -8715,6 +8848,14 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) i40e_update_link_info(&pf->hw, true); i40e_link_event(pf); + /* Initialize user-specific link properties */ + pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & + I40E_AQ_AN_COMPLETED) ? true : false); + + /* fill in link information and enable LSE reporting */ + i40e_update_link_info(&pf->hw, true); + i40e_link_event(pf); + /* Initialize user-specific link properties */ pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? 
true : false); @@ -8987,6 +9128,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->bus.func = PCI_FUNC(pdev->devfn); pf->instance = pfs_found; + if (debug != -1) { + pf->msg_enable = pf->hw.debug_mask; + pf->msg_enable = debug; + } + /* do a special CORER for clearing PXE mode once at init */ if (hw->revision_id == 0 && (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) { @@ -9012,9 +9158,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; + snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1, - "%s-pf%d:misc", - dev_driver_string(&pf->pdev->dev), pf->hw.pf_id); + "%s-%s:misc", + dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev)); err = i40e_init_shared_code(hw); if (err) { @@ -9158,6 +9305,22 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } + /* driver is only interested in link up/down and module qualification + * reports from firmware + */ + err = i40e_aq_set_phy_int_mask(&pf->hw, + I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); + if (err) + dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err); + + msleep(75); + err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); + if (err) { + dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); + } + /* The main driver is (mostly) up and happy. We need to set this state * before setting up the misc vector or we get a race and the vector * ends up disabled forever. diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 25c4f9a3011f59715f1d9489587d14c310a10097..3e70f2e45a4768986a0a90ee21bdf98790db9b53 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -61,7 +61,7 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw) } else { /* Blank programming mode */ nvm->blank_nvm_mode = true; ret_code = I40E_ERR_NVM_BLANK_MODE; - hw_dbg(hw, "NVM init error: unsupported blank mode.\n"); + i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n"); } return ret_code; @@ -80,46 +80,45 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw, { i40e_status ret_code = 0; u64 gtime, timeout; - u64 time = 0; + u64 time_left = 0; if (hw->nvm.blank_nvm_mode) goto i40e_i40e_acquire_nvm_exit; ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access, - 0, &time, NULL); + 0, &time_left, NULL); /* Reading the Global Device Timer */ gtime = rd32(hw, I40E_GLVFGEN_TIMER); /* Store the timeout */ - hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime; + hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime; - if (ret_code) { - /* Set the polling timeout */ - if (time > I40E_MAX_NVM_TIMEOUT) - timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) - + gtime; - else - timeout = hw->nvm.hw_semaphore_timeout; + if (ret_code) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n", + access, time_left, ret_code, hw->aq.asq_last_status); + + if (ret_code && time_left) { /* Poll until the current NVM owner timeouts */ - while (gtime < timeout) { + timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime; + while ((gtime < timeout) && time_left) { usleep_range(10000, 20000); + gtime = rd32(hw, I40E_GLVFGEN_TIMER); ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, - 
access, 0, &time, + access, 0, &time_left, NULL); if (!ret_code) { hw->nvm.hw_semaphore_timeout = - I40E_MS_TO_GTIME(time) + gtime; + I40E_MS_TO_GTIME(time_left) + gtime; break; } - gtime = rd32(hw, I40E_GLVFGEN_TIMER); } if (ret_code) { hw->nvm.hw_semaphore_timeout = 0; - hw->nvm.hw_semaphore_wait = - I40E_MS_TO_GTIME(time) + gtime; - hw_dbg(hw, "NVM acquire timed out, wait %llu ms before trying again.\n", - time); + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n", + time_left, ret_code, hw->aq.asq_last_status); } } @@ -160,7 +159,7 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) udelay(5); } if (ret_code == I40E_ERR_TIMEOUT) - hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n"); + i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set"); return ret_code; } @@ -179,7 +178,9 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u32 sr_reg; if (offset >= hw->nvm.sr_size) { - hw_dbg(hw, "NVM read error: Offset beyond Shadow RAM limit.\n"); + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM read error: offset %d beyond Shadow RAM limit %d\n", + offset, hw->nvm.sr_size); ret_code = I40E_ERR_PARAM; goto read_nvm_exit; } @@ -202,8 +203,9 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, } } if (ret_code) - hw_dbg(hw, "NVM read error: Couldn't access Shadow RAM address: 0x%x\n", - offset); + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM read error: Couldn't access Shadow RAM address: 0x%x\n", + offset); read_nvm_exit: return ret_code; @@ -263,14 +265,20 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, * Firmware will check the module-based model. */ if ((offset + words) > hw->nvm.sr_size) - hw_dbg(hw, "NVM write error: offset beyond Shadow RAM limit.\n"); + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write error: offset %d beyond Shadow RAM limit %d\n", + (offset + words), hw->nvm.sr_size); else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) /* We can write only up to 4KB (one sector), in one AQ write */ - hw_dbg(hw, "NVM write fail error: cannot write more than 4KB in a single write.\n"); + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write fail error: tried to write %d words, limit is %d.\n", + words, I40E_SR_SECTOR_SIZE_IN_WORDS); else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) /* A single write cannot spread over two sectors */ - hw_dbg(hw, "NVM write error: cannot spread over two sectors in a single write.\n"); + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n", + offset, words); else ret_code = i40e_aq_update_nvm(hw, module_pointer, 2 * offset, /*bytes*/ @@ -438,6 +446,22 @@ static inline u8 i40e_nvmupd_get_transaction(u32 val) return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT); } +static char *i40e_nvm_update_state_str[] = { + "I40E_NVMUPD_INVALID", + "I40E_NVMUPD_READ_CON", + "I40E_NVMUPD_READ_SNT", + "I40E_NVMUPD_READ_LCB", + "I40E_NVMUPD_READ_SA", + "I40E_NVMUPD_WRITE_ERA", + "I40E_NVMUPD_WRITE_CON", + "I40E_NVMUPD_WRITE_SNT", + "I40E_NVMUPD_WRITE_LCB", + "I40E_NVMUPD_WRITE_SA", + "I40E_NVMUPD_CSUM_CON", + "I40E_NVMUPD_CSUM_SA", + "I40E_NVMUPD_CSUM_LCB", +}; + /** * i40e_nvmupd_command - Process an NVM update command * @hw: pointer to hardware structure @@ -471,6 +495,8 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, default: /* invalid state, should never happen */ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: no such state 
%d\n", hw->nvmupd_state); status = I40E_NOT_SUPPORTED; *errno = -ESRCH; break; @@ -501,7 +527,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, case I40E_NVMUPD_READ_SA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (status) { - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); i40e_release_nvm(hw); @@ -511,17 +538,22 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, case I40E_NVMUPD_READ_SNT: status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (status) { - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); - hw->nvmupd_state = I40E_NVMUPD_STATE_READING; + if (status) + i40e_release_nvm(hw); + else + hw->nvmupd_state = I40E_NVMUPD_STATE_READING; } break; case I40E_NVMUPD_WRITE_ERA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_erase(hw, cmd, errno); if (status) @@ -534,7 +566,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, case I40E_NVMUPD_WRITE_SA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); if (status) @@ -547,22 +580,28 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, case I40E_NVMUPD_WRITE_SNT: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); - hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; + if (status) + i40e_release_nvm(hw); + else + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; } break; case I40E_NVMUPD_CSUM_SA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); } else { status = i40e_update_nvm_checksum(hw); if (status) { *errno = hw->aq.asq_last_status ? 
- i40e_aq_rc_to_posix(hw->aq.asq_last_status) : + i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : -EIO; i40e_release_nvm(hw); } else { @@ -572,6 +611,9 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, break; default: + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: bad cmd %s in init state\n", + i40e_nvm_update_state_str[upd_cmd]); status = I40E_ERR_NVM; *errno = -ESRCH; break; @@ -611,6 +653,9 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, break; default: + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: bad cmd %s in reading state.\n", + i40e_nvm_update_state_str[upd_cmd]); status = I40E_NOT_SUPPORTED; *errno = -ESRCH; break; @@ -644,33 +689,38 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, case I40E_NVMUPD_WRITE_LCB: status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); - if (!status) { + if (!status) hw->aq.nvm_release_on_done = true; - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; - } + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; break; case I40E_NVMUPD_CSUM_CON: status = i40e_update_nvm_checksum(hw); - if (status) + if (status) { *errno = hw->aq.asq_last_status ? - i40e_aq_rc_to_posix(hw->aq.asq_last_status) : + i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : -EIO; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + } break; case I40E_NVMUPD_CSUM_LCB: status = i40e_update_nvm_checksum(hw); - if (status) { + if (status) *errno = hw->aq.asq_last_status ? - i40e_aq_rc_to_posix(hw->aq.asq_last_status) : + i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : -EIO; - } else { + else hw->aq.nvm_release_on_done = true; - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; - } + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; break; default: + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: bad cmd %s in writing state.\n", + i40e_nvm_update_state_str[upd_cmd]); status = I40E_NOT_SUPPORTED; *errno = -ESRCH; break; @@ -702,8 +752,9 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, /* limits on data size */ if ((cmd->data_size < 1) || (cmd->data_size > I40E_NVMUPD_MAX_DATA)) { - hw_dbg(hw, "i40e_nvmupd_validate_command data_size %d\n", - cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_validate_command data_size %d\n", + cmd->data_size); *errno = -EFAULT; return I40E_NVMUPD_INVALID; } @@ -755,12 +806,16 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, } break; } + i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n", + i40e_nvm_update_state_str[upd_cmd], + hw->nvmupd_state, + hw->aq.nvm_release_on_done); if (upd_cmd == I40E_NVMUPD_INVALID) { *errno = -EFAULT; - hw_dbg(hw, - "i40e_nvmupd_validate_command returns %d errno: %d\n", - upd_cmd, *errno); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_validate_command returns %d errno %d\n", + upd_cmd, *errno); } return upd_cmd; } @@ -785,14 +840,18 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, transaction = i40e_nvmupd_get_transaction(cmd->config); module = i40e_nvmupd_get_module(cmd->config); last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA); - hw_dbg(hw, "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n", - module, cmd->offset, cmd->data_size); status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size, bytes, last, NULL); - hw_dbg(hw, "i40e_nvmupd_nvm_read status %d\n", status); - if (status) - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n", + module, 
cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_read status %d aq %d\n", + status, hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } return status; } @@ -816,13 +875,17 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, transaction = i40e_nvmupd_get_transaction(cmd->config); module = i40e_nvmupd_get_module(cmd->config); last = (transaction & I40E_NVM_LCB); - hw_dbg(hw, "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n", - module, cmd->offset, cmd->data_size); status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size, last, NULL); - hw_dbg(hw, "i40e_nvmupd_nvm_erase status %d\n", status); - if (status) - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n", + module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_erase status %d aq %d\n", + status, hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } return status; } @@ -847,13 +910,18 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, transaction = i40e_nvmupd_get_transaction(cmd->config); module = i40e_nvmupd_get_module(cmd->config); last = (transaction & I40E_NVM_LCB); - hw_dbg(hw, "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n", - module, cmd->offset, cmd->data_size); + status = i40e_aq_update_nvm(hw, module, cmd->offset, (u16)cmd->data_size, bytes, last, NULL); - hw_dbg(hw, "i40e_nvmupd_nvm_write status %d\n", status); - if (status) - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n", + module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_write status %d aq %d\n", + status, hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } return status; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 0988b5c1fe87d080e950f146c4c0c0b8ae231189..2fb4306597e8222fbc7c84d535ed1a307af6d2cd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -84,6 +84,8 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, bool atomic_reset); +i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, @@ -173,6 +175,9 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, u16 udp_port, u8 protocol_index, u8 *filter_index, @@ -228,6 +233,10 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, + struct 
i40e_asq_cmd_details *cmd_details); +i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw, + struct i40e_lldp_variables *lldp_cfg); /* i40e_common */ i40e_status i40e_init_shared_code(struct i40e_hw *hw); i40e_status i40e_pf_reset(struct i40e_hw *hw); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 537b6216971d3b77c9ca169f01e2f6aaeb1f2ad1..6d1ec926aa3713a6ebc40fa52aa2ee873f8292f4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -382,11 +382,17 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) incval = I40E_PTP_1GB_INCVAL; break; case I40E_LINK_SPEED_100MB: - dev_warn(&pf->pdev->dev, - "%s: 1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n", - __func__); + { + static int warn_once; + + if (!warn_once) { + dev_warn(&pf->pdev->dev, + "1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n"); + warn_once++; + } incval = 0; break; + } case I40E_LINK_SPEED_40GB: default: incval = I40E_PTP_40GB_INCVAL; @@ -418,6 +424,9 @@ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr) { struct hwtstamp_config *config = &pf->tstamp_config; + if (!(pf->flags & I40E_FLAG_PTP)) + return -EOPNOTSUPP; + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? -EFAULT : 0; } @@ -438,22 +447,12 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, struct hwtstamp_config *config) { struct i40e_hw *hw = &pf->hw; - u32 pf_id, tsyntype, regval; + u32 tsyntype, regval; /* Reserved for future extensions. */ if (config->flags) return -EINVAL; - /* Confirm that 1588 is supported on this PF. */ - pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >> - I40E_PRTTSYN_CTL0_PF_ID_SHIFT; - if (hw->pf_id != pf_id) { - dev_err(&pf->pdev->dev, - "PF %d attempted to control timestamp mode on port %d, which is owned by PF %d\n", - hw->pf_id, hw->port, pf_id); - return -EPERM; - } - switch (config->tx_type) { case HWTSTAMP_TX_OFF: pf->ptp_tx = false; @@ -556,6 +555,9 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr) struct hwtstamp_config config; int err; + if (!(pf->flags & I40E_FLAG_PTP)) + return -EOPNOTSUPP; + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; @@ -625,8 +627,22 @@ void i40e_ptp_init(struct i40e_pf *pf) { struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev; struct i40e_hw *hw = &pf->hw; + u32 pf_id; long err; + /* Only one PF is assigned to control 1588 logic per port. 
Do not + * enable any support for PFs not assigned via PRTTSYN_CTL0.PF_ID + */ + pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >> + I40E_PRTTSYN_CTL0_PF_ID_SHIFT; + if (hw->pf_id != pf_id) { + pf->flags &= ~I40E_FLAG_PTP; + dev_info(&pf->pdev->dev, "%s: PTP not supported on %s\n", + __func__, + netdev->name); + return; + } + /* we have to initialize the lock first, since we can't control * when the user will enter the PHC device entry points */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 3195d82e49425369951185ccc1e664a8485ea716..04b441460bbda6e36cb64731d24247a1b954e506 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2399,12 +2399,8 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) /* hardware can't handle really short frames, hardware padding works * beyond this point */ - if (unlikely(skb->len < I40E_MIN_TX_LEN)) { - if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len)) - return NETDEV_TX_OK; - skb->len = I40E_MIN_TX_LEN; - skb_set_tail_pointer(skb, I40E_MIN_TX_LEN); - } + if (skb_put_padto(skb, I40E_MIN_TX_LEN)) + return NETDEV_TX_OK; return i40e_xmit_frame_ring(skb, tx_ring); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index d7a625a6a14f54fa1b79456df9de7d14f9ff1809..e60d3accb2e2ec3f2992b056da718d633b978157 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -30,10 +30,7 @@ /* Interrupt Throttling and Rate Limiting Goodies */ #define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ -#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */ -#define I40E_MAX_IRATE 0x03F -#define I40E_MIN_IRATE 0x001 -#define I40E_IRATE_USEC_RESOLUTION 4 +#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */ #define I40E_ITR_100K 0x0005 #define I40E_ITR_20K 0x0019 #define I40E_ITR_8K 0x003E diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index ce04d9093db63b99c46a0e055525a347a5f2d48a..c1f2eb96335771fd9eae58623bd98a90132cbe41 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -43,6 +43,7 @@ #define I40E_DEV_ID_QSFP_A 0x1583 #define I40E_DEV_ID_QSFP_B 0x1584 #define I40E_DEV_ID_QSFP_C 0x1585 +#define I40E_DEV_ID_10G_BASE_T 0x1586 #define I40E_DEV_ID_VF 0x154C #define I40E_DEV_ID_VF_HV 0x1571 @@ -260,8 +261,7 @@ enum i40e_aq_resource_access_type { }; struct i40e_nvm_info { - u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */ - u64 hw_semaphore_wait; /* - || - */ + u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */ u32 timeout; /* [ms] */ u16 sr_size; /* Shadow RAM size in words */ bool blank_nvm_mode; /* is NVM empty (no FW present)*/ @@ -380,9 +380,18 @@ struct i40e_fc_info { #define I40E_MAX_USER_PRIORITY 8 #define I40E_DCBX_MAX_APPS 32 #define I40E_LLDPDU_SIZE 1500 - -/* IEEE 802.1Qaz ETS Configuration data */ -struct i40e_ieee_ets_config { +#define I40E_TLV_STATUS_OPER 0x1 +#define I40E_TLV_STATUS_SYNC 0x2 +#define I40E_TLV_STATUS_ERR 0x4 +#define I40E_CEE_OPER_MAX_APPS 3 +#define I40E_APP_PROTOID_FCOE 0x8906 +#define I40E_APP_PROTOID_ISCSI 0x0cbc +#define I40E_APP_PROTOID_FIP 0x8914 +#define I40E_APP_SEL_ETHTYPE 0x1 +#define I40E_APP_SEL_TCPIP 0x2 + +/* CEE or IEEE 802.1Qaz ETS Configuration data */ +struct i40e_dcb_ets_config { u8 willing; u8 cbs; u8 
maxtcs; @@ -391,34 +400,30 @@ struct i40e_ieee_ets_config { u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; }; -/* IEEE 802.1Qaz ETS Recommendation data */ -struct i40e_ieee_ets_recommend { - u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; - u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; - u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; -}; - -/* IEEE 802.1Qaz PFC Configuration data */ -struct i40e_ieee_pfc_config { +/* CEE or IEEE 802.1Qaz PFC Configuration data */ +struct i40e_dcb_pfc_config { u8 willing; u8 mbc; u8 pfccap; u8 pfcenable; }; -/* IEEE 802.1Qaz Application Priority data */ -struct i40e_ieee_app_priority_table { +/* CEE or IEEE 802.1Qaz Application Priority data */ +struct i40e_dcb_app_priority_table { u8 priority; u8 selector; u16 protocolid; }; struct i40e_dcbx_config { + u8 dcbx_mode; +#define I40E_DCBX_MODE_CEE 0x1 +#define I40E_DCBX_MODE_IEEE 0x2 u32 numapps; - struct i40e_ieee_ets_config etscfg; - struct i40e_ieee_ets_recommend etsrec; - struct i40e_ieee_pfc_config pfc; - struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS]; + struct i40e_dcb_ets_config etscfg; + struct i40e_dcb_ets_config etsrec; + struct i40e_dcb_pfc_config pfc; + struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS]; }; /* Port hardware description */ @@ -476,6 +481,11 @@ struct i40e_hw { u32 debug_mask; }; +static inline bool i40e_is_vf(struct i40e_hw *hw) +{ + return hw->mac.type == I40E_MAC_VF; +} + struct i40e_driver_version { u8 major_version; u8 minor_version; @@ -1371,6 +1381,18 @@ enum i40e_reset_type { I40E_RESET_EMPR = 3, }; +/* IEEE 802.1AB LLDP Agent Variables from NVM */ +#define I40E_NVM_LLDP_CFG_PTR 0xD +struct i40e_lldp_variables { + u16 length; + u16 adminstatus; + u16 msgfasttx; + u16 msgtxinterval; + u16 txparams; + u16 timers; + u16 crc8; +}; + /* RSS Hash Table Size */ #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 #endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h index 70951d2edcad9f04a9cc4bac25701dc61a4c4484..61dd1b18762476fd34312c2eafced083c2c86bfb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -79,6 +79,7 @@ enum i40e_virtchnl_ops { I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_VIRTCHNL_OP_GET_STATS, I40E_VIRTCHNL_OP_FCOE, + I40E_VIRTCHNL_OP_CONFIG_RSS, /* PF sends status change events to vfs using * the following op. 
*/ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 4eeed267e4b71ce9d242d3a71fccd5ad38600c8f..5bae89550657c19e9c5eeae08b13615c9744f158 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -674,7 +674,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) * that the requested op was completed * successfully */ - udelay(10); + usleep_range(10, 20); reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { rsd = true; @@ -707,7 +707,6 @@ complete_reset: wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); i40e_flush(hw); } -#ifdef CONFIG_PCI_IOV /** * i40e_enable_pf_switch_lb @@ -715,7 +714,7 @@ complete_reset: * * enable switch loop back or die - no point in a return value **/ -static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) +void i40e_enable_pf_switch_lb(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_vsi_context ctxt; @@ -742,7 +741,6 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) __func__, vsi->back->hw.aq.asq_last_status); } } -#endif /** * i40e_disable_pf_switch_lb @@ -1869,6 +1867,12 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) return 0; + /* re-enable vflr interrupt cause */ + reg = rd32(hw, I40E_PFINT_ICR0_ENA); + reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; + wr32(hw, I40E_PFINT_ICR0_ENA, reg); + i40e_flush(hw); + clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; @@ -1885,12 +1889,6 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) } } - /* re-enable vflr interrupt cause */ - reg = rd32(hw, I40E_PFINT_ICR0_ENA); - reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; - wr32(hw, I40E_PFINT_ICR0_ENA, reg); - i40e_flush(hw); - return 0; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 0adc61e1052db9dabdcbea3d8930ec709e1868ef..9452f5247cffb7f03803aee6230b93be6873ccf9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -126,5 +126,6 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable); void i40e_vc_notify_link_state(struct i40e_pf *pf); void i40e_vc_notify_reset(struct i40e_pf *pf); +void i40e_enable_pf_switch_lb(struct i40e_pf *pf); #endif /* _I40E_VIRTCHNL_PF_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index f206be9178422afc01a955f481f8e523cbb3093c..c1d25f8c1abca3276550d1932176f78f39b6859e 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -49,7 +49,7 @@ static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) static void i40e_adminq_init_regs(struct i40e_hw *hw) { /* set head and tail registers in our local struct */ - if (hw->mac.type == I40E_MAC_VF) { + if (i40e_is_vf(hw)) { hw->aq.asq.tail = I40E_VF_ATQT1; hw->aq.asq.head = I40E_VF_ATQH1; hw->aq.asq.len = I40E_VF_ATQLEN1; @@ -801,7 +801,6 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, */ if (!details->async && !details->postpone) { u32 total_delay = 0; - u32 delay_len = 10; do { /* AQ designers suggest use of head for better @@ -809,9 +808,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, */ if 
(i40evf_asq_done(hw)) break; - /* ugh! delay while spin_lock */ - udelay(delay_len); - total_delay += delay_len; + usleep_range(1000, 2000); + total_delay++; } while (total_delay < hw->aq.asq_cmd_timeout); } @@ -838,9 +836,6 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; } - if (i40e_is_nvm_update_op(desc)) - hw->aq.nvm_busy = true; - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer writeback:\n"); i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, @@ -907,9 +902,6 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, - "AQRX: Queue is empty.\n"); ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; goto clean_arq_element_out; } @@ -931,13 +923,10 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, e->desc = *desc; datalen = le16_to_cpu(desc->datalen); - e->msg_size = min(datalen, e->msg_size); - if (e->msg_buf != NULL && (e->msg_size != 0)) + e->msg_len = min(datalen, e->buf_len); + if (e->msg_buf != NULL && (e->msg_len != 0)) memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, - e->msg_size); - - if (i40e_is_nvm_update_op(&e->desc)) - hw->aq.nvm_busy = false; + e->msg_len); i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h index 91a5c5bd80f32242b61920e3f02934fdfb6be251..6c31bf22c2c31f88e996a402c2a4156cd9239f99 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h @@ -28,6 +28,7 @@ #define _I40E_ADMINQ_H_ #include "i40e_osdep.h" +#include "i40e_status.h" #include "i40e_adminq_cmd.h" #define I40E_ADMINQ_DESC(R, i) \ @@ -76,7 +77,8 @@ struct i40e_asq_cmd_details { /* ARQ event information */ struct i40e_arq_event_info { struct i40e_aq_desc desc; - u16 msg_size; + u16 msg_len; + u16 buf_len; u8 *msg_buf; }; @@ -93,7 +95,6 @@ struct i40e_adminq_info { u16 fw_min_ver; /* firmware minor version */ u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ - bool nvm_busy; bool nvm_release_on_done; struct mutex asq_mutex; /* Send queue lock */ @@ -108,7 +109,7 @@ struct i40e_adminq_info { * i40e_aq_rc_to_posix - convert errors to user-land codes * aq_rc: AdminQ error code to convert **/ -static inline int i40e_aq_rc_to_posix(u16 aq_rc) +static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc) { int aq_to_posix[] = { 0, /* I40E_AQ_RC_OK */ @@ -136,12 +137,18 @@ static inline int i40e_aq_rc_to_posix(u16 aq_rc) -EFBIG, /* I40E_AQ_RC_EFBIG */ }; + /* aq_rc is invalid if AQ timed out */ + if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT) + return -EAGAIN; + + if (aq_rc >= ARRAY_SIZE(aq_to_posix)) + return -ERANGE; return aq_to_posix[aq_rc]; } /* general information */ #define I40E_AQ_LARGE_BUF 512 -#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */ +#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */ void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index e656ea7a79207296eea4da9975194239bae4a4bc..ff1b16370da93231854ef076ccf8cb2a99e25299 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ 
b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -33,8 +33,8 @@ * This file needs to comply with the Linux Kernel coding style. */ -#define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0002 +#define I40E_FW_API_VERSION_MAJOR 0x0001 +#define I40E_FW_API_VERSION_MINOR 0x0002 #define I40E_FW_API_VERSION_A0_MINOR 0x0000 struct i40e_aq_desc { @@ -67,216 +67,216 @@ struct i40e_aq_desc { */ /* command flags and offsets*/ -#define I40E_AQ_FLAG_DD_SHIFT 0 -#define I40E_AQ_FLAG_CMP_SHIFT 1 -#define I40E_AQ_FLAG_ERR_SHIFT 2 -#define I40E_AQ_FLAG_VFE_SHIFT 3 -#define I40E_AQ_FLAG_LB_SHIFT 9 -#define I40E_AQ_FLAG_RD_SHIFT 10 -#define I40E_AQ_FLAG_VFC_SHIFT 11 -#define I40E_AQ_FLAG_BUF_SHIFT 12 -#define I40E_AQ_FLAG_SI_SHIFT 13 -#define I40E_AQ_FLAG_EI_SHIFT 14 -#define I40E_AQ_FLAG_FE_SHIFT 15 - -#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ -#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ -#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ -#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ -#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ -#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ -#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ +#define I40E_AQ_FLAG_DD_SHIFT 0 +#define I40E_AQ_FLAG_CMP_SHIFT 1 +#define I40E_AQ_FLAG_ERR_SHIFT 2 +#define I40E_AQ_FLAG_VFE_SHIFT 3 +#define I40E_AQ_FLAG_LB_SHIFT 9 +#define I40E_AQ_FLAG_RD_SHIFT 10 +#define I40E_AQ_FLAG_VFC_SHIFT 11 +#define I40E_AQ_FLAG_BUF_SHIFT 12 +#define I40E_AQ_FLAG_SI_SHIFT 13 +#define I40E_AQ_FLAG_EI_SHIFT 14 +#define I40E_AQ_FLAG_FE_SHIFT 15 + +#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ /* error codes */ enum i40e_admin_queue_err { - I40E_AQ_RC_OK = 0, /* success */ - I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ - I40E_AQ_RC_ENOENT = 2, /* No such element */ - I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ - I40E_AQ_RC_EINTR = 4, /* operation interrupted */ - I40E_AQ_RC_EIO = 5, /* I/O error */ - I40E_AQ_RC_ENXIO = 6, /* No such resource */ - I40E_AQ_RC_E2BIG = 7, /* Arg too long */ - I40E_AQ_RC_EAGAIN = 8, /* Try again */ - I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ - I40E_AQ_RC_EACCES = 10, /* Permission denied */ - I40E_AQ_RC_EFAULT = 11, /* Bad address */ - I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ - I40E_AQ_RC_EEXIST = 13, /* object already exists */ - I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ - I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ - I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ - I40E_AQ_RC_ENOSYS = 17, /* 
Function not implemented */ - I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ - I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev cmd error */ - I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ - I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ - I40E_AQ_RC_EFBIG = 22, /* File too large */ + I40E_AQ_RC_OK = 0, /* success */ + I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ + I40E_AQ_RC_ENOENT = 2, /* No such element */ + I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ + I40E_AQ_RC_EINTR = 4, /* operation interrupted */ + I40E_AQ_RC_EIO = 5, /* I/O error */ + I40E_AQ_RC_ENXIO = 6, /* No such resource */ + I40E_AQ_RC_E2BIG = 7, /* Arg too long */ + I40E_AQ_RC_EAGAIN = 8, /* Try again */ + I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ + I40E_AQ_RC_EACCES = 10, /* Permission denied */ + I40E_AQ_RC_EFAULT = 11, /* Bad address */ + I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ + I40E_AQ_RC_EEXIST = 13, /* object already exists */ + I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ + I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ + I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ + I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ + I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ + I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ + I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ + I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ + I40E_AQ_RC_EFBIG = 22, /* File too large */ }; /* Admin Queue command opcodes */ enum i40e_admin_queue_opc { /* aq commands */ - i40e_aqc_opc_get_version = 0x0001, - i40e_aqc_opc_driver_version = 0x0002, - i40e_aqc_opc_queue_shutdown = 0x0003, - i40e_aqc_opc_set_pf_context = 0x0004, + i40e_aqc_opc_get_version = 0x0001, + i40e_aqc_opc_driver_version = 0x0002, + i40e_aqc_opc_queue_shutdown = 0x0003, + i40e_aqc_opc_set_pf_context = 0x0004, /* resource ownership */ - i40e_aqc_opc_request_resource = 0x0008, - i40e_aqc_opc_release_resource = 0x0009, + i40e_aqc_opc_request_resource = 0x0008, + i40e_aqc_opc_release_resource = 0x0009, - i40e_aqc_opc_list_func_capabilities = 0x000A, - i40e_aqc_opc_list_dev_capabilities = 0x000B, + i40e_aqc_opc_list_func_capabilities = 0x000A, + i40e_aqc_opc_list_dev_capabilities = 0x000B, - i40e_aqc_opc_set_cppm_configuration = 0x0103, - i40e_aqc_opc_set_arp_proxy_entry = 0x0104, - i40e_aqc_opc_set_ns_proxy_entry = 0x0105, + i40e_aqc_opc_set_cppm_configuration = 0x0103, + i40e_aqc_opc_set_arp_proxy_entry = 0x0104, + i40e_aqc_opc_set_ns_proxy_entry = 0x0105, /* LAA */ - i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ - i40e_aqc_opc_mac_address_read = 0x0107, - i40e_aqc_opc_mac_address_write = 0x0108, + i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ + i40e_aqc_opc_mac_address_read = 0x0107, + i40e_aqc_opc_mac_address_write = 0x0108, /* PXE */ - i40e_aqc_opc_clear_pxe_mode = 0x0110, + i40e_aqc_opc_clear_pxe_mode = 0x0110, /* internal switch commands */ - i40e_aqc_opc_get_switch_config = 0x0200, - i40e_aqc_opc_add_statistics = 0x0201, - i40e_aqc_opc_remove_statistics = 0x0202, - i40e_aqc_opc_set_port_parameters = 0x0203, - i40e_aqc_opc_get_switch_resource_alloc = 0x0204, - - i40e_aqc_opc_add_vsi = 0x0210, - i40e_aqc_opc_update_vsi_parameters = 0x0211, - i40e_aqc_opc_get_vsi_parameters = 0x0212, - - i40e_aqc_opc_add_pv = 0x0220, - i40e_aqc_opc_update_pv_parameters = 0x0221, - i40e_aqc_opc_get_pv_parameters = 0x0222, - - i40e_aqc_opc_add_veb = 0x0230, - i40e_aqc_opc_update_veb_parameters = 0x0231, - i40e_aqc_opc_get_veb_parameters = 
0x0232, - - i40e_aqc_opc_delete_element = 0x0243, - - i40e_aqc_opc_add_macvlan = 0x0250, - i40e_aqc_opc_remove_macvlan = 0x0251, - i40e_aqc_opc_add_vlan = 0x0252, - i40e_aqc_opc_remove_vlan = 0x0253, - i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, - i40e_aqc_opc_add_tag = 0x0255, - i40e_aqc_opc_remove_tag = 0x0256, - i40e_aqc_opc_add_multicast_etag = 0x0257, - i40e_aqc_opc_remove_multicast_etag = 0x0258, - i40e_aqc_opc_update_tag = 0x0259, - i40e_aqc_opc_add_control_packet_filter = 0x025A, - i40e_aqc_opc_remove_control_packet_filter = 0x025B, - i40e_aqc_opc_add_cloud_filters = 0x025C, - i40e_aqc_opc_remove_cloud_filters = 0x025D, - - i40e_aqc_opc_add_mirror_rule = 0x0260, - i40e_aqc_opc_delete_mirror_rule = 0x0261, + i40e_aqc_opc_get_switch_config = 0x0200, + i40e_aqc_opc_add_statistics = 0x0201, + i40e_aqc_opc_remove_statistics = 0x0202, + i40e_aqc_opc_set_port_parameters = 0x0203, + i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + + i40e_aqc_opc_add_vsi = 0x0210, + i40e_aqc_opc_update_vsi_parameters = 0x0211, + i40e_aqc_opc_get_vsi_parameters = 0x0212, + + i40e_aqc_opc_add_pv = 0x0220, + i40e_aqc_opc_update_pv_parameters = 0x0221, + i40e_aqc_opc_get_pv_parameters = 0x0222, + + i40e_aqc_opc_add_veb = 0x0230, + i40e_aqc_opc_update_veb_parameters = 0x0231, + i40e_aqc_opc_get_veb_parameters = 0x0232, + + i40e_aqc_opc_delete_element = 0x0243, + + i40e_aqc_opc_add_macvlan = 0x0250, + i40e_aqc_opc_remove_macvlan = 0x0251, + i40e_aqc_opc_add_vlan = 0x0252, + i40e_aqc_opc_remove_vlan = 0x0253, + i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, + i40e_aqc_opc_add_tag = 0x0255, + i40e_aqc_opc_remove_tag = 0x0256, + i40e_aqc_opc_add_multicast_etag = 0x0257, + i40e_aqc_opc_remove_multicast_etag = 0x0258, + i40e_aqc_opc_update_tag = 0x0259, + i40e_aqc_opc_add_control_packet_filter = 0x025A, + i40e_aqc_opc_remove_control_packet_filter = 0x025B, + i40e_aqc_opc_add_cloud_filters = 0x025C, + i40e_aqc_opc_remove_cloud_filters = 0x025D, + + i40e_aqc_opc_add_mirror_rule = 0x0260, + i40e_aqc_opc_delete_mirror_rule = 0x0261, /* DCB commands */ - i40e_aqc_opc_dcb_ignore_pfc = 0x0301, - i40e_aqc_opc_dcb_updated = 0x0302, + i40e_aqc_opc_dcb_ignore_pfc = 0x0301, + i40e_aqc_opc_dcb_updated = 0x0302, /* TX scheduler */ - i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, - i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, - i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, - i40e_aqc_opc_query_vsi_bw_config = 0x0408, - i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, - i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, - - i40e_aqc_opc_enable_switching_comp_ets = 0x0413, - i40e_aqc_opc_modify_switching_comp_ets = 0x0414, - i40e_aqc_opc_disable_switching_comp_ets = 0x0415, - i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, - i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, - i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, - i40e_aqc_opc_query_port_ets_config = 0x0419, - i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, - i40e_aqc_opc_suspend_port_tx = 0x041B, - i40e_aqc_opc_resume_port_tx = 0x041C, - i40e_aqc_opc_configure_partition_bw = 0x041D, + i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, + i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, + i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, + i40e_aqc_opc_query_vsi_bw_config = 0x0408, + i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, + i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, + + i40e_aqc_opc_enable_switching_comp_ets = 0x0413, + i40e_aqc_opc_modify_switching_comp_ets = 0x0414, + 
i40e_aqc_opc_disable_switching_comp_ets = 0x0415, + i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, + i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, + i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, + i40e_aqc_opc_query_port_ets_config = 0x0419, + i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, + i40e_aqc_opc_suspend_port_tx = 0x041B, + i40e_aqc_opc_resume_port_tx = 0x041C, + i40e_aqc_opc_configure_partition_bw = 0x041D, /* hmc */ - i40e_aqc_opc_query_hmc_resource_profile = 0x0500, - i40e_aqc_opc_set_hmc_resource_profile = 0x0501, + i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + i40e_aqc_opc_set_hmc_resource_profile = 0x0501, /* phy commands*/ - i40e_aqc_opc_get_phy_abilities = 0x0600, - i40e_aqc_opc_set_phy_config = 0x0601, - i40e_aqc_opc_set_mac_config = 0x0603, - i40e_aqc_opc_set_link_restart_an = 0x0605, - i40e_aqc_opc_get_link_status = 0x0607, - i40e_aqc_opc_set_phy_int_mask = 0x0613, - i40e_aqc_opc_get_local_advt_reg = 0x0614, - i40e_aqc_opc_set_local_advt_reg = 0x0615, - i40e_aqc_opc_get_partner_advt = 0x0616, - i40e_aqc_opc_set_lb_modes = 0x0618, - i40e_aqc_opc_get_phy_wol_caps = 0x0621, - i40e_aqc_opc_set_phy_debug = 0x0622, - i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + i40e_aqc_opc_get_phy_abilities = 0x0600, + i40e_aqc_opc_set_phy_config = 0x0601, + i40e_aqc_opc_set_mac_config = 0x0603, + i40e_aqc_opc_set_link_restart_an = 0x0605, + i40e_aqc_opc_get_link_status = 0x0607, + i40e_aqc_opc_set_phy_int_mask = 0x0613, + i40e_aqc_opc_get_local_advt_reg = 0x0614, + i40e_aqc_opc_set_local_advt_reg = 0x0615, + i40e_aqc_opc_get_partner_advt = 0x0616, + i40e_aqc_opc_set_lb_modes = 0x0618, + i40e_aqc_opc_get_phy_wol_caps = 0x0621, + i40e_aqc_opc_set_phy_debug = 0x0622, + i40e_aqc_opc_upload_ext_phy_fm = 0x0625, /* NVM commands */ - i40e_aqc_opc_nvm_read = 0x0701, - i40e_aqc_opc_nvm_erase = 0x0702, - i40e_aqc_opc_nvm_update = 0x0703, - i40e_aqc_opc_nvm_config_read = 0x0704, - i40e_aqc_opc_nvm_config_write = 0x0705, + i40e_aqc_opc_nvm_read = 0x0701, + i40e_aqc_opc_nvm_erase = 0x0702, + i40e_aqc_opc_nvm_update = 0x0703, + i40e_aqc_opc_nvm_config_read = 0x0704, + i40e_aqc_opc_nvm_config_write = 0x0705, /* virtualization commands */ - i40e_aqc_opc_send_msg_to_pf = 0x0801, - i40e_aqc_opc_send_msg_to_vf = 0x0802, - i40e_aqc_opc_send_msg_to_peer = 0x0803, + i40e_aqc_opc_send_msg_to_pf = 0x0801, + i40e_aqc_opc_send_msg_to_vf = 0x0802, + i40e_aqc_opc_send_msg_to_peer = 0x0803, /* alternate structure */ - i40e_aqc_opc_alternate_write = 0x0900, - i40e_aqc_opc_alternate_write_indirect = 0x0901, - i40e_aqc_opc_alternate_read = 0x0902, - i40e_aqc_opc_alternate_read_indirect = 0x0903, - i40e_aqc_opc_alternate_write_done = 0x0904, - i40e_aqc_opc_alternate_set_mode = 0x0905, - i40e_aqc_opc_alternate_clear_port = 0x0906, + i40e_aqc_opc_alternate_write = 0x0900, + i40e_aqc_opc_alternate_write_indirect = 0x0901, + i40e_aqc_opc_alternate_read = 0x0902, + i40e_aqc_opc_alternate_read_indirect = 0x0903, + i40e_aqc_opc_alternate_write_done = 0x0904, + i40e_aqc_opc_alternate_set_mode = 0x0905, + i40e_aqc_opc_alternate_clear_port = 0x0906, /* LLDP commands */ - i40e_aqc_opc_lldp_get_mib = 0x0A00, - i40e_aqc_opc_lldp_update_mib = 0x0A01, - i40e_aqc_opc_lldp_add_tlv = 0x0A02, - i40e_aqc_opc_lldp_update_tlv = 0x0A03, - i40e_aqc_opc_lldp_delete_tlv = 0x0A04, - i40e_aqc_opc_lldp_stop = 0x0A05, - i40e_aqc_opc_lldp_start = 0x0A06, + i40e_aqc_opc_lldp_get_mib = 0x0A00, + i40e_aqc_opc_lldp_update_mib = 0x0A01, + i40e_aqc_opc_lldp_add_tlv = 0x0A02, + i40e_aqc_opc_lldp_update_tlv = 0x0A03, 
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04, + i40e_aqc_opc_lldp_stop = 0x0A05, + i40e_aqc_opc_lldp_start = 0x0A06, /* Tunnel commands */ - i40e_aqc_opc_add_udp_tunnel = 0x0B00, - i40e_aqc_opc_del_udp_tunnel = 0x0B01, - i40e_aqc_opc_tunnel_key_structure = 0x0B10, + i40e_aqc_opc_add_udp_tunnel = 0x0B00, + i40e_aqc_opc_del_udp_tunnel = 0x0B01, + i40e_aqc_opc_tunnel_key_structure = 0x0B10, /* Async Events */ - i40e_aqc_opc_event_lan_overflow = 0x1001, + i40e_aqc_opc_event_lan_overflow = 0x1001, /* OEM commands */ - i40e_aqc_opc_oem_parameter_change = 0xFE00, - i40e_aqc_opc_oem_device_status_change = 0xFE01, + i40e_aqc_opc_oem_parameter_change = 0xFE00, + i40e_aqc_opc_oem_device_status_change = 0xFE01, /* debug commands */ - i40e_aqc_opc_debug_get_deviceid = 0xFF00, - i40e_aqc_opc_debug_set_mode = 0xFF01, - i40e_aqc_opc_debug_read_reg = 0xFF03, - i40e_aqc_opc_debug_write_reg = 0xFF04, - i40e_aqc_opc_debug_modify_reg = 0xFF07, - i40e_aqc_opc_debug_dump_internals = 0xFF08, - i40e_aqc_opc_debug_modify_internals = 0xFF09, + i40e_aqc_opc_debug_get_deviceid = 0xFF00, + i40e_aqc_opc_debug_set_mode = 0xFF01, + i40e_aqc_opc_debug_read_reg = 0xFF03, + i40e_aqc_opc_debug_write_reg = 0xFF04, + i40e_aqc_opc_debug_modify_reg = 0xFF07, + i40e_aqc_opc_debug_dump_internals = 0xFF08, + i40e_aqc_opc_debug_modify_internals = 0xFF09, }; /* command structures and indirect data structures */ @@ -303,7 +303,7 @@ enum i40e_admin_queue_opc { /* This macro is used extensively to ensure that command structures are 16 * bytes in length as they have to map to the raw array of that size. */ -#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) +#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) /* internal (0x00XX) commands */ @@ -321,22 +321,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); /* Send driver version (indirect 0x0002) */ struct i40e_aqc_driver_version { - u8 driver_major_ver; - u8 driver_minor_ver; - u8 driver_build_ver; - u8 driver_subbuild_ver; - u8 reserved[4]; - __le32 address_high; - __le32 address_low; + u8 driver_major_ver; + u8 driver_minor_ver; + u8 driver_build_ver; + u8 driver_subbuild_ver; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); /* Queue Shutdown (direct 0x0003) */ struct i40e_aqc_queue_shutdown { - __le32 driver_unloading; -#define I40E_AQ_DRIVER_UNLOADING 0x1 - u8 reserved[12]; + __le32 driver_unloading; +#define I40E_AQ_DRIVER_UNLOADING 0x1 + u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); @@ -352,19 +352,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); /* Request resource ownership (direct 0x0008) * Release resource ownership (direct 0x0009) */ -#define I40E_AQ_RESOURCE_NVM 1 -#define I40E_AQ_RESOURCE_SDP 2 -#define I40E_AQ_RESOURCE_ACCESS_READ 1 -#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 -#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 -#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 +#define I40E_AQ_RESOURCE_NVM 1 +#define I40E_AQ_RESOURCE_SDP 2 +#define I40E_AQ_RESOURCE_ACCESS_READ 1 +#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 +#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 +#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 struct i40e_aqc_request_resource { - __le16 resource_id; - __le16 access_type; - __le32 timeout; - __le32 resource_number; - u8 reserved[4]; + __le16 resource_id; + __le16 access_type; + __le32 timeout; + __le32 resource_number; + u8 reserved[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); @@ -374,7 +374,7 @@ 
I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); */ struct i40e_aqc_list_capabilites { u8 command_flags; -#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 +#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 u8 pf_index; u8 reserved[2]; __le32 count; @@ -385,123 +385,123 @@ struct i40e_aqc_list_capabilites { I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); struct i40e_aqc_list_capabilities_element_resp { - __le16 id; - u8 major_rev; - u8 minor_rev; - __le32 number; - __le32 logical_id; - __le32 phys_id; - u8 reserved[16]; + __le16 id; + u8 major_rev; + u8 minor_rev; + __le32 number; + __le32 logical_id; + __le32 phys_id; + u8 reserved[16]; }; /* list of caps */ -#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 -#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 -#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 -#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 -#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 -#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 -#define I40E_AQ_CAP_ID_SRIOV 0x0012 -#define I40E_AQ_CAP_ID_VF 0x0013 -#define I40E_AQ_CAP_ID_VMDQ 0x0014 -#define I40E_AQ_CAP_ID_8021QBG 0x0015 -#define I40E_AQ_CAP_ID_8021QBR 0x0016 -#define I40E_AQ_CAP_ID_VSI 0x0017 -#define I40E_AQ_CAP_ID_DCB 0x0018 -#define I40E_AQ_CAP_ID_FCOE 0x0021 -#define I40E_AQ_CAP_ID_RSS 0x0040 -#define I40E_AQ_CAP_ID_RXQ 0x0041 -#define I40E_AQ_CAP_ID_TXQ 0x0042 -#define I40E_AQ_CAP_ID_MSIX 0x0043 -#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 -#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 -#define I40E_AQ_CAP_ID_1588 0x0046 -#define I40E_AQ_CAP_ID_IWARP 0x0051 -#define I40E_AQ_CAP_ID_LED 0x0061 -#define I40E_AQ_CAP_ID_SDP 0x0062 -#define I40E_AQ_CAP_ID_MDIO 0x0063 -#define I40E_AQ_CAP_ID_FLEX10 0x00F1 -#define I40E_AQ_CAP_ID_CEM 0x00F2 +#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 +#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 +#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 +#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 +#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 +#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 +#define I40E_AQ_CAP_ID_SRIOV 0x0012 +#define I40E_AQ_CAP_ID_VF 0x0013 +#define I40E_AQ_CAP_ID_VMDQ 0x0014 +#define I40E_AQ_CAP_ID_8021QBG 0x0015 +#define I40E_AQ_CAP_ID_8021QBR 0x0016 +#define I40E_AQ_CAP_ID_VSI 0x0017 +#define I40E_AQ_CAP_ID_DCB 0x0018 +#define I40E_AQ_CAP_ID_FCOE 0x0021 +#define I40E_AQ_CAP_ID_RSS 0x0040 +#define I40E_AQ_CAP_ID_RXQ 0x0041 +#define I40E_AQ_CAP_ID_TXQ 0x0042 +#define I40E_AQ_CAP_ID_MSIX 0x0043 +#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 +#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 +#define I40E_AQ_CAP_ID_1588 0x0046 +#define I40E_AQ_CAP_ID_IWARP 0x0051 +#define I40E_AQ_CAP_ID_LED 0x0061 +#define I40E_AQ_CAP_ID_SDP 0x0062 +#define I40E_AQ_CAP_ID_MDIO 0x0063 +#define I40E_AQ_CAP_ID_FLEX10 0x00F1 +#define I40E_AQ_CAP_ID_CEM 0x00F2 /* Set CPPM Configuration (direct 0x0103) */ struct i40e_aqc_cppm_configuration { - __le16 command_flags; -#define I40E_AQ_CPPM_EN_LTRC 0x0800 -#define I40E_AQ_CPPM_EN_DMCTH 0x1000 -#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 -#define I40E_AQ_CPPM_EN_HPTC 0x4000 -#define I40E_AQ_CPPM_EN_DMARC 0x8000 - __le16 ttlx; - __le32 dmacr; - __le16 dmcth; - u8 hptc; - u8 reserved; - __le32 pfltrc; + __le16 command_flags; +#define I40E_AQ_CPPM_EN_LTRC 0x0800 +#define I40E_AQ_CPPM_EN_DMCTH 0x1000 +#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 +#define I40E_AQ_CPPM_EN_HPTC 0x4000 +#define I40E_AQ_CPPM_EN_DMARC 0x8000 + __le16 ttlx; + __le32 dmacr; + __le16 dmcth; + u8 hptc; + u8 reserved; + __le32 pfltrc; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); /* Set ARP Proxy command / response (indirect 0x0104) */ struct i40e_aqc_arp_proxy_data { - __le16 
command_flags; -#define I40E_AQ_ARP_INIT_IPV4 0x0008 -#define I40E_AQ_ARP_UNSUP_CTL 0x0010 -#define I40E_AQ_ARP_ENA 0x0020 -#define I40E_AQ_ARP_ADD_IPV4 0x0040 -#define I40E_AQ_ARP_DEL_IPV4 0x0080 - __le16 table_id; - __le32 pfpm_proxyfc; - __le32 ip_addr; - u8 mac_addr[6]; + __le16 command_flags; +#define I40E_AQ_ARP_INIT_IPV4 0x0008 +#define I40E_AQ_ARP_UNSUP_CTL 0x0010 +#define I40E_AQ_ARP_ENA 0x0020 +#define I40E_AQ_ARP_ADD_IPV4 0x0040 +#define I40E_AQ_ARP_DEL_IPV4 0x0080 + __le16 table_id; + __le32 pfpm_proxyfc; + __le32 ip_addr; + u8 mac_addr[6]; }; /* Set NS Proxy Table Entry Command (indirect 0x0105) */ struct i40e_aqc_ns_proxy_data { - __le16 table_idx_mac_addr_0; - __le16 table_idx_mac_addr_1; - __le16 table_idx_ipv6_0; - __le16 table_idx_ipv6_1; - __le16 control; -#define I40E_AQ_NS_PROXY_ADD_0 0x0100 -#define I40E_AQ_NS_PROXY_DEL_0 0x0200 -#define I40E_AQ_NS_PROXY_ADD_1 0x0400 -#define I40E_AQ_NS_PROXY_DEL_1 0x0800 -#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 -#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 -#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 -#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 -#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 - u8 mac_addr_0[6]; - u8 mac_addr_1[6]; - u8 local_mac_addr[6]; - u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */ - u8 ipv6_addr_1[16]; + __le16 table_idx_mac_addr_0; + __le16 table_idx_mac_addr_1; + __le16 table_idx_ipv6_0; + __le16 table_idx_ipv6_1; + __le16 control; +#define I40E_AQ_NS_PROXY_ADD_0 0x0100 +#define I40E_AQ_NS_PROXY_DEL_0 0x0200 +#define I40E_AQ_NS_PROXY_ADD_1 0x0400 +#define I40E_AQ_NS_PROXY_DEL_1 0x0800 +#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 +#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 +#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 +#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 +#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 + u8 mac_addr_0[6]; + u8 mac_addr_1[6]; + u8 local_mac_addr[6]; + u8 ipv6_addr_0[16]; /* Warning! 
spec specifies BE byte order */ + u8 ipv6_addr_1[16]; }; /* Manage LAA Command (0x0106) - obsolete */ struct i40e_aqc_mng_laa { __le16 command_flags; -#define I40E_AQ_LAA_FLAG_WR 0x8000 - u8 reserved[2]; - __le32 sal; - __le16 sah; - u8 reserved2[6]; +#define I40E_AQ_LAA_FLAG_WR 0x8000 + u8 reserved[2]; + __le32 sal; + __le16 sah; + u8 reserved2[6]; }; /* Manage MAC Address Read Command (indirect 0x0107) */ struct i40e_aqc_mac_address_read { __le16 command_flags; -#define I40E_AQC_LAN_ADDR_VALID 0x10 -#define I40E_AQC_SAN_ADDR_VALID 0x20 -#define I40E_AQC_PORT_ADDR_VALID 0x40 -#define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_ADDR_VALID_MASK 0xf0 - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; +#define I40E_AQC_LAN_ADDR_VALID 0x10 +#define I40E_AQC_SAN_ADDR_VALID 0x20 +#define I40E_AQC_PORT_ADDR_VALID 0x40 +#define I40E_AQC_WOL_ADDR_VALID 0x80 +#define I40E_AQC_ADDR_VALID_MASK 0xf0 + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); @@ -517,14 +517,14 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); /* Manage MAC Address Write Command (0x0108) */ struct i40e_aqc_mac_address_write { - __le16 command_flags; -#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 -#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 -#define I40E_AQC_WRITE_TYPE_PORT 0x8000 -#define I40E_AQC_WRITE_TYPE_MASK 0xc000 - __le16 mac_sah; - __le32 mac_sal; - u8 reserved[8]; + __le16 command_flags; +#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 +#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 +#define I40E_AQC_WRITE_TYPE_PORT 0x8000 +#define I40E_AQC_WRITE_TYPE_MASK 0xc000 + __le16 mac_sah; + __le32 mac_sal; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); @@ -545,10 +545,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); * command */ struct i40e_aqc_switch_seid { - __le16 seid; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; + __le16 seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); @@ -557,34 +557,34 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); * uses i40e_aqc_switch_seid for the descriptor */ struct i40e_aqc_get_switch_config_header_resp { - __le16 num_reported; - __le16 num_total; - u8 reserved[12]; + __le16 num_reported; + __le16 num_total; + u8 reserved[12]; }; struct i40e_aqc_switch_config_element_resp { - u8 element_type; -#define I40E_AQ_SW_ELEM_TYPE_MAC 1 -#define I40E_AQ_SW_ELEM_TYPE_PF 2 -#define I40E_AQ_SW_ELEM_TYPE_VF 3 -#define I40E_AQ_SW_ELEM_TYPE_EMP 4 -#define I40E_AQ_SW_ELEM_TYPE_BMC 5 -#define I40E_AQ_SW_ELEM_TYPE_PV 16 -#define I40E_AQ_SW_ELEM_TYPE_VEB 17 -#define I40E_AQ_SW_ELEM_TYPE_PA 18 -#define I40E_AQ_SW_ELEM_TYPE_VSI 19 - u8 revision; -#define I40E_AQ_SW_ELEM_REV_1 1 - __le16 seid; - __le16 uplink_seid; - __le16 downlink_seid; - u8 reserved[3]; - u8 connection_type; -#define I40E_AQ_CONN_TYPE_REGULAR 0x1 -#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_CONN_TYPE_CASCADED 0x3 - __le16 scheduler_id; - __le16 element_info; + u8 element_type; +#define I40E_AQ_SW_ELEM_TYPE_MAC 1 +#define I40E_AQ_SW_ELEM_TYPE_PF 2 +#define I40E_AQ_SW_ELEM_TYPE_VF 3 +#define I40E_AQ_SW_ELEM_TYPE_EMP 4 +#define I40E_AQ_SW_ELEM_TYPE_BMC 5 +#define I40E_AQ_SW_ELEM_TYPE_PV 16 +#define I40E_AQ_SW_ELEM_TYPE_VEB 17 +#define I40E_AQ_SW_ELEM_TYPE_PA 18 +#define I40E_AQ_SW_ELEM_TYPE_VSI 19 + u8 revision; +#define I40E_AQ_SW_ELEM_REV_1 1 + __le16 seid; + __le16 uplink_seid; + __le16 downlink_seid; + u8 reserved[3]; + u8 connection_type; +#define 
I40E_AQ_CONN_TYPE_REGULAR 0x1 +#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_CONN_TYPE_CASCADED 0x3 + __le16 scheduler_id; + __le16 element_info; }; /* Get Switch Configuration (indirect 0x0200) @@ -592,73 +592,73 @@ struct i40e_aqc_switch_config_element_resp { * the first in the array is the header, remainder are elements */ struct i40e_aqc_get_switch_config_resp { - struct i40e_aqc_get_switch_config_header_resp header; - struct i40e_aqc_switch_config_element_resp element[1]; + struct i40e_aqc_get_switch_config_header_resp header; + struct i40e_aqc_switch_config_element_resp element[1]; }; /* Add Statistics (direct 0x0201) * Remove Statistics (direct 0x0202) */ struct i40e_aqc_add_remove_statistics { - __le16 seid; - __le16 vlan; - __le16 stat_index; - u8 reserved[10]; + __le16 seid; + __le16 vlan; + __le16 stat_index; + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); /* Set Port Parameters command (direct 0x0203) */ struct i40e_aqc_set_port_parameters { - __le16 command_flags; -#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 -#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ -#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 - __le16 bad_frame_vsi; - __le16 default_seid; /* reserved for command */ - u8 reserved[10]; + __le16 command_flags; +#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 +#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ +#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 + __le16 bad_frame_vsi; + __le16 default_seid; /* reserved for command */ + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); /* Get Switch Resource Allocation (indirect 0x0204) */ struct i40e_aqc_get_switch_resource_alloc { - u8 num_entries; /* reserved for command */ - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; + u8 num_entries; /* reserved for command */ + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); /* expect an array of these structs in the response buffer */ struct i40e_aqc_switch_resource_alloc_element_resp { - u8 resource_type; -#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 -#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 -#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 -#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 -#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 -#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 -#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 -#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 -#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 -#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 -#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA -#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB -#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC -#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD -#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF -#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 -#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 -#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 -#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 - u8 reserved1; - __le16 guaranteed; - __le16 total; - __le16 used; - __le16 total_unalloced; - u8 reserved2[6]; + u8 resource_type; +#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 +#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 +#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 +#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 +#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 +#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 +#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 +#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 +#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 +#define 
I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 +#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA +#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB +#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC +#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD +#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF +#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 +#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 +#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 +#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 + u8 reserved1; + __le16 guaranteed; + __le16 total; + __le16 used; + __le16 total_unalloced; + u8 reserved2[6]; }; /* Add VSI (indirect 0x0210) @@ -672,24 +672,24 @@ struct i40e_aqc_switch_resource_alloc_element_resp { * uses the same completion and data structure as Add VSI */ struct i40e_aqc_add_get_update_vsi { - __le16 uplink_seid; - u8 connection_type; -#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 -#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 - u8 reserved1; - u8 vf_id; - u8 reserved2; - __le16 vsi_flags; -#define I40E_AQ_VSI_TYPE_SHIFT 0x0 -#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) -#define I40E_AQ_VSI_TYPE_VF 0x0 -#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 -#define I40E_AQ_VSI_TYPE_PF 0x2 -#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 -#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 - __le32 addr_high; - __le32 addr_low; + __le16 uplink_seid; + u8 connection_type; +#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 +#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 + u8 reserved1; + u8 vf_id; + u8 reserved2; + __le16 vsi_flags; +#define I40E_AQ_VSI_TYPE_SHIFT 0x0 +#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) +#define I40E_AQ_VSI_TYPE_VF 0x0 +#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 +#define I40E_AQ_VSI_TYPE_PF 0x2 +#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 +#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); @@ -707,121 +707,121 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion); struct i40e_aqc_vsi_properties_data { /* first 96 byte are written by SW */ - __le16 valid_sections; -#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 -#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 -#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 -#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 -#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 -#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 -#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 -#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 -#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 -#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 + __le16 valid_sections; +#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 +#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 +#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 +#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 +#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 +#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 +#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 +#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 +#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 +#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 /* switch section */ - __le16 switch_id; /* 12bit id combined with flags below */ -#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 -#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) -#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 -#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 -#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 - u8 sw_reserved[2]; + __le16 switch_id; /* 12bit id combined with flags below */ +#define 
I40E_AQ_VSI_SW_ID_SHIFT 0x0000 +#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) +#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 +#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 +#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 + u8 sw_reserved[2]; /* security section */ - u8 sec_flags; -#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 -#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 -#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 - u8 sec_reserved; + u8 sec_flags; +#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 + u8 sec_reserved; /* VLAN section */ - __le16 pvid; /* VLANS include priority bits */ - __le16 fcoe_pvid; - u8 port_vlan_flags; -#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 -#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ - I40E_AQ_VSI_PVLAN_MODE_SHIFT) -#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 -#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 -#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 -#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 -#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 -#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ - I40E_AQ_VSI_PVLAN_EMOD_SHIFT) -#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 -#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 -#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 -#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 - u8 pvlan_reserved[3]; + __le16 pvid; /* VLANS include priority bits */ + __le16 fcoe_pvid; + u8 port_vlan_flags; +#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 +#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ + I40E_AQ_VSI_PVLAN_MODE_SHIFT) +#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 +#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 +#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 +#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 +#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 +#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ + I40E_AQ_VSI_PVLAN_EMOD_SHIFT) +#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 +#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 +#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 +#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 + u8 pvlan_reserved[3]; /* ingress egress up sections */ - __le32 ingress_table; /* bitmap, 3 bits per up */ -#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 -#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 -#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 -#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 -#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 -#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 -#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 -#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 -#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) - __le32 egress_table; /* same defines as for ingress table */ + __le32 ingress_table; /* bitmap, 3 bits per up */ +#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 +#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 +#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) 
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 +#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 +#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 +#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 +#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 +#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 +#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) + __le32 egress_table; /* same defines as for ingress table */ /* cascaded PV section */ - __le16 cas_pv_tag; - u8 cas_pv_flags; -#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ - I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) -#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 -#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 -#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 -#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 -#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 - u8 cas_pv_reserved; + __le16 cas_pv_tag; + u8 cas_pv_flags; +#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ + I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) +#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 +#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 +#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 +#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 +#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 + u8 cas_pv_reserved; /* queue mapping section */ - __le16 mapping_flags; -#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 -#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 - __le16 queue_mapping[16]; -#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 -#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) - __le16 tc_mapping[8]; -#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 -#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ - I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) -#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 -#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ - I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) + __le16 mapping_flags; +#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 +#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 + __le16 queue_mapping[16]; +#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 +#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) + __le16 tc_mapping[8]; +#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 +#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) +#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 +#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) /* queueing option section */ - u8 queueing_opt_flags; -#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 -#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 - u8 queueing_opt_reserved[3]; + u8 queueing_opt_flags; +#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 +#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 + u8 queueing_opt_reserved[3]; /* scheduler section */ - u8 up_enable_bits; - u8 sched_reserved; + u8 up_enable_bits; + u8 sched_reserved; /* outer up section */ - __le32 outer_up_table; /* same structure and defines as ingress table */ - u8 cmd_reserved[8]; + __le32 outer_up_table; /* same structure and defines as ingress tbl */ + u8 cmd_reserved[8]; /* last 32 bytes are written by FW */ - __le16 qs_handle[8]; + __le16 qs_handle[8]; #define 
I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF - __le16 stat_counter_idx; - __le16 sched_id; - u8 resp_reserved[12]; + __le16 stat_counter_idx; + __le16 sched_id; + u8 resp_reserved[12]; }; I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); @@ -831,26 +831,26 @@ I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); * (IS_CTRL_PORT only works on add PV) */ struct i40e_aqc_add_update_pv { - __le16 command_flags; -#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 -#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 - __le16 uplink_seid; - __le16 connected_seid; - u8 reserved[10]; + __le16 command_flags; +#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 +#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 + __le16 uplink_seid; + __le16 connected_seid; + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); struct i40e_aqc_add_update_pv_completion { /* reserved for update; for add also encodes error if rc == ENOSPC */ - __le16 pv_seid; -#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 -#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 - u8 reserved[14]; + __le16 pv_seid; +#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 +#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); @@ -860,48 +860,48 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); */ struct i40e_aqc_get_pv_params_completion { - __le16 seid; - __le16 default_stag; - __le16 pv_flags; /* same flags as add_pv */ -#define I40E_AQC_GET_PV_PV_TYPE 0x1 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 - u8 reserved[8]; - __le16 default_port_seid; + __le16 seid; + __le16 default_stag; + __le16 pv_flags; /* same flags as add_pv */ +#define I40E_AQC_GET_PV_PV_TYPE 0x1 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 + u8 reserved[8]; + __le16 default_port_seid; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); /* Add VEB (direct 0x0230) */ struct i40e_aqc_add_veb { - __le16 uplink_seid; - __le16 downlink_seid; - __le16 veb_flags; -#define I40E_AQC_ADD_VEB_FLOATING 0x1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ + __le16 uplink_seid; + __le16 downlink_seid; + __le16 veb_flags; +#define I40E_AQC_ADD_VEB_FLOATING 0x1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) -#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 -#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 -#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 - u8 enable_tcs; - u8 reserved[9]; +#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 +#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 +#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 + u8 enable_tcs; + u8 reserved[9]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); struct i40e_aqc_add_veb_completion { - u8 reserved[6]; - __le16 switch_seid; + u8 reserved[6]; + __le16 switch_seid; /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ - __le16 veb_seid; -#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 -#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 - 
__le16 statistic_index; - __le16 vebs_used; - __le16 vebs_free; + __le16 veb_seid; +#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 +#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); @@ -910,13 +910,13 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); * uses i40e_aqc_switch_seid for the descriptor */ struct i40e_aqc_get_veb_parameters_completion { - __le16 seid; - __le16 switch_id; - __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ - __le16 statistic_index; - __le16 vebs_used; - __le16 vebs_free; - u8 reserved[4]; + __le16 seid; + __le16 switch_id; + __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; + u8 reserved[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); @@ -929,37 +929,37 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); /* used for the command for most vlan commands */ struct i40e_aqc_macvlan { - __le16 num_addresses; - __le16 seid[3]; -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ + __le16 num_addresses; + __le16 seid[3]; +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) -#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 - __le32 addr_high; - __le32 addr_low; +#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); /* indirect data for command and response */ struct i40e_aqc_add_macvlan_element_data { - u8 mac_addr[6]; - __le16 vlan_tag; - __le16 flags; -#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 -#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 -#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 -#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 - __le16 queue_number; -#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ + u8 mac_addr[6]; + __le16 vlan_tag; + __le16 flags; +#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 +#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 +#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 +#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 + __le16 queue_number; +#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) /* response section */ - u8 match_method; -#define I40E_AQC_MM_PERFECT_MATCH 0x01 -#define I40E_AQC_MM_HASH_MATCH 0x02 -#define I40E_AQC_MM_ERR_NO_RES 0xFF - u8 reserved1[3]; + u8 match_method; +#define I40E_AQC_MM_PERFECT_MATCH 0x01 +#define I40E_AQC_MM_HASH_MATCH 0x02 +#define I40E_AQC_MM_ERR_NO_RES 0xFF + u8 reserved1[3]; }; struct i40e_aqc_add_remove_macvlan_completion { @@ -979,19 +979,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion); */ struct i40e_aqc_remove_macvlan_element_data { - u8 mac_addr[6]; - __le16 vlan_tag; - u8 flags; -#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 -#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 -#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 -#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 - u8 reserved[3]; + u8 mac_addr[6]; + __le16 vlan_tag; + u8 flags; +#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 +#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 +#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 +#define 
I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 + u8 reserved[3]; /* reply section */ - u8 error_code; -#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF - u8 reply_reserved[3]; + u8 error_code; +#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF + u8 reply_reserved[3]; }; /* Add VLAN (indirect 0x0252) @@ -999,59 +999,58 @@ struct i40e_aqc_remove_macvlan_element_data { * use the generic i40e_aqc_macvlan for the command */ struct i40e_aqc_add_remove_vlan_element_data { - __le16 vlan_tag; - u8 vlan_flags; + __le16 vlan_tag; + u8 vlan_flags; /* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_LOCAL 0x1 -#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 -#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \ - I40E_AQC_ADD_PVLAN_TYPE_SHIFT) -#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 -#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 -#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 -#define I40E_AQC_VLAN_PTYPE_SHIFT 3 -#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) -#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 -#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 -#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 -#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 +#define I40E_AQC_ADD_VLAN_LOCAL 0x1 +#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 +#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) +#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 +#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 +#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 +#define I40E_AQC_VLAN_PTYPE_SHIFT 3 +#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) +#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 +#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 +#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 +#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 /* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_ALL 0x1 - u8 reserved; - u8 result; +#define I40E_AQC_REMOVE_VLAN_ALL 0x1 + u8 reserved; + u8 result; /* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 -#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE -#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF +#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 +#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE +#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF /* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF - u8 reserved1[3]; +#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF + u8 reserved1[3]; }; struct i40e_aqc_add_remove_vlan_completion { - u8 reserved[4]; - __le16 vlans_used; - __le16 vlans_free; - __le32 addr_high; - __le32 addr_low; + u8 reserved[4]; + __le16 vlans_used; + __le16 vlans_free; + __le32 addr_high; + __le32 addr_low; }; /* Set VSI Promiscuous Modes (direct 0x0254) */ struct i40e_aqc_set_vsi_promiscuous_modes { - __le16 promiscuous_flags; - __le16 valid_flags; + __le16 promiscuous_flags; + __le16 valid_flags; /* flags used for both fields above */ -#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 -#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 -#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 -#define I40E_AQC_SET_VSI_DEFAULT 0x08 -#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 - __le16 seid; -#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF - __le16 vlan_tag; -#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 - u8 reserved[8]; +#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 +#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 +#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 +#define I40E_AQC_SET_VSI_DEFAULT 0x08 +#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 
+ __le16 seid; +#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF + __le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); @@ -1060,23 +1059,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); * Uses generic i40e_aqc_add_remove_tag_completion for completion */ struct i40e_aqc_add_tag { - __le16 flags; -#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 - __le16 seid; -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + __le16 flags; +#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 + __le16 seid; +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) - __le16 tag; - __le16 queue_number; - u8 reserved[8]; + __le16 tag; + __le16 queue_number; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); struct i40e_aqc_add_remove_tag_completion { - u8 reserved[12]; - __le16 tags_used; - __le16 tags_free; + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); @@ -1085,12 +1084,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); * Uses generic i40e_aqc_add_remove_tag_completion for completion */ struct i40e_aqc_remove_tag { - __le16 seid; -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + __le16 seid; +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) - __le16 tag; - u8 reserved[12]; + __le16 tag; + u8 reserved[12]; }; /* Add multicast E-Tag (direct 0x0257) @@ -1098,22 +1097,22 @@ struct i40e_aqc_remove_tag { * and no external data */ struct i40e_aqc_add_remove_mcast_etag { - __le16 pv_seid; - __le16 etag; - u8 num_unicast_etags; - u8 reserved[3]; - __le32 addr_high; /* address of array of 2-byte s-tags */ - __le32 addr_low; + __le16 pv_seid; + __le16 etag; + u8 num_unicast_etags; + u8 reserved[3]; + __le32 addr_high; /* address of array of 2-byte s-tags */ + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); struct i40e_aqc_add_remove_mcast_etag_completion { - u8 reserved[4]; - __le16 mcast_etags_used; - __le16 mcast_etags_free; - __le32 addr_high; - __le32 addr_low; + u8 reserved[4]; + __le16 mcast_etags_used; + __le16 mcast_etags_free; + __le32 addr_high; + __le32 addr_low; }; @@ -1121,21 +1120,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); /* Update S/E-Tag (direct 0x0259) */ struct i40e_aqc_update_tag { - __le16 seid; -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + __le16 seid; +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) - __le16 old_tag; - __le16 new_tag; - u8 reserved[10]; + __le16 old_tag; + __le16 new_tag; + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); struct i40e_aqc_update_tag_completion { - u8 reserved[12]; - __le16 tags_used; - __le16 tags_free; + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); @@ -1146,30 +1145,30 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); * and the generic direct completion structure */ struct i40e_aqc_add_remove_control_packet_filter { - u8 mac[6]; - __le16 etype; - __le16 flags; -#define 
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 - __le16 seid; -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ + u8 mac[6]; + __le16 etype; + __le16 flags; +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 + __le16 seid; +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) - __le16 queue; - u8 reserved[2]; + __le16 queue; + u8 reserved[2]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); struct i40e_aqc_add_remove_control_packet_filter_completion { - __le16 mac_etype_used; - __le16 etype_used; - __le16 mac_etype_free; - __le16 etype_free; - u8 reserved[8]; + __le16 mac_etype_used; + __le16 etype_used; + __le16 mac_etype_free; + __le16 etype_free; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); @@ -1180,23 +1179,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); * and the generic indirect completion structure */ struct i40e_aqc_add_remove_cloud_filters { - u8 num_filters; - u8 reserved; - __le16 seid; -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ + u8 num_filters; + u8 reserved; + __le16 seid; +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) - u8 reserved2[4]; - __le32 addr_high; - __le32 addr_low; + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); struct i40e_aqc_add_remove_cloud_filters_element_data { - u8 outer_mac[6]; - u8 inner_mac[6]; - __le16 inner_vlan; + u8 outer_mac[6]; + u8 inner_mac[6]; + __le16 inner_vlan; union { struct { u8 reserved[12]; @@ -1206,52 +1205,49 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { u8 data[16]; } v6; } ipaddr; - __le16 flags; -#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ + __le16 flags; +#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ I40E_AQC_ADD_CLOUD_FILTER_SHIFT) -#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007 /* 0x0000 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 +#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 /* 0x0002 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 /* 0x0005 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 /* 0x0007 reserved */ /* 0x0008 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A -#define 
I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B -#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C - -#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 -#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 -#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 -#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 -#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 - -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 - - __le32 tenant_id; - u8 reserved[4]; - __le16 queue_number; -#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ - I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) - u8 reserved2[14]; +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B +#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C + +#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 +#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 +#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 + +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 + + __le32 tenant_id; + u8 reserved[4]; + __le16 queue_number; +#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ + I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) + u8 reserved2[14]; /* response section */ - u8 allocation_result; -#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 -#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF - u8 response_reserved[7]; + u8 allocation_result; +#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 +#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF + u8 response_reserved[7]; }; struct i40e_aqc_remove_cloud_filters_completion { @@ -1273,14 +1269,14 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); struct i40e_aqc_add_delete_mirror_rule { __le16 seid; __le16 rule_type; -#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 -#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ +#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 +#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ I40E_AQC_MIRROR_RULE_TYPE_SHIFT) -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 -#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 -#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 -#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 +#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 __le16 num_entries; __le16 destination; /* VSI for add, rule id for delete */ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ @@ -1290,12 +1286,12 @@ struct i40e_aqc_add_delete_mirror_rule { I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); struct i40e_aqc_add_delete_mirror_rule_completion { - u8 reserved[2]; - __le16 rule_id; /* only used on add */ - __le16 mirror_rules_used; - __le16 mirror_rules_free; - __le32 addr_high; - __le32 addr_low; + u8 reserved[2]; + __le16 rule_id; /* only used on add */ + __le16 mirror_rules_used; + __le16 mirror_rules_free; + __le32 addr_high; + __le32 
addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); @@ -1306,11 +1302,11 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); * the command and response use the same descriptor structure */ struct i40e_aqc_pfc_ignore { - u8 tc_bitmap; - u8 command_flags; /* unused on response */ -#define I40E_AQC_PFC_IGNORE_SET 0x80 -#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 - u8 reserved[14]; + u8 tc_bitmap; + u8 command_flags; /* unused on response */ +#define I40E_AQC_PFC_IGNORE_SET 0x80 +#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); @@ -1325,10 +1321,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); * this generic struct to pass the SEID in param0 */ struct i40e_aqc_tx_sched_ind { - __le16 vsi_seid; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; + __le16 vsi_seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); @@ -1340,12 +1336,12 @@ struct i40e_aqc_qs_handles_resp { /* Configure VSI BW limits (direct 0x0400) */ struct i40e_aqc_configure_vsi_bw_limit { - __le16 vsi_seid; - u8 reserved[2]; - __le16 credit; - u8 reserved1[2]; - u8 max_credit; /* 0-3, limit = 2^max */ - u8 reserved2[7]; + __le16 vsi_seid; + u8 reserved[2]; + __le16 credit; + u8 reserved1[2]; + u8 max_credit; /* 0-3, limit = 2^max */ + u8 reserved2[7]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); @@ -1354,58 +1350,58 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); * responds with i40e_aqc_qs_handles_resp */ struct i40e_aqc_configure_vsi_ets_sla_bw_data { - u8 tc_valid_bits; - u8 reserved[15]; - __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved1[28]; + __le16 tc_bw_max[2]; + u8 reserved1[28]; }; /* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) * responds with i40e_aqc_qs_handles_resp */ struct i40e_aqc_configure_vsi_tc_bw_data { - u8 tc_valid_bits; - u8 reserved[3]; - u8 tc_bw_credits[8]; - u8 reserved1[4]; - __le16 qs_handles[8]; + u8 tc_valid_bits; + u8 reserved[3]; + u8 tc_bw_credits[8]; + u8 reserved1[4]; + __le16 qs_handles[8]; }; /* Query vsi bw configuration (indirect 0x0408) */ struct i40e_aqc_query_vsi_bw_config_resp { - u8 tc_valid_bits; - u8 tc_suspended_bits; - u8 reserved[14]; - __le16 qs_handles[8]; - u8 reserved1[4]; - __le16 port_bw_limit; - u8 reserved2[2]; - u8 max_bw; /* 0-3, limit = 2^max */ - u8 reserved3[23]; + u8 tc_valid_bits; + u8 tc_suspended_bits; + u8 reserved[14]; + __le16 qs_handles[8]; + u8 reserved1[4]; + __le16 port_bw_limit; + u8 reserved2[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved3[23]; }; /* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ struct i40e_aqc_query_vsi_ets_sla_config_resp { - u8 tc_valid_bits; - u8 reserved[3]; - u8 share_credits[8]; - __le16 credits[8]; + u8 tc_valid_bits; + u8 reserved[3]; + u8 share_credits[8]; + __le16 credits[8]; /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; + __le16 tc_bw_max[2]; }; /* Configure Switching Component Bandwidth Limit (direct 0x0410) */ struct i40e_aqc_configure_switching_comp_bw_limit { - __le16 seid; - u8 reserved[2]; - __le16 credit; - u8 reserved1[2]; - u8 max_bw; /* 0-3, limit = 2^max */ - u8 reserved2[7]; + __le16 seid; + u8 reserved[2]; + __le16 credit; + u8 
reserved1[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved2[7]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); @@ -1415,75 +1411,75 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); * Disable Physical Port ETS (indirect 0x0415) */ struct i40e_aqc_configure_switching_comp_ets_data { - u8 reserved[4]; - u8 tc_valid_bits; - u8 seepage; -#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 - u8 tc_strict_priority_flags; - u8 reserved1[17]; - u8 tc_bw_share_credits[8]; - u8 reserved2[96]; + u8 reserved[4]; + u8 tc_valid_bits; + u8 seepage; +#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 + u8 tc_strict_priority_flags; + u8 reserved1[17]; + u8 tc_bw_share_credits[8]; + u8 reserved2[96]; }; /* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { - u8 tc_valid_bits; - u8 reserved[15]; - __le16 tc_bw_credit[8]; + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credit[8]; /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved1[28]; + __le16 tc_bw_max[2]; + u8 reserved1[28]; }; /* Configure Switching Component Bandwidth Allocation per Tc * (indirect 0x0417) */ struct i40e_aqc_configure_switching_comp_bw_config_data { - u8 tc_valid_bits; - u8 reserved[2]; - u8 absolute_credits; /* bool */ - u8 tc_bw_share_credits[8]; - u8 reserved1[20]; + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits; /* bool */ + u8 tc_bw_share_credits[8]; + u8 reserved1[20]; }; /* Query Switching Component Configuration (indirect 0x0418) */ struct i40e_aqc_query_switching_comp_ets_config_resp { - u8 tc_valid_bits; - u8 reserved[35]; - __le16 port_bw_limit; - u8 reserved1[2]; - u8 tc_bw_max; /* 0-3, limit = 2^max */ - u8 reserved2[23]; + u8 tc_valid_bits; + u8 reserved[35]; + __le16 port_bw_limit; + u8 reserved1[2]; + u8 tc_bw_max; /* 0-3, limit = 2^max */ + u8 reserved2[23]; }; /* Query PhysicalPort ETS Configuration (indirect 0x0419) */ struct i40e_aqc_query_port_ets_config_resp { - u8 reserved[4]; - u8 tc_valid_bits; - u8 reserved1; - u8 tc_strict_priority_bits; - u8 reserved2; - u8 tc_bw_share_credits[8]; - __le16 tc_bw_limits[8]; + u8 reserved[4]; + u8 tc_valid_bits; + u8 reserved1; + u8 tc_strict_priority_bits; + u8 reserved2; + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved3[32]; + __le16 tc_bw_max[2]; + u8 reserved3[32]; }; /* Query Switching Component Bandwidth Allocation per Traffic Type * (indirect 0x041A) */ struct i40e_aqc_query_switching_comp_bw_config_resp { - u8 tc_valid_bits; - u8 reserved[2]; - u8 absolute_credits_enable; /* bool */ - u8 tc_bw_share_credits[8]; - __le16 tc_bw_limits[8]; + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits_enable; /* bool */ + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; + __le16 tc_bw_max[2]; }; /* Suspend/resume port TX traffic @@ -1494,37 +1490,37 @@ struct i40e_aqc_query_switching_comp_bw_config_resp { * (indirect 0x041D) */ struct i40e_aqc_configure_partition_bw_data { - __le16 pf_valid_bits; - u8 min_bw[16]; /* guaranteed bandwidth */ - u8 max_bw[16]; /* bandwidth limit */ + __le16 pf_valid_bits; + u8 min_bw[16]; /* guaranteed bandwidth */ + u8 max_bw[16]; /* bandwidth limit */ }; /* Get and set the active HMC resource profile and status. 
* (direct 0x0500) and (direct 0x0501) */ struct i40e_aq_get_set_hmc_resource_profile { - u8 pm_profile; - u8 pe_vf_enabled; - u8 reserved[14]; + u8 pm_profile; + u8 pe_vf_enabled; + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); enum i40e_aq_hmc_profile { /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ - I40E_HMC_PROFILE_DEFAULT = 1, - I40E_HMC_PROFILE_FAVOR_VF = 2, - I40E_HMC_PROFILE_EQUAL = 3, + I40E_HMC_PROFILE_DEFAULT = 1, + I40E_HMC_PROFILE_FAVOR_VF = 2, + I40E_HMC_PROFILE_EQUAL = 3, }; -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ /* set in param0 for get phy abilities to report qualified modules */ -#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 -#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 +#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 +#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 enum i40e_aq_phy_type { I40E_PHY_TYPE_SGMII = 0x0, @@ -1582,147 +1578,147 @@ struct i40e_aqc_module_desc { }; struct i40e_aq_get_phy_abilities_resp { - __le32 phy_type; /* bitmap using the above enum for offsets */ - u8 link_speed; /* bitmap using the above enum bit patterns */ - u8 abilities; -#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 -#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 -#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 -#define I40E_AQ_PHY_LINK_ENABLED 0x08 -#define I40E_AQ_PHY_AN_ENABLED 0x10 -#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 - __le16 eee_capability; -#define I40E_AQ_EEE_100BASE_TX 0x0002 -#define I40E_AQ_EEE_1000BASE_T 0x0004 -#define I40E_AQ_EEE_10GBASE_T 0x0008 -#define I40E_AQ_EEE_1000BASE_KX 0x0010 -#define I40E_AQ_EEE_10GBASE_KX4 0x0020 -#define I40E_AQ_EEE_10GBASE_KR 0x0040 - __le32 eeer_val; - u8 d3_lpan; -#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 - u8 reserved[3]; - u8 phy_id[4]; - u8 module_type[3]; - u8 qualified_module_count; -#define I40E_AQ_PHY_MAX_QMS 16 - struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; + __le32 phy_type; /* bitmap using the above enum for offsets */ + u8 link_speed; /* bitmap using the above enum bit patterns */ + u8 abilities; +#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 +#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 +#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 +#define I40E_AQ_PHY_LINK_ENABLED 0x08 +#define I40E_AQ_PHY_AN_ENABLED 0x10 +#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 + __le16 eee_capability; +#define I40E_AQ_EEE_100BASE_TX 0x0002 +#define I40E_AQ_EEE_1000BASE_T 0x0004 +#define I40E_AQ_EEE_10GBASE_T 0x0008 +#define I40E_AQ_EEE_1000BASE_KX 0x0010 +#define I40E_AQ_EEE_10GBASE_KX4 0x0020 +#define I40E_AQ_EEE_10GBASE_KR 0x0040 + __le32 eeer_val; + u8 d3_lpan; +#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 + u8 reserved[3]; + u8 phy_id[4]; + u8 module_type[3]; + u8 qualified_module_count; +#define I40E_AQ_PHY_MAX_QMS 16 + struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; }; /* Set PHY Config (direct 0x0601) */ struct i40e_aq_set_phy_config { /* same bits as above in all */ - __le32 phy_type; - u8 link_speed; - u8 abilities; + __le32 phy_type; + u8 link_speed; + u8 abilities; /* bits 0-2 use the values from get_phy_abilities_resp */ #define I40E_AQ_PHY_ENABLE_LINK 0x08 #define I40E_AQ_PHY_ENABLE_AN 0x10 #define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 - __le16 eee_capability; - __le32 eeer; - u8 low_power_ctrl; - u8 reserved[3]; + __le16 eee_capability; + 
__le32 eeer; + u8 low_power_ctrl; + u8 reserved[3]; }; I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); /* Set MAC Config command data structure (direct 0x0603) */ struct i40e_aq_set_mac_config { - __le16 max_frame_size; - u8 params; -#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 -#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 -#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 -#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 -#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 -#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 - u8 tx_timer_priority; /* bitmap */ - __le16 tx_timer_value; - __le16 fc_refresh_threshold; - u8 reserved[8]; + __le16 max_frame_size; + u8 params; +#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 +#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 +#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 +#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 +#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 +#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 + u8 tx_timer_priority; /* bitmap */ + __le16 tx_timer_value; + __le16 fc_refresh_threshold; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); /* Restart Auto-Negotiation (direct 0x605) */ struct i40e_aqc_set_link_restart_an { - u8 command; -#define I40E_AQ_PHY_RESTART_AN 0x02 -#define I40E_AQ_PHY_LINK_ENABLE 0x04 - u8 reserved[15]; + u8 command; +#define I40E_AQ_PHY_RESTART_AN 0x02 +#define I40E_AQ_PHY_LINK_ENABLE 0x04 + u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); /* Get Link Status cmd & response data structure (direct 0x0607) */ struct i40e_aqc_get_link_status { - __le16 command_flags; /* only field set on command */ -#define I40E_AQ_LSE_MASK 0x3 -#define I40E_AQ_LSE_NOP 0x0 -#define I40E_AQ_LSE_DISABLE 0x2 -#define I40E_AQ_LSE_ENABLE 0x3 + __le16 command_flags; /* only field set on command */ +#define I40E_AQ_LSE_MASK 0x3 +#define I40E_AQ_LSE_NOP 0x0 +#define I40E_AQ_LSE_DISABLE 0x2 +#define I40E_AQ_LSE_ENABLE 0x3 /* only response uses this flag */ -#define I40E_AQ_LSE_IS_ENABLED 0x1 - u8 phy_type; /* i40e_aq_phy_type */ - u8 link_speed; /* i40e_aq_link_speed */ - u8 link_info; -#define I40E_AQ_LINK_UP 0x01 -#define I40E_AQ_LINK_FAULT 0x02 -#define I40E_AQ_LINK_FAULT_TX 0x04 -#define I40E_AQ_LINK_FAULT_RX 0x08 -#define I40E_AQ_LINK_FAULT_REMOTE 0x10 -#define I40E_AQ_MEDIA_AVAILABLE 0x40 -#define I40E_AQ_SIGNAL_DETECT 0x80 - u8 an_info; -#define I40E_AQ_AN_COMPLETED 0x01 -#define I40E_AQ_LP_AN_ABILITY 0x02 -#define I40E_AQ_PD_FAULT 0x04 -#define I40E_AQ_FEC_EN 0x08 -#define I40E_AQ_PHY_LOW_POWER 0x10 -#define I40E_AQ_LINK_PAUSE_TX 0x20 -#define I40E_AQ_LINK_PAUSE_RX 0x40 -#define I40E_AQ_QUALIFIED_MODULE 0x80 - u8 ext_info; -#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 -#define 
I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 -#define I40E_AQ_LINK_TX_SHIFT 0x02 -#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) -#define I40E_AQ_LINK_TX_ACTIVE 0x00 -#define I40E_AQ_LINK_TX_DRAINED 0x01 -#define I40E_AQ_LINK_TX_FLUSHED 0x03 -#define I40E_AQ_LINK_FORCED_40G 0x10 - u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ - __le16 max_frame_size; - u8 config; -#define I40E_AQ_CONFIG_CRC_ENA 0x04 -#define I40E_AQ_CONFIG_PACING_MASK 0x78 - u8 reserved[5]; +#define I40E_AQ_LSE_IS_ENABLED 0x1 + u8 phy_type; /* i40e_aq_phy_type */ + u8 link_speed; /* i40e_aq_link_speed */ + u8 link_info; +#define I40E_AQ_LINK_UP 0x01 +#define I40E_AQ_LINK_FAULT 0x02 +#define I40E_AQ_LINK_FAULT_TX 0x04 +#define I40E_AQ_LINK_FAULT_RX 0x08 +#define I40E_AQ_LINK_FAULT_REMOTE 0x10 +#define I40E_AQ_MEDIA_AVAILABLE 0x40 +#define I40E_AQ_SIGNAL_DETECT 0x80 + u8 an_info; +#define I40E_AQ_AN_COMPLETED 0x01 +#define I40E_AQ_LP_AN_ABILITY 0x02 +#define I40E_AQ_PD_FAULT 0x04 +#define I40E_AQ_FEC_EN 0x08 +#define I40E_AQ_PHY_LOW_POWER 0x10 +#define I40E_AQ_LINK_PAUSE_TX 0x20 +#define I40E_AQ_LINK_PAUSE_RX 0x40 +#define I40E_AQ_QUALIFIED_MODULE 0x80 + u8 ext_info; +#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 +#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 +#define I40E_AQ_LINK_TX_SHIFT 0x02 +#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) +#define I40E_AQ_LINK_TX_ACTIVE 0x00 +#define I40E_AQ_LINK_TX_DRAINED 0x01 +#define I40E_AQ_LINK_TX_FLUSHED 0x03 +#define I40E_AQ_LINK_FORCED_40G 0x10 + u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ + __le16 max_frame_size; + u8 config; +#define I40E_AQ_CONFIG_CRC_ENA 0x04 +#define I40E_AQ_CONFIG_PACING_MASK 0x78 + u8 reserved[5]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); /* Set event mask command (direct 0x613) */ struct i40e_aqc_set_phy_int_mask { - u8 reserved[8]; - __le16 event_mask; -#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 -#define I40E_AQ_EVENT_MEDIA_NA 0x0004 -#define I40E_AQ_EVENT_LINK_FAULT 0x0008 -#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 -#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 -#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 -#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 -#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 -#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 - u8 reserved1[6]; + u8 reserved[8]; + __le16 event_mask; +#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 +#define I40E_AQ_EVENT_MEDIA_NA 0x0004 +#define I40E_AQ_EVENT_LINK_FAULT 0x0008 +#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 +#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 +#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 +#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 +#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 +#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 + u8 reserved1[6]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); @@ -1732,27 +1728,27 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); * Get Link Partner AN advt register (direct 0x0616) */ struct i40e_aqc_an_advt_reg { - __le32 local_an_reg0; - __le16 local_an_reg1; - u8 reserved[10]; + __le32 local_an_reg0; + __le16 local_an_reg1; + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); /* Set Loopback mode (0x0618) */ struct i40e_aqc_set_lb_mode { - __le16 lb_mode; -#define I40E_AQ_LB_PHY_LOCAL 0x01 -#define I40E_AQ_LB_PHY_REMOTE 0x02 -#define I40E_AQ_LB_MAC_LOCAL 0x04 - u8 reserved[14]; + __le16 lb_mode; +#define I40E_AQ_LB_PHY_LOCAL 0x01 +#define I40E_AQ_LB_PHY_REMOTE 0x02 +#define I40E_AQ_LB_MAC_LOCAL 0x04 + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); /* Set PHY Debug command (0x0622) 
*/ struct i40e_aqc_set_phy_debug { - u8 command_flags; + u8 command_flags; #define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ @@ -1761,15 +1757,15 @@ struct i40e_aqc_set_phy_debug { #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 - u8 reserved[15]; + u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); enum i40e_aq_phy_reg_type { - I40E_AQC_PHY_REG_INTERNAL = 0x1, - I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, - I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 + I40E_AQC_PHY_REG_INTERNAL = 0x1, + I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, + I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 }; /* NVM Read command (indirect 0x0701) @@ -1777,40 +1773,40 @@ enum i40e_aq_phy_reg_type { * NVM Update commands (indirect 0x0703) */ struct i40e_aqc_nvm_update { - u8 command_flags; -#define I40E_AQ_NVM_LAST_CMD 0x01 -#define I40E_AQ_NVM_FLASH_ONLY 0x80 - u8 module_pointer; - __le16 length; - __le32 offset; - __le32 addr_high; - __le32 addr_low; + u8 command_flags; +#define I40E_AQ_NVM_LAST_CMD 0x01 +#define I40E_AQ_NVM_FLASH_ONLY 0x80 + u8 module_pointer; + __le16 length; + __le32 offset; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); /* NVM Config Read (indirect 0x0704) */ struct i40e_aqc_nvm_config_read { - __le16 cmd_flags; + __le16 cmd_flags; #define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 #define ANVM_READ_SINGLE_FEATURE 0 #define ANVM_READ_MULTIPLE_FEATURES 1 - __le16 element_count; - __le16 element_id; /* Feature/field ID */ - u8 reserved[2]; - __le32 address_high; - __le32 address_low; + __le16 element_count; + __le16 element_id; /* Feature/field ID */ + u8 reserved[2]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); /* NVM Config Write (indirect 0x0705) */ struct i40e_aqc_nvm_config_write { - __le16 cmd_flags; - __le16 element_count; - u8 reserved[4]; - __le32 address_high; - __le32 address_low; + __le16 cmd_flags; + __le16 element_count; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); @@ -1835,10 +1831,10 @@ struct i40e_aqc_nvm_config_data_immediate_field { * Send to Peer PF command (indirect 0x0803) */ struct i40e_aqc_pf_vf_message { - __le32 id; - u8 reserved[4]; - __le32 addr_high; - __le32 addr_low; + __le32 id; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); @@ -1874,22 +1870,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); * uses i40e_aq_desc */ struct i40e_aqc_alternate_write_done { - __le16 cmd_flags; + __le16 cmd_flags; #define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 #define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 #define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 #define I40E_AQ_ALTERNATE_RESET_NEEDED 2 - u8 reserved[14]; + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); /* Set OEM mode (direct 0x0905) */ struct i40e_aqc_alternate_set_mode { - __le32 mode; + __le32 mode; #define I40E_AQ_ALTERNATE_MODE_NONE 0 #define I40E_AQ_ALTERNATE_MODE_OEM 1 - u8 reserved[12]; + u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); @@ -1900,33 +1896,33 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); /* Lan Queue Overflow Event (direct, 0x1001) */ struct i40e_aqc_lan_overflow { - __le32 prtdcb_rupto; - __le32 otx_ctl; - u8 reserved[8]; + __le32 prtdcb_rupto; + 
__le32 otx_ctl; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); /* Get LLDP MIB (indirect 0x0A00) */ struct i40e_aqc_lldp_get_mib { - u8 type; - u8 reserved1; -#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 -#define I40E_AQ_LLDP_MIB_LOCAL 0x0 -#define I40E_AQ_LLDP_MIB_REMOTE 0x1 -#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 -#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC -#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 -#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 -#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 -#define I40E_AQ_LLDP_TX_SHIFT 0x4 -#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) + u8 type; + u8 reserved1; +#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 +#define I40E_AQ_LLDP_MIB_LOCAL 0x0 +#define I40E_AQ_LLDP_MIB_REMOTE 0x1 +#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC +#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 +#define I40E_AQ_LLDP_TX_SHIFT 0x4 +#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) /* TX pause flags use I40E_AQ_LINK_TX_* above */ - __le16 local_len; - __le16 remote_len; - u8 reserved2[2]; - __le32 addr_high; - __le32 addr_low; + __le16 local_len; + __le16 remote_len; + u8 reserved2[2]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); @@ -1935,12 +1931,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); * also used for the event (with type in the command field) */ struct i40e_aqc_lldp_update_mib { - u8 command; -#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 -#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; + u8 command; +#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 +#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); @@ -1949,35 +1945,35 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); * Delete LLDP TLV (indirect 0x0A04) */ struct i40e_aqc_lldp_add_tlv { - u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ - u8 reserved1[1]; - __le16 len; - u8 reserved2[4]; - __le32 addr_high; - __le32 addr_low; + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved1[1]; + __le16 len; + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); /* Update LLDP TLV (indirect 0x0A03) */ struct i40e_aqc_lldp_update_tlv { - u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ - u8 reserved; - __le16 old_len; - __le16 new_offset; - __le16 new_len; - __le32 addr_high; - __le32 addr_low; + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved; + __le16 old_len; + __le16 new_offset; + __le16 new_len; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); /* Stop LLDP (direct 0x0A05) */ struct i40e_aqc_lldp_stop { - u8 command; -#define I40E_AQ_LLDP_AGENT_STOP 0x0 -#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 - u8 reserved[15]; + u8 command; +#define I40E_AQ_LLDP_AGENT_STOP 0x0 +#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 + u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); @@ -1985,9 +1981,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); /* Start LLDP (direct 0x0A06) */ struct i40e_aqc_lldp_start { - u8 command; -#define I40E_AQ_LLDP_AGENT_START 0x1 - u8 reserved[15]; + u8 command; +#define I40E_AQ_LLDP_AGENT_START 0x1 + u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); @@ -1998,13 
+1994,13 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); /* Add Udp Tunnel command and completion (direct 0x0B00) */ struct i40e_aqc_add_udp_tunnel { - __le16 udp_port; - u8 reserved0[3]; - u8 protocol_type; + __le16 udp_port; + u8 reserved0[3]; + u8 protocol_type; #define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 #define I40E_AQC_TUNNEL_TYPE_NGE 0x01 #define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 - u8 reserved1[10]; + u8 reserved1[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); @@ -2013,8 +2009,8 @@ struct i40e_aqc_add_udp_tunnel_completion { __le16 udp_port; u8 filter_entry_index; u8 multiple_pfs; -#define I40E_AQC_SINGLE_PF 0x0 -#define I40E_AQC_MULTIPLE_PFS 0x1 +#define I40E_AQC_SINGLE_PF 0x0 +#define I40E_AQC_MULTIPLE_PFS 0x1 u8 total_filters; u8 reserved[11]; }; @@ -2023,23 +2019,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); /* remove UDP Tunnel command (0x0B01) */ struct i40e_aqc_remove_udp_tunnel { - u8 reserved[2]; - u8 index; /* 0 to 15 */ - u8 pf_filters; - u8 total_filters; - u8 reserved2[11]; + u8 reserved[2]; + u8 index; /* 0 to 15 */ + u8 reserved2[13]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); struct i40e_aqc_del_udp_tunnel_completion { - __le16 udp_port; - u8 index; /* 0 to 15 */ - u8 multiple_pfs; - u8 total_filters_used; - u8 reserved; - u8 tunnels_free; - u8 reserved1[9]; + __le16 udp_port; + u8 index; /* 0 to 15 */ + u8 multiple_pfs; + u8 total_filters_used; + u8 reserved1[11]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); @@ -2068,11 +2060,11 @@ struct i40e_aqc_tunnel_key_structure { u8 key1_len; /* 0 to 15 */ u8 key2_len; /* 0 to 15 */ u8 flags; -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 /* response flags */ -#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 -#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 +#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 +#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 u8 network_key_index; #define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 #define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 @@ -2085,21 +2077,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); /* OEM mode commands (direct 0xFE0x) */ struct i40e_aqc_oem_param_change { - __le32 param_type; -#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 -#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 -#define I40E_AQ_OEM_PARAM_MAC 2 - __le32 param_value1; - u8 param_value2[8]; + __le32 param_type; +#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 +#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 +#define I40E_AQ_OEM_PARAM_MAC 2 + __le32 param_value1; + u8 param_value2[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); struct i40e_aqc_oem_state_change { - __le32 state; -#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 -#define I40E_AQ_OEM_STATE_LINK_UP 0x1 - u8 reserved[12]; + __le32 state; +#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 +#define I40E_AQ_OEM_STATE_LINK_UP 0x1 + u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); @@ -2111,18 +2103,18 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); /* set test mode (0xFF01, internal) */ struct i40e_acq_set_test_mode { - u8 mode; -#define I40E_AQ_TEST_PARTIAL 0 -#define I40E_AQ_TEST_FULL 1 -#define I40E_AQ_TEST_NVM 2 - u8 reserved[3]; - u8 command; -#define I40E_AQ_TEST_OPEN 0 -#define I40E_AQ_TEST_CLOSE 1 -#define I40E_AQ_TEST_INC 2 - u8 reserved2[3]; - __le32 address_high; - __le32 address_low; + u8 mode; +#define I40E_AQ_TEST_PARTIAL 0 +#define I40E_AQ_TEST_FULL 1
+#define I40E_AQ_TEST_NVM 2 + u8 reserved[3]; + u8 command; +#define I40E_AQ_TEST_OPEN 0 +#define I40E_AQ_TEST_CLOSE 1 +#define I40E_AQ_TEST_INC 2 + u8 reserved2[3]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); @@ -2175,21 +2167,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); #define I40E_AQ_CLUSTER_ID_ALTRAM 11 struct i40e_aqc_debug_dump_internals { - u8 cluster_id; - u8 table_id; - __le16 data_size; - __le32 idx; - __le32 address_high; - __le32 address_low; + u8 cluster_id; + u8 table_id; + __le16 data_size; + __le32 idx; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); struct i40e_aqc_debug_modify_internals { - u8 cluster_id; - u8 cluster_specific_params[7]; - __le32 address_high; - __le32 address_low; + u8 cluster_id; + u8 cluster_specific_params[7]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 9525605519645eb241f5473bcdd345a0afe21a09..28c40c57d4f520afc3179e36dbb5d2a99558aa68 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -50,6 +50,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_QSFP_A: case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_C: + case I40E_DEV_ID_10G_BASE_T: hw->mac.type = I40E_MAC_XL710; break; case I40E_DEV_ID_VF: diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index f6dcf9dd9290d95554c7505f4bbd9f5832185597..c7f29626eada77ea577dadcf9cf8d23ffef0fe69 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -30,10 +30,7 @@ /* Interrupt Throttling and Rate Limiting Goodies */ #define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ -#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */ -#define I40E_MAX_IRATE 0x03F -#define I40E_MIN_IRATE 0x001 -#define I40E_IRATE_USEC_RESOLUTION 4 +#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */ #define I40E_ITR_100K 0x0005 #define I40E_ITR_20K 0x0019 #define I40E_ITR_8K 0x003E diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 15376436cead3aba82190119ce338cfb5295a82f..68aec11f652335c42463066677775200dbbcb333 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -43,6 +43,7 @@ #define I40E_DEV_ID_QSFP_A 0x1583 #define I40E_DEV_ID_QSFP_B 0x1584 #define I40E_DEV_ID_QSFP_C 0x1585 +#define I40E_DEV_ID_10G_BASE_T 0x1586 #define I40E_DEV_ID_VF 0x154C #define I40E_DEV_ID_VF_HV 0x1571 @@ -259,8 +260,7 @@ enum i40e_aq_resource_access_type { }; struct i40e_nvm_info { - u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */ - u64 hw_semaphore_wait; /* - || - */ + u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */ u32 timeout; /* [ms] */ u16 sr_size; /* Shadow RAM size in words */ bool blank_nvm_mode; /* is NVM empty (no FW present)*/ @@ -475,6 +475,11 @@ struct i40e_hw { u32 debug_mask; }; +static inline bool i40e_is_vf(struct i40e_hw *hw) +{ + return hw->mac.type == I40E_MAC_VF; +} + struct i40e_driver_version { u8 major_version; u8 minor_version; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index 
cd18d5689006f19eaf2cf7fa1abade5b7adc1a0b..e0c8208138f4e45171bf47863b225e53dd994b27 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -79,6 +79,7 @@ enum i40e_virtchnl_ops { I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_VIRTCHNL_OP_GET_STATS, I40E_VIRTCHNL_OP_FCOE, + I40E_VIRTCHNL_OP_CONFIG_RSS, /* PF sends status change events to vfs using * the following op. */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 30ef519d4b911a5c59b4cbf06316b7be0547acd2..981224743c73e472791136fea44bef6d02aff965 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -191,6 +191,7 @@ struct i40evf_adapter { struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; struct list_head vlan_filter_list; char misc_vector_name[IFNAMSIZ + 9]; + int num_active_queues; /* TX */ struct i40e_ring *tx_rings[I40E_MAX_VSI_QP]; @@ -243,7 +244,7 @@ struct i40evf_adapter { struct i40e_hw hw; /* defined in i40e_type.h */ enum i40evf_state_t state; - volatile unsigned long crit_section; + unsigned long crit_section; struct work_struct watchdog_task; bool netdev_registered; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index efee6b290c0f08f6ca51d79cfc5dd6cd7a1dea80..69b97bac182ce763eebb7b93fcb93fa6a55d0178 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -58,8 +58,8 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = { #define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats) #define I40EVF_QUEUE_STATS_LEN(_dev) \ - (((struct i40evf_adapter *) \ - netdev_priv(_dev))->vsi_res->num_queue_pairs \ + (((struct i40evf_adapter *)\ + netdev_priv(_dev))->num_active_queues \ * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64))) #define I40EVF_STATS_LEN(_dev) \ (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev)) @@ -121,11 +121,11 @@ static void i40evf_get_ethtool_stats(struct net_device *netdev, p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset; data[i] = *(u64 *)p; } - for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) { + for (j = 0; j < adapter->num_active_queues; j++) { data[i++] = adapter->tx_rings[j]->stats.packets; data[i++] = adapter->tx_rings[j]->stats.bytes; } - for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) { + for (j = 0; j < adapter->num_active_queues; j++) { data[i++] = adapter->rx_rings[j]->stats.packets; data[i++] = adapter->rx_rings[j]->stats.bytes; } @@ -151,13 +151,13 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < adapter->num_active_queues; i++) { snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i); p += ETH_GSTRING_LEN; } - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < adapter->num_active_queues; i++) { snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i); @@ -175,6 +175,7 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) static u32 i40evf_get_msglevel(struct net_device *netdev) { struct i40evf_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; } @@ -189,6 +190,7 @@ static u32 
i40evf_get_msglevel(struct net_device *netdev) static void i40evf_set_msglevel(struct net_device *netdev, u32 data) { struct i40evf_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; } @@ -219,7 +221,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev, * but the number of rings is not reported. **/ static void i40evf_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring) { struct i40evf_adapter *adapter = netdev_priv(netdev); @@ -280,7 +282,7 @@ static int i40evf_set_ringparam(struct net_device *netdev, * this functionality. **/ static int i40evf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec) { struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40e_vsi *vsi = &adapter->vsi; @@ -308,7 +310,7 @@ static int i40evf_get_coalesce(struct net_device *netdev, * Change current coalescing settings. **/ static int i40evf_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec) { struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40e_hw *hw = &adapter->hw; @@ -430,7 +432,7 @@ static int i40evf_get_rxnfc(struct net_device *netdev, switch (cmd->cmd) { case ETHTOOL_GRXRINGS: - cmd->data = adapter->vsi_res->num_queue_pairs; + cmd->data = adapter->num_active_queues; ret = 0; break; case ETHTOOL_GRXFH: @@ -598,12 +600,12 @@ static void i40evf_get_channels(struct net_device *netdev, struct i40evf_adapter *adapter = netdev_priv(netdev); /* Report maximum channels */ - ch->max_combined = adapter->vsi_res->num_queue_pairs; + ch->max_combined = adapter->num_active_queues; ch->max_other = NONQ_VECS; ch->other_count = NONQ_VECS; - ch->combined_count = adapter->vsi_res->num_queue_pairs; + ch->combined_count = adapter->num_active_queues; } /** @@ -621,17 +623,23 @@ static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev) * i40evf_get_rxfh - get the rx flow hash indirection table * @netdev: network interface device structure * @indir: indirection table - * @key: hash key (will be %NULL until get_rxfh_key_size is implemented) + * @key: hash key * * Reads the indirection table directly from the hardware. Always returns 0. **/ -static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) { struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40e_hw *hw = &adapter->hw; u32 hlut_val; int i, j; + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { hlut_val = rd32(hw, I40E_VFQF_HLUT(i)); indir[j++] = hlut_val & 0xff; @@ -646,19 +654,26 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) * i40evf_set_rxfh - set the rx flow hash indirection table * @netdev: network interface device structure * @indir: indirection table - * @key: hash key (will be %NULL until get_rxfh_key_size is implemented) + * @key: hash key * * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table.
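Each 32-bit I40E_VFQF_HLUT register packs four 8-bit indirection entries, which is why the read loop above produces four indir[] values per register. A standalone sketch of the unpacking, assuming the usual 16 VF HLUT registers (I40E_VFQF_HLUT_MAX_INDEX == 15, i.e. a 64-entry table); read_hlut() stands in for rd32(hw, I40E_VFQF_HLUT(i)):

/* Sketch: expand 16 packed registers into a 64-entry table. */
static void hlut_unpack(u32 (*read_hlut)(int reg), u32 *indir)
{
	int i, j = 0;

	for (i = 0; i <= 15; i++) {
		u32 hlut_val = read_hlut(i);

		indir[j++] = hlut_val & 0xff;
		indir[j++] = (hlut_val >> 8) & 0xff;
		indir[j++] = (hlut_val >> 16) & 0xff;
		indir[j++] = (hlut_val >> 24) & 0xff;
	}
}

The set path later in this hunk performs the exact inverse, OR-ing four table entries back into one register value before writing it.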
**/ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key) + const u8 *key, const u8 hfunc) { struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40e_hw *hw = &adapter->hw; u32 hlut_val; int i, j; + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { hlut_val = indir[j++]; hlut_val |= indir[j++] << 8; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index c51bc7a33bc50898dedf7f1fdfaa5100c45e501a..cabaf599f562c3bc4aec6c332f801f98387b64ca 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = "Intel(R) XL710/X710 Virtual Function Network Driver"; -#define DRV_VERSION "1.0.5" +#define DRV_VERSION "1.0.6" const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation."; @@ -185,6 +185,7 @@ static void i40evf_tx_timeout(struct net_device *netdev) static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter) { struct i40e_hw *hw = &adapter->hw; + wr32(hw, I40E_VFINT_DYN_CTL01, 0); /* read flush */ @@ -200,6 +201,7 @@ static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter) static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter) { struct i40e_hw *hw = &adapter->hw; + wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK | I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK); @@ -226,7 +228,6 @@ static void i40evf_irq_disable(struct i40evf_adapter *adapter) } /* read flush */ rd32(hw, I40E_VFGEN_RSTAT); - } /** @@ -253,8 +254,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask) * @adapter: board private structure * @mask: bitmap of vectors to trigger **/ -static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, - u32 mask) +static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) { struct i40e_hw *hw = &adapter->hw; int i; @@ -397,8 +397,8 @@ static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) int q_vectors; int v_start = 0; int rxr_idx = 0, txr_idx = 0; - int rxr_remaining = adapter->vsi_res->num_queue_pairs; - int txr_remaining = adapter->vsi_res->num_queue_pairs; + int rxr_remaining = adapter->num_active_queues; + int txr_remaining = adapter->num_active_queues; int i, j; int rqpv, tqpv; int err = 0; @@ -551,6 +551,7 @@ static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter) { int i; int q_vectors; + q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (i = 0; i < q_vectors; i++) { @@ -584,7 +585,8 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter) { struct i40e_hw *hw = &adapter->hw; int i; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) + + for (i = 0; i < adapter->num_active_queues; i++) adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i); } @@ -629,7 +631,7 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter) rx_buf_len = ALIGN(max_frame, 1024); } - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < adapter->num_active_queues; i++) { adapter->rx_rings[i]->tail = hw->hw_addr + I40E_QRX_TAIL1(i); adapter->rx_rings[i]->rx_buf_len = rx_buf_len; 
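The receive buffer length chosen above comes from rounding the MTU-derived maximum frame size up to a 1 KiB boundary. A self-contained illustration of that rounding, using the same power-of-two trick as the kernel's ALIGN() macro (the values here are examples only):

#include <stdio.h>

/* Round x up to a multiple of a; a must be a power of two. */
#define ALIGN_POW2(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int max_frame = 1522; /* e.g. 1500-byte MTU plus L2 overhead */

	/* 1522 rounds up to 2048, so each Rx buffer spans two 1 KiB chunks. */
	printf("rx_buf_len = %u\n", ALIGN_POW2(max_frame, 1024));
	return 0;
}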
} @@ -667,9 +669,9 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) struct i40evf_vlan_filter *f; f = i40evf_find_vlan(adapter, vlan); - if (NULL == f) { + if (!f) { f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (NULL == f) + if (!f) return NULL; f->vlan = vlan; @@ -705,7 +707,7 @@ static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan) * @vid: VLAN tag **/ static int i40evf_vlan_rx_add_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) + __always_unused __be16 proto, u16 vid) { struct i40evf_adapter *adapter = netdev_priv(netdev); @@ -720,7 +722,7 @@ static int i40evf_vlan_rx_add_vid(struct net_device *netdev, * @vid: VLAN tag **/ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) + __always_unused __be16 proto, u16 vid) { struct i40evf_adapter *adapter = netdev_priv(netdev); @@ -772,9 +774,9 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, udelay(1); f = i40evf_find_filter(adapter, macaddr); - if (NULL == f) { + if (!f) { f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (NULL == f) { + if (!f) { clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); return NULL; @@ -881,6 +883,7 @@ static void i40evf_napi_enable_all(struct i40evf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { struct napi_struct *napi; + q_vector = adapter->q_vector[q_idx]; napi = &q_vector->napi; napi_enable(napi); @@ -918,8 +921,9 @@ static void i40evf_configure(struct i40evf_adapter *adapter) i40evf_configure_rx(adapter); adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *ring = adapter->rx_rings[i]; + i40evf_alloc_rx_buffers(ring, ring->count); ring->next_to_use = ring->count - 1; writel(ring->next_to_use, ring->tail); @@ -950,7 +954,7 @@ static void i40evf_clean_all_rx_rings(struct i40evf_adapter *adapter) { int i; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) + for (i = 0; i < adapter->num_active_queues; i++) i40evf_clean_rx_ring(adapter->rx_rings[i]); } @@ -962,7 +966,7 @@ static void i40evf_clean_all_tx_rings(struct i40evf_adapter *adapter) { int i; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) + for (i = 0; i < adapter->num_active_queues; i++) i40evf_clean_tx_ring(adapter->tx_rings[i]); } @@ -1064,7 +1068,7 @@ static void i40evf_free_queues(struct i40evf_adapter *adapter) if (!adapter->vsi_res) return; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < adapter->num_active_queues; i++) { if (adapter->tx_rings[i]) kfree_rcu(adapter->tx_rings[i], rcu); adapter->tx_rings[i] = NULL; @@ -1084,11 +1088,11 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) { int i; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *tx_ring; struct i40e_ring *rx_ring; - tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL); + tx_ring = kzalloc(sizeof(*tx_ring) * 2, GFP_KERNEL); if (!tx_ring) goto err_out; @@ -1130,7 +1134,7 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) err = -EIO; goto out; } - pairs = adapter->vsi_res->num_queue_pairs; + pairs = adapter->num_active_queues; /* It's easy to be greedy for MSI-X vectors, but it really * doesn't do us much good if we have a lot more vectors @@ -1172,14 +1176,14 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter 
*adapter) num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); + q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL); if (!q_vector) goto err_out; q_vector->adapter = adapter; q_vector->vsi = &adapter->vsi; q_vector->v_idx = q_idx; netif_napi_add(adapter->netdev, &q_vector->napi, - i40evf_napi_poll, NAPI_POLL_WEIGHT); + i40evf_napi_poll, NAPI_POLL_WEIGHT); adapter->q_vector[q_idx] = q_vector; } @@ -1210,7 +1214,7 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter) int napi_vectors; num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; - napi_vectors = adapter->vsi_res->num_queue_pairs; + napi_vectors = adapter->num_active_queues; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { struct i40e_q_vector *q_vector = adapter->q_vector[q_idx]; @@ -1265,8 +1269,8 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) } dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", - (adapter->vsi_res->num_queue_pairs > 1) ? "Enabled" : - "Disabled", adapter->vsi_res->num_queue_pairs); + (adapter->num_active_queues > 1) ? "Enabled" : "Disabled", + adapter->num_active_queues); return 0; err_alloc_queues: @@ -1284,6 +1288,7 @@ err_set_interrupt: static void i40evf_watchdog_timer(unsigned long data) { struct i40evf_adapter *adapter = (struct i40evf_adapter *)data; + schedule_work(&adapter->watchdog_task); /* timer will be rescheduled in watchdog task */ } @@ -1295,8 +1300,8 @@ static void i40evf_watchdog_timer(unsigned long data) static void i40evf_watchdog_task(struct work_struct *work) { struct i40evf_adapter *adapter = container_of(work, - struct i40evf_adapter, - watchdog_task); + struct i40evf_adapter, + watchdog_task); struct i40e_hw *hw = &adapter->hw; uint32_t rstat_val; @@ -1334,7 +1339,7 @@ static void i40evf_watchdog_task(struct work_struct *work) /* check for reset */ rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; + I40E_VFGEN_RSTAT_VFR_STATE_MASK; if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && (rstat_val != I40E_VFR_VFACTIVE) && (rstat_val != I40E_VFR_COMPLETED)) { @@ -1425,7 +1430,7 @@ static int next_queue(struct i40evf_adapter *adapter, int j) { j += 1; - return j >= adapter->vsi_res->num_queue_pairs ? 0 : j; + return j >= adapter->num_active_queues ? 0 : j; } /** @@ -1434,23 +1439,23 @@ static int next_queue(struct i40evf_adapter *adapter, int j) **/ static void i40evf_configure_rss(struct i40evf_adapter *adapter) { + u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1]; struct i40e_hw *hw = &adapter->hw; u32 lut = 0; int i, j; u64 hena; - /* Set of random keys generated using kernel random number generator */ - static const u32 seed[I40E_VFQF_HKEY_MAX_INDEX + 1] = { - 0x794221b4, 0xbca0c5ab, 0x6cd5ebd9, 0x1ada6127, - 0x983b3aa1, 0x1c4e71eb, 0x7f6328b2, 0xfcdc0da0, - 0xc135cafa, 0x7a6f7e2d, 0xe7102d28, 0x163cd12e, - 0x4954b126 }; + /* No RSS for single queue. 
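When more than one queue is active, the code below seeds the hash key from netdev_rss_key_fill() and then walks the LUT registers, assigning queues round-robin via next_queue(), four 8-bit entries per 32-bit register. A standalone sketch of that distribution (the byte-packing order shown is illustrative; the driver builds each register value entry by entry):

/* Sketch: round-robin queue spread for an RSS lookup table. */
static void fill_lut_round_robin(u32 *lut_regs, int nregs, int nqueues)
{
	int i, b, q = 0;

	for (i = 0; i < nregs; i++) {
		u32 word = 0;

		for (b = 0; b < 4; b++) {
			word |= (u32)q << (8 * b);
			q = (q + 1 >= nqueues) ? 0 : q + 1;
		}
		lut_regs[i] = word;
	}
}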
*/ + if (adapter->num_active_queues == 1) { + wr32(hw, I40E_VFQF_HENA(0), 0); + wr32(hw, I40E_VFQF_HENA(1), 0); + return; + } /* Hash type is configured by the PF - we just supply the key */ - - /* Fill out hash function seed */ + netdev_rss_key_fill(rss_key, sizeof(rss_key)); for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) - wr32(hw, I40E_VFQF_HKEY(i), seed[i]); + wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]); /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ hena = I40E_DEFAULT_RSS_HENA; @@ -1458,7 +1463,7 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter) wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); /* Populate the LUT with max no. of queues in round robin fashion */ - j = adapter->vsi_res->num_queue_pairs; + j = adapter->num_active_queues; for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { j = next_queue(adapter, j); lut = j; @@ -1494,7 +1499,7 @@ static void i40evf_reset_task(struct work_struct *work) while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) - udelay(500); + usleep_range(500, 1000); if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) { dev_info(&adapter->pdev->dev, "Requesting reset from PF\n"); @@ -1508,8 +1513,7 @@ static void i40evf_reset_task(struct work_struct *work) if ((rstat_val != I40E_VFR_VFACTIVE) && (rstat_val != I40E_VFR_COMPLETED)) break; - else - msleep(I40EVF_RESET_WAIT_MS); + msleep(I40EVF_RESET_WAIT_MS); } if (i == I40EVF_RESET_WAIT_COUNT) { adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; @@ -1523,8 +1527,7 @@ static void i40evf_reset_task(struct work_struct *work) if ((rstat_val == I40E_VFR_VFACTIVE) || (rstat_val == I40E_VFR_COMPLETED)) break; - else - msleep(I40EVF_RESET_WAIT_MS); + msleep(I40EVF_RESET_WAIT_MS); } if (i == I40EVF_RESET_WAIT_COUNT) { struct i40evf_mac_filter *f, *ftmp; @@ -1575,12 +1578,12 @@ continue_reset: /* kill and reinit the admin queue */ if (i40evf_shutdown_adminq(hw)) dev_warn(&adapter->pdev->dev, - "%s: Failed to destroy the Admin Queue resources\n", - __func__); + "%s: Failed to destroy the Admin Queue resources\n", + __func__); err = i40evf_init_adminq(hw); if (err) dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n", - __func__, err); + __func__, err); adapter->aq_pending = 0; adapter->aq_required = 0; @@ -1632,8 +1635,8 @@ static void i40evf_adminq_task(struct work_struct *work) if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) return; - event.msg_size = I40EVF_MAX_AQ_BUF_SIZE; - event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); + event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) return; @@ -1645,13 +1648,9 @@ static void i40evf_adminq_task(struct work_struct *work) i40evf_virtchnl_completion(adapter, v_msg->v_opcode, v_msg->v_retval, event.msg_buf, - event.msg_size); - if (pending != 0) { - dev_info(&adapter->pdev->dev, - "%s: ARQ: Pending events %d\n", - __func__, pending); + event.msg_len); + if (pending != 0) memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE); - } } while (pending); /* check for error indications */ @@ -1705,10 +1704,9 @@ static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter) { int i; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) + for (i = 0; i < adapter->num_active_queues; i++) if (adapter->tx_rings[i]->desc) i40evf_free_tx_resources(adapter->tx_rings[i]); - } /** @@ -1725,7 +1723,7 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter) { int i, err = 0; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < 
adapter->num_active_queues; i++) { adapter->tx_rings[i]->count = adapter->tx_desc_count; err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]); if (!err) @@ -1753,7 +1751,7 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter) { int i, err = 0; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < adapter->num_active_queues; i++) { adapter->rx_rings[i]->count = adapter->rx_desc_count; err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]); if (!err) @@ -1776,7 +1774,7 @@ static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter) { int i; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) + for (i = 0; i < adapter->num_active_queues; i++) if (adapter->rx_rings[i]->desc) i40evf_free_rx_resources(adapter->rx_rings[i]); } @@ -1980,7 +1978,7 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw) if ((rstat == I40E_VFR_VFACTIVE) || (rstat == I40E_VFR_COMPLETED)) return 0; - udelay(10); + usleep_range(10, 20); } return -EBUSY; } @@ -2022,7 +2020,7 @@ static void i40evf_init_task(struct work_struct *work) err = i40evf_check_reset_complete(hw); if (err) { dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", - err); + err); goto err; } hw->aq.num_arq_entries = I40EVF_AQ_LEN; @@ -2047,6 +2045,8 @@ static void i40evf_init_task(struct work_struct *work) case __I40EVF_INIT_VERSION_CHECK: if (!i40evf_asq_done(hw)) { dev_err(&pdev->dev, "Admin queue command never completed\n"); + i40evf_shutdown_adminq(hw); + adapter->state = __I40EVF_STARTUP; goto err; } @@ -2054,7 +2054,7 @@ static void i40evf_init_task(struct work_struct *work) err = i40evf_verify_api_ver(adapter); if (err) { dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n", - err); + err); if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { dev_info(&pdev->dev, "Resending request\n"); err = i40evf_send_api_ver(adapter); @@ -2080,8 +2080,11 @@ static void i40evf_init_task(struct work_struct *work) goto err; } err = i40evf_get_vf_config(adapter); - if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) - goto restart; + if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { + dev_info(&pdev->dev, "Resending VF config request\n"); + err = i40evf_send_vf_config_msg(adapter); + goto err; + } if (err) { dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err); @@ -2138,7 +2141,7 @@ static void i40evf_init_task(struct work_struct *work) ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (NULL == f) + if (!f) goto err_sw_init; ether_addr_copy(f->macaddr, adapter->hw.mac.addr); @@ -2152,6 +2155,9 @@ static void i40evf_init_task(struct work_struct *work) adapter->watchdog_timer.data = (unsigned long)adapter; mod_timer(&adapter->watchdog_timer, jiffies + 1); + adapter->num_active_queues = min_t(int, + adapter->vsi_res->num_queue_pairs, + (int)(num_online_cpus())); adapter->tx_desc_count = I40EVF_DEFAULT_TXD; adapter->rx_desc_count = I40EVF_DEFAULT_RXD; err = i40evf_init_interrupt_scheme(adapter); @@ -2500,8 +2506,9 @@ static struct pci_driver i40evf_driver = { static int __init i40evf_init_module(void) { int ret; + pr_info("i40evf: %s - version %s\n", i40evf_driver_string, - i40evf_driver_version); + i40evf_driver_version); pr_info("%s\n", i40evf_copyright); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 66d12f5b4ca83d0de07c7b8fbc567ccaa6def1dd..5fde5a7f4591053497d26d2423cf031fc6a9523a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ 
b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -89,27 +89,37 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter) struct i40e_virtchnl_version_info *pf_vvi; struct i40e_hw *hw = &adapter->hw; struct i40e_arq_event_info event; + enum i40e_virtchnl_ops op; i40e_status err; - event.msg_size = I40EVF_MAX_AQ_BUF_SIZE; - event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); + event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) { err = -ENOMEM; goto out; } - err = i40evf_clean_arq_element(hw, &event, NULL); - if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) - goto out_alloc; + while (1) { + err = i40evf_clean_arq_element(hw, &event, NULL); + /* When the AQ is empty, i40evf_clean_arq_element will return + * nonzero and this loop will terminate. + */ + if (err) + goto out_alloc; + op = + (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high); + if (op == I40E_VIRTCHNL_OP_VERSION) + break; + } + err = (i40e_status)le32_to_cpu(event.desc.cookie_low); if (err) goto out_alloc; - if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) != - I40E_VIRTCHNL_OP_VERSION) { + if (op != I40E_VIRTCHNL_OP_VERSION) { dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n", - le32_to_cpu(event.desc.cookie_high)); + op); err = -EIO; goto out_alloc; } @@ -153,42 +163,34 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter) { struct i40e_hw *hw = &adapter->hw; struct i40e_arq_event_info event; - u16 len; + enum i40e_virtchnl_ops op; i40e_status err; + u16 len; len = sizeof(struct i40e_virtchnl_vf_resource) + I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource); - event.msg_size = len; - event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); + event.buf_len = len; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) { err = -ENOMEM; goto out; } - err = i40evf_clean_arq_element(hw, &event, NULL); - if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) - goto out_alloc; - - err = (i40e_status)le32_to_cpu(event.desc.cookie_low); - if (err) { - dev_err(&adapter->pdev->dev, - "%s: Error returned from PF, %d, %d\n", __func__, - le32_to_cpu(event.desc.cookie_high), - le32_to_cpu(event.desc.cookie_low)); - err = -EIO; - goto out_alloc; + while (1) { + /* When the AQ is empty, i40evf_clean_arq_element will return + * nonzero and this loop will terminate. 
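Both i40evf_verify_api_ver() and i40evf_get_vf_config() now share this drain-until-opcode pattern: keep cleaning receive-queue elements, skip unrelated messages, and stop either on the wanted opcode or when the queue empties. A condensed sketch of the pattern (the wrapper function is hypothetical; the calls inside it are the driver's own):

/* Hypothetical wrapper: wait for one specific virtchnl opcode. */
static i40e_status arq_wait_for_op(struct i40e_hw *hw,
				   struct i40e_arq_event_info *event,
				   enum i40e_virtchnl_ops want)
{
	enum i40e_virtchnl_ops op;
	i40e_status err;

	while (1) {
		/* Returns nonzero once the ARQ is empty, ending the loop. */
		err = i40evf_clean_arq_element(hw, event, NULL);
		if (err)
			return err;
		op = (enum i40e_virtchnl_ops)
		     le32_to_cpu(event->desc.cookie_high);
		if (op == want)
			return 0;
		/* Unrelated message: drop it and keep draining. */
	}
}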
+ */ + err = i40evf_clean_arq_element(hw, &event, NULL); + if (err) + goto out_alloc; + op = + (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high); + if (op == I40E_VIRTCHNL_OP_GET_VF_RESOURCES) + break; } - if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) != - I40E_VIRTCHNL_OP_GET_VF_RESOURCES) { - dev_err(&adapter->pdev->dev, - "%s: Invalid response from PF, %d, %d\n", __func__, - le32_to_cpu(event.desc.cookie_high), - le32_to_cpu(event.desc.cookie_low)); - err = -EIO; - goto out_alloc; - } - memcpy(adapter->vf_res, event.msg_buf, min(event.msg_size, len)); + err = (i40e_status)le32_to_cpu(event.desc.cookie_low); + memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len)); i40e_vf_parse_hw_config(hw, adapter->vf_res); out_alloc: @@ -207,7 +209,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) { struct i40e_virtchnl_vsi_queue_config_info *vqci; struct i40e_virtchnl_queue_pair_info *vqpi; - int pairs = adapter->vsi_res->num_queue_pairs; + int pairs = adapter->num_active_queues; int i, len; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { @@ -273,7 +275,7 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter) } adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES; vqs.vsi_id = adapter->vsi_res->vsi_id; - vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1; + vqs.tx_queues = (1 << adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES; adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; @@ -299,7 +301,7 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter) } adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES; vqs.vsi_id = adapter->vsi_res->vsi_id; - vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1; + vqs.tx_queues = (1 << adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES; adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; @@ -393,7 +395,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) (count * sizeof(struct i40e_virtchnl_ether_addr)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n", - __func__); + __func__); count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_ether_addr_list)) / sizeof(struct i40e_virtchnl_ether_addr); @@ -454,7 +456,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) (count * sizeof(struct i40e_virtchnl_ether_addr)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n", - __func__); + __func__); count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_ether_addr_list)) / sizeof(struct i40e_virtchnl_ether_addr); @@ -516,7 +518,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) (count * sizeof(u16)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n", - __func__); + __func__); count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_vlan_filter_list)) / sizeof(u16); @@ -576,7 +578,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) (count * sizeof(u16)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n", - __func__); + __func__); count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_vlan_filter_list)) / sizeof(u16); @@ -635,6 +637,7 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) void 
i40evf_request_stats(struct i40evf_adapter *adapter) { struct i40e_virtchnl_queue_select vqs; + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* no error message, this isn't crucial */ return; @@ -709,19 +712,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, "%s: Unknown event %d from pf\n", __func__, vpe->event); break; - } return; } - if (v_opcode != adapter->current_op) { - dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d\n", - __func__, adapter->current_op, v_opcode); - /* We're probably completely screwed at this point, but clear - * the current op and try to carry on.... - */ - adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; - return; - } + if (v_opcode != adapter->current_op) + dev_info(&adapter->pdev->dev, "Pending op is %d, received %d\n", + adapter->current_op, v_opcode); if (v_retval) { dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n", __func__, v_retval, v_opcode); @@ -773,8 +769,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS); break; default: - dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF\n", - __func__, v_opcode); + dev_info(&adapter->pdev->dev, "Received unexpected message %d from PF\n", + v_opcode); break; } /* switch v_opcode */ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 02cfd3b14762cbf884ef1b0f4586735f14bee3c9..d5673eb90c542c75f3ae4903997afc736a7d7956 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2842,11 +2842,16 @@ static u32 igb_get_rxfh_indir_size(struct net_device *netdev) return IGB_RETA_SIZE; } -static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) { struct igb_adapter *adapter = netdev_priv(netdev); int i; + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; for (i = 0; i < IGB_RETA_SIZE; i++) indir[i] = adapter->rss_indir_tbl[i]; @@ -2889,13 +2894,20 @@ void igb_write_rss_indir_tbl(struct igb_adapter *adapter) } static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key) + const u8 *key, const u8 hfunc) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; int i; u32 num_queues; + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + num_queues = adapter->rss_queues; switch (hw->mac.type) { diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index b85880a6e4c4646a43179cc0d33717b27e675b47..2e526d4904a6912638030297a9f60abd792d5f53 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3373,14 +3373,11 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 mrqc, rxcsum; u32 j, num_rx_queues; - static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741, - 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE, - 0xA32DCB77, 0x0CF23080, 0x3BB7426A, - 0xFA01ACBE }; + u32 rss_key[10]; - /* Fill out hash function seeds */ + netdev_rss_key_fill(rss_key, sizeof(rss_key)); for (j = 0; j < 10; j++) - wr32(E1000_RSSRK(j), rsskey[j]); + wr32(E1000_RSSRK(j), rss_key[j]); num_rx_queues = 
adapter->rss_queues; @@ -5092,12 +5089,8 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, /* The minimum packet size with TCTL.PSP set is 17 so pad the skb * in order to meet this minimum size requirement. */ - if (unlikely(skb->len < 17)) { - if (skb_pad(skb, 17 - skb->len)) - return NETDEV_TX_OK; - skb->len = 17; - skb_set_tail_pointer(skb, 17); - } + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); } @@ -6649,8 +6642,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, #endif /* allocate a skb to store the frags */ - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - IGB_RX_HDR_LEN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); if (unlikely(!skb)) { rx_ring->rx_stats.alloc_failed++; return NULL; @@ -6851,14 +6843,9 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring, if (skb_is_nonlinear(skb)) igb_pull_tail(rx_ring, rx_desc, skb); - /* if skb_pad returns an error the skb was freed */ - if (unlikely(skb->len < 60)) { - int pad_len = 60 - skb->len; - - if (skb_pad(skb, pad_len)) - return true; - __skb_put(skb, pad_len); - } + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; return false; } @@ -6993,7 +6980,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, return true; /* alloc new page for storage */ - page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL); + page = dev_alloc_page(); if (unlikely(!page)) { rx_ring->rx_stats.alloc_failed++; return false; diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 055961b0f24b05abdc66f2bdab71e83093e9cdad..aa87605b144a23a98d3ed7121c035c8703ad4627 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -1963,7 +1963,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter, * this should improve performance for small packets with large amounts * of reassembly being done in the stack */ -static void ixgb_check_copybreak(struct net_device *netdev, +static void ixgb_check_copybreak(struct napi_struct *napi, struct ixgb_buffer *buffer_info, u32 length, struct sk_buff **skb) { @@ -1972,7 +1972,7 @@ static void ixgb_check_copybreak(struct net_device *netdev, if (length > copybreak) return; - new_skb = netdev_alloc_skb_ip_align(netdev, length); + new_skb = napi_alloc_skb(napi, length); if (!new_skb) return; @@ -2064,7 +2064,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do) goto rxdesc_done; } - ixgb_check_copybreak(netdev, buffer_info, length, &skb); + ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb); /* Good Receive */ skb_put(skb, length); diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index be2989e600099266846fe8bf78487f9b3de63309..35e6fa643c7ebc8bfc9905d01f95313fb7a2d2e1 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ - ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o + ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ ixgbe_dcb_82599.o ixgbe_dcb_nl.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 
5032a602d5c98265d63037d6aac1e1bfc3188c76..b6137be43920ddd47f27b2684250105a4b575e28 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -300,16 +300,17 @@ enum ixgbe_ring_f_enum { RING_F_ARRAY_SIZE /* must be last in enum set */ }; -#define IXGBE_MAX_RSS_INDICES 16 -#define IXGBE_MAX_VMDQ_INDICES 64 -#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */ -#define IXGBE_MAX_FCOE_INDICES 8 -#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) -#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) -#define IXGBE_MAX_L2A_QUEUES 4 -#define IXGBE_BAD_L2A_QUEUE 3 -#define IXGBE_MAX_MACVLANS 31 -#define IXGBE_MAX_DCBMACVLANS 8 +#define IXGBE_MAX_RSS_INDICES 16 +#define IXGBE_MAX_RSS_INDICES_X550 64 +#define IXGBE_MAX_VMDQ_INDICES 64 +#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */ +#define IXGBE_MAX_FCOE_INDICES 8 +#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#define IXGBE_MAX_L2A_QUEUES 4 +#define IXGBE_BAD_L2A_QUEUE 3 +#define IXGBE_MAX_MACVLANS 31 +#define IXGBE_MAX_DCBMACVLANS 8 struct ixgbe_ring_feature { u16 limit; /* upper limit on feature indices */ @@ -553,11 +554,6 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; } -static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value) -{ - writel(value, ring->tail); -} - #define IXGBE_RX_DESC(R, i) \ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) #define IXGBE_TX_DESC(R, i) \ @@ -769,6 +765,21 @@ struct ixgbe_adapter { unsigned long fwd_bitmask; /* Bitmask indicating in use pools */ }; +static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) +{ + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + return IXGBE_MAX_RSS_INDICES; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + return IXGBE_MAX_RSS_INDICES_X550; + default: + return 0; + } +} + struct ixgbe_fdir_filter { struct hlist_node fdir_node; union ixgbe_atr_input filter; @@ -804,11 +815,15 @@ enum ixgbe_boards { board_82598, board_82599, board_X540, + board_X550, + board_X550EM_x, }; extern struct ixgbe_info ixgbe_82598_info; extern struct ixgbe_info ixgbe_82599_info; extern struct ixgbe_info ixgbe_X540_info; +extern struct ixgbe_info ixgbe_X550_info; +extern struct ixgbe_info ixgbe_X550EM_x_info; #ifdef CONFIG_IXGBE_DCB extern const struct dcbnl_rtnl_ops dcbnl_ops; #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index b5f484bf3fdadd599517a4b549e08a776c596ec8..9c66babd4edd6116139575fe4fab3cf37d8470e9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1625,7 +1625,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw) * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum * @hw: pointer to hardware structure **/ -u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) +s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) { u16 i; u16 j; @@ -1636,7 +1636,7 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) /* Include 0x0-0x3F in the checksum */ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { - if (hw->eeprom.ops.read(hw, i, &word) != 0) { + if (hw->eeprom.ops.read(hw, i, &word)) { hw_dbg(hw, "EEPROM read failed\n"); break; } @@ -1645,24 +1645,35 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) /* Include all data from 
pointers except for the fw pointer */ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { - hw->eeprom.ops.read(hw, i, &pointer); + if (hw->eeprom.ops.read(hw, i, &pointer)) { + hw_dbg(hw, "EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + + /* If the pointer seems invalid */ + if (pointer == 0xFFFF || pointer == 0) + continue; + + if (hw->eeprom.ops.read(hw, pointer, &length)) { + hw_dbg(hw, "EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } - /* Make sure the pointer seems valid */ - if (pointer != 0xFFFF && pointer != 0) { - hw->eeprom.ops.read(hw, pointer, &length); + if (length == 0xFFFF || length == 0) + continue; - if (length != 0xFFFF && length != 0) { - for (j = pointer+1; j <= pointer+length; j++) { - hw->eeprom.ops.read(hw, j, &word); - checksum += word; - } + for (j = pointer + 1; j <= pointer + length; j++) { + if (hw->eeprom.ops.read(hw, j, &word)) { + hw_dbg(hw, "EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; } + checksum += word; } } checksum = (u16)IXGBE_EEPROM_SUM - checksum; - return checksum; + return (s32)checksum; } /** @@ -1686,26 +1697,33 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + hw_dbg(hw, "EEPROM read failed\n"); + return status; + } - if (status == 0) { - checksum = hw->eeprom.ops.calc_checksum(hw); + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; - hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); + checksum = (u16)(status & 0xffff); - /* - * Verify read checksum from EEPROM is the same as - * calculated checksum - */ - if (read_checksum != checksum) - status = IXGBE_ERR_EEPROM_CHECKSUM; - - /* If the user cares, return the calculated checksum */ - if (checksum_val) - *checksum_val = checksum; - } else { + status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); + if (status) { hw_dbg(hw, "EEPROM read failed\n"); + return status; } + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) + status = IXGBE_ERR_EEPROM_CHECKSUM; + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + return status; } @@ -1724,15 +1742,19 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); - - if (status == 0) { - checksum = hw->eeprom.ops.calc_checksum(hw); - status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, - checksum); - } else { + if (status) { hw_dbg(hw, "EEPROM read failed\n"); + return status; } + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum); + return status; } @@ -2469,7 +2491,7 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) * Acquires the SWFW semaphore through the GSSR register for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ -s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) { u32 gssr = 0; u32 swmask = mask; @@ -2514,7 +2536,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) * Releases the SWFW semaphore through the GSSR register for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ -void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask) { u32 gssr; u32 
swmask = mask; @@ -2799,6 +2821,8 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; break; @@ -3192,17 +3216,27 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, *link_up = false; } - if ((links_reg & IXGBE_LINKS_SPEED_82599) == - IXGBE_LINKS_SPEED_10G_82599) - *speed = IXGBE_LINK_SPEED_10GB_FULL; - else if ((links_reg & IXGBE_LINKS_SPEED_82599) == - IXGBE_LINKS_SPEED_1G_82599) + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + if ((hw->mac.type >= ixgbe_mac_X550) && + (links_reg & IXGBE_LINKS_SPEED_NON_STD)) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + else + *speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + case IXGBE_LINKS_SPEED_1G_82599: *speed = IXGBE_LINK_SPEED_1GB_FULL; - else if ((links_reg & IXGBE_LINKS_SPEED_82599) == - IXGBE_LINKS_SPEED_100_82599) - *speed = IXGBE_LINK_SPEED_100_FULL; - else + break; + case IXGBE_LINKS_SPEED_100_82599: + if ((hw->mac.type >= ixgbe_mac_X550) && + (links_reg & IXGBE_LINKS_SPEED_NON_STD)) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + else + *speed = IXGBE_LINK_SPEED_100_FULL; + break; + default: *speed = IXGBE_LINK_SPEED_UNKNOWN; + } return 0; } @@ -3434,23 +3468,34 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) * @buffer: contains the command to write and where the return status will * be placed * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * @return_data: read and return data from the buffer (true) or not (false) + * Needed because FW structures are big endian and decoding of + * these fields can be 8 bit or 16 bit based on command. Decoding + * is not easily understood without making a table of commands. + * So we will leave this up to the caller to read back the data + * in these cases. * * Communicates with the manageability block. On success return 0 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. **/ -static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, - u32 length) +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, + bool return_data) { - u32 hicr, i, bi; + u32 hicr, i, bi, fwsts; u32 hdr_size = sizeof(struct ixgbe_hic_hdr); - u8 buf_len, dword_len; + u16 buf_len, dword_len; - if (length == 0 || length & 0x3 || - length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { - hw_dbg(hw, "Buffer length failure.\n"); + if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } + /* Set bit 9 of FWSTS clearing FW reset indication */ + fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); + IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI); + /* Check that the host interface is enabled. */ hicr = IXGBE_READ_REG(hw, IXGBE_HICR); if ((hicr & IXGBE_HICR_EN) == 0) { @@ -3458,7 +3503,12 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, return IXGBE_ERR_HOST_INTERFACE_COMMAND; } - /* Calculate length in DWORDs */ + /* Calculate length in DWORDs. 
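The checksum rework earlier in this file changes ixgbe_calc_eeprom_checksum_generic() from returning a bare u16 to returning an s32, so a failed EEPROM read can propagate as a negative error code while a successful call carries the 16-bit checksum in the low half. A minimal standalone sketch of that convention follows; it is illustrative only (calc_checksum(), img[] and ERR_EEPROM are stand-ins, and 0xBABA mirrors the driver's IXGBE_EEPROM_SUM constant):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ERR_EEPROM (-1)   /* stand-in for IXGBE_ERR_EEPROM */

/* Model of the pattern: a negative return is an error code,
 * otherwise the low 16 bits carry the computed checksum. */
static int32_t calc_checksum(const uint16_t *words, size_t n)
{
    uint16_t sum = 0;
    size_t i;

    if (!words)
        return ERR_EEPROM;      /* a read failure would land here */
    for (i = 0; i < n; i++)
        sum += words[i];
    /* stored checksum makes the whole image sum to the magic value */
    return (int32_t)(uint16_t)(0xBABA - sum);
}

int main(void)
{
    uint16_t img[4] = { 0x1111, 0x2222, 0x3333, 0x4444 };
    int32_t status = calc_checksum(img, 4);

    if (status < 0)
        return 1;
    printf("checksum = 0x%04x\n", (uint16_t)status);
    return 0;
}

Callers then test status < 0 before masking, exactly as the validate/update paths above do with (u16)(status & 0xffff).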
We must be DWORD aligned */ + if ((length % (sizeof(u32))) != 0) { + hw_dbg(hw, "Buffer length failure, not aligned to dword"); + return IXGBE_ERR_INVALID_ARGUMENT; + } + dword_len = length >> 2; /* @@ -3472,7 +3522,7 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, /* Setting this bit tells the ARC that a new command is pending. */ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); - for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) { + for (i = 0; i < timeout; i++) { hicr = IXGBE_READ_REG(hw, IXGBE_HICR); if (!(hicr & IXGBE_HICR_C)) break; @@ -3480,12 +3530,15 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, } /* Check command successful completion. */ - if (i == IXGBE_HI_COMMAND_TIMEOUT || + if ((timeout != 0 && i == timeout) || (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) { hw_dbg(hw, "Command has failed with no status valid.\n"); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } + if (!return_data) + return 0; + /* Calculate length in DWORDs */ dword_len = hdr_size >> 2; @@ -3556,7 +3609,9 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, - sizeof(fw_cmd)); + sizeof(fw_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); if (ret_val != 0) continue; @@ -3583,7 +3638,8 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, **/ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) { - u32 gcr_ext, hlreg0; + u32 gcr_ext, hlreg0, i, poll; + u16 value; /* * If double reset is not requested then all transactions should @@ -3600,6 +3656,23 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK); + /* wait for a last completion before clearing buffers */ + IXGBE_WRITE_FLUSH(hw); + usleep_range(3000, 6000); + + /* Before proceeding, make sure that the PCIe block does not have + * transactions pending. 
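Both of the changes above lean on the same defensive idiom: poll a hardware status bit a bounded number of times (ixgbe_pcie_timeout_poll() supplies the bound for the PCIe pending check, the caller-supplied timeout bounds the host interface command loop) rather than spinning forever on a device that may never respond. A generic sketch of the idiom, with device_busy() as a stand-in for the register read:

#include <stdbool.h>
#include <unistd.h>

/* Stand-in for reading a hardware "busy" flag; not driver code. */
static bool device_busy(void)
{
    static int remaining = 5;   /* pretend hw goes idle after 5 polls */
    return remaining-- > 0;
}

/* Poll with an upper bound so a wedged device cannot hang the caller. */
static int wait_until_idle(unsigned int max_polls)
{
    unsigned int i;

    for (i = 0; i < max_polls; i++) {
        if (!device_busy())
            return 0;           /* success */
        usleep(100);            /* mirrors the usleep_range(100, 200) above */
    }
    return -1;                  /* timed out; the caller decides what next */
}

int main(void)
{
    return wait_until_idle(100) ? 1 : 0;
}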
+ */ + poll = ixgbe_pcie_timeout_poll(hw); + for (i = 0; i < poll; i++) { + usleep_range(100, 200); + value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS); + if (ixgbe_removed(hw->hw_addr)) + break; + if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) + break; + } + /* initiate cleaning flow for buffers in the PCIe transaction layer */ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 2ae5d4b8fc93e318bba749c6042affb8eb1616a7..8cfadcb2676ed3a30386927b359e70408e692d9a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -64,7 +64,7 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); -u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); +s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, u16 *checksum_val); s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); @@ -84,8 +84,8 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); void ixgbe_fc_autoneg(struct ixgbe_hw *hw); -s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); -void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask); s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq); @@ -110,6 +110,8 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, u8 ver); +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data); void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); bool ixgbe_mng_enabled(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index 48f35fc963f89b5d3e7f8260a3a916970493c260..a507a6fe36243a32bc27409998fc25660c105ed5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -286,6 +286,8 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, bwgid, ptype); case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max, bwgid, ptype, prio_tc); default: @@ -302,6 +304,8 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) return ixgbe_dcb_config_pfc_82598(hw, pfc_en); case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc); default: break; @@ -357,6 +361,8 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, prio_type, prio_tc); ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, @@ -385,6 +391,8 @@ void 
ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map) switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: ixgbe_dcb_read_rtrup2tc_82599(hw, map); break; default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index 58a7f5312a96aec7e0ca8473db835de844c00a26..2707bda374187fe3bdbb961873cc55b023b3a2e5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -180,6 +180,7 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: for (j = 0; j < netdev->addr_len; j++, i++) perm_addr[i] = adapter->hw.mac.san_addr[j]; break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 0ae038b9af90cfc7dc15bd01c0b4014e37fa1911..e5be0dd508deab592d5219d031ccaff8f2662e9b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -511,6 +511,8 @@ static void ixgbe_get_regs(struct net_device *netdev, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); break; @@ -622,6 +624,8 @@ static void ixgbe_get_regs(struct net_device *netdev, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS); regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS); for (i = 0; i < 8; i++) @@ -1406,6 +1410,8 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: toggle = 0x7FFFF30F; test = reg_test_82599; break; @@ -1644,6 +1650,8 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); reg_ctl &= ~IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); @@ -1680,6 +1688,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); reg_data |= IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); @@ -1733,12 +1743,16 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE; IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data); - /* X540 needs to set the MACC.FLU bit to force link up */ - if (adapter->hw.mac.type == ixgbe_mac_X540) { + /* X540 and X550 needs to set the MACC.FLU bit to force link up */ + switch (adapter->hw.mac.type) { + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); reg_data |= IXGBE_MACC_FLU; IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data); - } else { + break; + default: if (hw->mac.orig_autoc) { reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data); @@ -2776,7 +2790,14 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, /* if we changed something we 
need to update flags */ if (flags2 != adapter->flags2) { struct ixgbe_hw *hw = &adapter->hw; - u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); + u32 mrqc; + unsigned int pf_pool = adapter->num_vfs; + + if ((hw->mac.type >= ixgbe_mac_X550) && + (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool)); + else + mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); if ((flags2 & UDP_RSS_FLAGS) && !(adapter->flags2 & UDP_RSS_FLAGS)) @@ -2799,7 +2820,11 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; - IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + if ((hw->mac.type >= ixgbe_mac_X550) && + (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc); + else + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); } return 0; @@ -2833,6 +2858,8 @@ static int ixgbe_get_ts_info(struct net_device *dev, struct ixgbe_adapter *adapter = netdev_priv(dev); switch (adapter->hw.mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: case ixgbe_mac_X540: case ixgbe_mac_82599EB: info->so_timestamping = @@ -2900,7 +2927,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter) max_combined = IXGBE_MAX_FDIR_INDICES; } else { /* support up to 16 queues with RSS */ - max_combined = IXGBE_MAX_RSS_INDICES; + max_combined = ixgbe_max_rss_indices(adapter); } return max_combined; @@ -2948,6 +2975,7 @@ static int ixgbe_set_channels(struct net_device *dev, { struct ixgbe_adapter *adapter = netdev_priv(dev); unsigned int count = ch->combined_count; + u8 max_rss_indices = ixgbe_max_rss_indices(adapter); /* verify they are not requesting separate vectors */ if (!count || ch->rx_count || ch->tx_count) @@ -2964,9 +2992,9 @@ static int ixgbe_set_channels(struct net_device *dev, /* update feature limits from largest to smallest supported values */ adapter->ring_feature[RING_F_FDIR].limit = count; - /* cap RSS limit at 16 */ - if (count > IXGBE_MAX_RSS_INDICES) - count = IXGBE_MAX_RSS_INDICES; + /* cap RSS limit */ + if (count > max_rss_indices) + count = max_rss_indices; adapter->ring_feature[RING_F_RSS].limit = count; #ifdef IXGBE_FCOE diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index ce40c77381e9d957de1e06cc86ffabad8b6998ce..68e1e757ecefce38e456c5cf2ae017ddea2ed054 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -126,6 +126,8 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: if (num_tcs > 4) { /* * TCs : TC0/1 TC2/3 TC4-7 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index cc51554c9e99a49e74c24ab7bf8f97848a06d7a3..798b05556e1bcced693bc680ac076992e1a59e85 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -50,6 +51,15 @@ #include #include +#ifdef CONFIG_OF +#include +#endif + +#ifdef CONFIG_SPARC +#include +#include +#endif + #include "ixgbe.h" #include "ixgbe_common.h" #include "ixgbe_dcb_82599.h" @@ -65,15 +75,17 @@ char ixgbe_default_device_descr[] = static char ixgbe_default_device_descr[] = "Intel(R) 10 Gigabit Network Connection"; #endif -#define DRV_VERSION "3.19.1-k" +#define DRV_VERSION "4.0.1-k" const 
char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = "Copyright (c) 1999-2014 Intel Corporation."; static const struct ixgbe_info *ixgbe_info_tbl[] = { - [board_82598] = &ixgbe_82598_info, - [board_82599] = &ixgbe_82599_info, - [board_X540] = &ixgbe_X540_info, + [board_82598] = &ixgbe_82598_info, + [board_82599] = &ixgbe_82599_info, + [board_X540] = &ixgbe_X540_info, + [board_X550] = &ixgbe_X550_info, + [board_X550EM_x] = &ixgbe_X550EM_x_info, }; /* ixgbe_pci_tbl - PCI Device ID Table @@ -115,6 +127,9 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, /* required last entry */ {0, } }; @@ -835,6 +850,8 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: if (direction == -1) { /* other causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; @@ -871,6 +888,8 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: mask = (qmask & 0xFFFFFFFF); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); mask = (qmask >> 32); @@ -1412,41 +1431,21 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, skb->ip_summed = CHECKSUM_UNNECESSARY; } -static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) -{ - rx_ring->next_to_use = val; - - /* update next to alloc since we have filled the ring */ - rx_ring->next_to_alloc = val; - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). 
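The removed ixgbe_release_rx_desc() helper is folded into ixgbe_alloc_rx_buffers() further down, but the ordering rule it documented is preserved: all descriptor writes must be globally visible before the tail write tells the NIC to fetch them, hence the wmb() ahead of writel(). A userspace model of the same publish pattern, using a C11 release fence in place of wmb() (ring, tail and publish() are stand-ins, not driver structures):

#include <stdatomic.h>
#include <stdint.h>

#define RING_SIZE 256

static uint64_t ring[RING_SIZE];   /* stands in for the descriptor ring */
static _Atomic uint32_t tail;      /* stands in for the tail register */

/* Publish a descriptor, then advance the consumer-visible tail.
 * The release fence mirrors the driver's wmb() before writel(). */
static void publish(uint32_t next, uint64_t desc)
{
    ring[next % RING_SIZE] = desc;
    atomic_thread_fence(memory_order_release);
    atomic_store_explicit(&tail, next, memory_order_relaxed);
}

int main(void)
{
    publish(1, 0xabcd);
    return 0;
}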
- */ - wmb(); - ixgbe_write_tail(rx_ring, val); -} - static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *bi) { struct page *page = bi->page; - dma_addr_t dma = bi->dma; + dma_addr_t dma; /* since we are recycling buffers we should seldom need to alloc */ - if (likely(dma)) + if (likely(page)) return true; /* alloc new page for storage */ - if (likely(!page)) { - page = __skb_alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP, - bi->skb, ixgbe_rx_pg_order(rx_ring)); - if (unlikely(!page)) { - rx_ring->rx_stats.alloc_rx_page_failed++; - return false; - } - bi->page = page; + page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; } /* map page for use */ @@ -1459,13 +1458,13 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, */ if (dma_mapping_error(rx_ring->dev, dma)) { __free_pages(page, ixgbe_rx_pg_order(rx_ring)); - bi->page = NULL; rx_ring->rx_stats.alloc_rx_page_failed++; return false; } bi->dma = dma; + bi->page = page; bi->page_offset = 0; return true; @@ -1509,16 +1508,28 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) i -= rx_ring->count; } - /* clear the hdr_addr for the next_to_use descriptor */ - rx_desc->read.hdr_addr = 0; + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.upper.status_error = 0; cleaned_count--; } while (cleaned_count); i += rx_ring->count; - if (rx_ring->next_to_use != i) - ixgbe_release_rx_desc(rx_ring, i); + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } } static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, @@ -1763,14 +1774,9 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, return false; #endif - /* if skb_pad returns an error the skb was freed */ - if (unlikely(skb->len < 60)) { - int pad_len = 60 - skb->len; - - if (skb_pad(skb, pad_len)) - return true; - __skb_put(skb, pad_len); - } + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; return false; } @@ -1795,9 +1801,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; /* transfer page from old buffer to new buffer */ - new_buff->page = old_buff->page; - new_buff->dma = old_buff->dma; - new_buff->page_offset = old_buff->page_offset; + *new_buff = *old_buff; /* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, @@ -1806,6 +1810,11 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, DMA_FROM_DEVICE); } +static inline bool ixgbe_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; +} + /** * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on @@ -1841,12 +1850,12 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - /* we can reuse buffer as-is, just make sure it is local */ - if (likely(page_to_nid(page) == numa_node_id())) + /* page is not reserved, we can reuse buffer as-is */ + if (likely(!ixgbe_page_is_reserved(page))) return true; /* this page cannot be reused so discard it */ - put_page(page); + __free_pages(page, ixgbe_rx_pg_order(rx_ring)); return false; } @@ -1854,7 +1863,7 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, rx_buffer->page_offset, size, truesize); /* avoid re-using remote pages */ - if (unlikely(page_to_nid(page) != numa_node_id())) + if (unlikely(ixgbe_page_is_reserved(page))) return false; #if (PAGE_SIZE < 8192) @@ -1864,22 +1873,19 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, /* flip page offset to other buffer */ rx_buffer->page_offset ^= truesize; - - /* Even if we own the page, we are not allowed to use atomic_set() - * This would break get_page_unless_zero() users. - */ - atomic_inc(&page->_count); #else /* move offset up to the next cache line */ rx_buffer->page_offset += truesize; if (rx_buffer->page_offset > last_offset) return false; - - /* bump ref count on page before it is given to the stack */ - get_page(page); #endif + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. 
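The Rx buffer changes here hinge on cheap page recycling: a page is reused only when the new ixgbe_page_is_reserved() check says it is NUMA-local and not a pfmemalloc emergency page, and on systems with PAGE_SIZE < 8192 the driver simply XORs page_offset to bounce between the two halves of each page, taking one extra page reference per reuse. A toy model of the offset arithmetic (standalone, with truesize fixed at half of a 4 KiB page):

#include <assert.h>
#include <stdint.h>

#define TRUESIZE 2048u   /* half of a 4 KiB page */

struct rx_buffer {
    uint32_t page_offset;
};

/* Flip between the two halves of one page, as the driver does when
 * PAGE_SIZE < 8192: the offset alternates 0 -> 2048 -> 0 -> ... */
static void recycle(struct rx_buffer *b)
{
    b->page_offset ^= TRUESIZE;
}

int main(void)
{
    struct rx_buffer b = { .page_offset = 0 };

    recycle(&b);
    assert(b.page_offset == 2048);
    recycle(&b);
    assert(b.page_offset == 0);
    return 0;
}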
+ */ + atomic_inc(&page->_count); + return true; } @@ -1907,8 +1913,8 @@ static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring, #endif /* allocate a skb to store the frags */ - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - IXGBE_RX_HDR_SIZE); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, + IXGBE_RX_HDR_SIZE); if (unlikely(!skb)) { rx_ring->rx_stats.alloc_rx_buff_failed++; return NULL; @@ -1942,6 +1948,8 @@ dma_sync: rx_buffer->page_offset, ixgbe_rx_bufsz(rx_ring), DMA_FROM_DEVICE); + + rx_buffer->skb = NULL; } /* pull page into skb */ @@ -1959,8 +1967,6 @@ dma_sync: } /* clear contents of buffer_info */ - rx_buffer->skb = NULL; - rx_buffer->dma = 0; rx_buffer->page = NULL; return skb; @@ -2155,6 +2161,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: ixgbe_set_ivar(adapter, -1, 1, v_idx); break; default: @@ -2264,6 +2272,8 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: /* * set the WDIS bit to not clear the timer bits and cause an * immediate assertion of the interrupt @@ -2467,6 +2477,8 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: mask = (qmask & 0xFFFFFFFF); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); @@ -2493,6 +2505,8 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: mask = (qmask & 0xFFFFFFFF); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); @@ -2525,6 +2539,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, mask |= IXGBE_EIMS_GPI_SDP0; break; case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: mask |= IXGBE_EIMS_TS; break; default: @@ -2536,7 +2552,10 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, case ixgbe_mac_82599EB: mask |= IXGBE_EIMS_GPI_SDP1; mask |= IXGBE_EIMS_GPI_SDP2; + /* fall through */ case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: mask |= IXGBE_EIMS_ECC; mask |= IXGBE_EIMS_MAILBOX; break; @@ -2544,9 +2563,6 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, break; } - if (adapter->hw.mac.type == ixgbe_mac_X540) - mask |= IXGBE_EIMS_TIMESYNC; - if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) mask |= IXGBE_EIMS_FLOW_DIR; @@ -2592,6 +2608,8 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: if (eicr & IXGBE_EICR_ECC) { e_info(link, "Received ECC Err, initiating reset\n"); adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; @@ -2811,6 +2829,8 @@ static irqreturn_t ixgbe_intr(int irq, void *data) ixgbe_check_sfp_event(adapter, eicr); /* Fall through */ case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: if (eicr & IXGBE_EICR_ECC) { e_info(link, "Received ECC Err, initiating reset\n"); adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; @@ -2905,6 +2925,8 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case 
ixgbe_mac_X550EM_x: IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); @@ -3190,16 +3212,14 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); } -static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) +static void ixgbe_setup_reta(struct ixgbe_adapter *adapter, const u32 *seed) { struct ixgbe_hw *hw = &adapter->hw; - static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D, - 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, - 0x6A3E67EA, 0x14364D17, 0x3BED200D}; - u32 mrqc = 0, reta = 0; - u32 rxcsum; + u32 reta = 0; int i, j; + int reta_entries = 128; u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + int indices_multi; /* * Program table for at least 2 queues w/ SR-IOV so that VFs can @@ -3213,16 +3233,69 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) for (i = 0; i < 10; i++) IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); + /* Fill out the redirection table as follows: + * 82598: 128 (8 bit wide) entries containing pair of 4 bit RSS indices + * 82599/X540: 128 (8 bit wide) entries containing 4 bit RSS index + * X550: 512 (8 bit wide) entries containing 6 bit RSS index + */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + indices_multi = 0x11; + else + indices_multi = 0x1; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + reta_entries = 512; + default: + break; + } + /* Fill out redirection table */ - for (i = 0, j = 0; i < 128; i++, j++) { + for (i = 0, j = 0; i < reta_entries; i++, j++) { if (j == rss_i) j = 0; - /* reta = 4-byte sliding window of - * 0x00..(indices-1)(indices-1)00..etc. 
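The new ixgbe_setup_reta() below packs four 8-bit redirection-table entries into each 32-bit register and, on X550 parts without SR-IOV, spills entries 128-511 into the ERETA registers. A standalone version of the packing arithmetic (regs[] stands in for the RETA/ERETA registers; the parameter values are chosen purely for illustration):

#include <stdint.h>
#include <stdio.h>

/* Pack 8-bit RETA entries four at a time into 32-bit registers.
 * indices_multi is 0x11 on 82598 (paired 4-bit indices) and 0x1 on
 * later parts; rss_i is the number of RSS queues in use. */
static void fill_reta(uint32_t *regs, int reta_entries, int rss_i,
                      int indices_multi)
{
    uint32_t reta = 0;
    int i, j;

    for (i = 0, j = 0; i < reta_entries; i++, j++) {
        if (j == rss_i)
            j = 0;
        reta = (reta << 8) | (uint32_t)(j * indices_multi);
        if ((i & 3) == 3)
            regs[i >> 2] = reta;   /* one register per 4 entries */
    }
}

int main(void)
{
    uint32_t regs[32];

    fill_reta(regs, 128, 4, 0x1);          /* e.g. 4 RSS queues */
    printf("RETA[0] = 0x%08x\n", regs[0]); /* prints 0x00010203 */
    return 0;
}

With rss_i = 4 and indices_multi = 0x1 the first register comes out as 0x00010203, i.e. the first entry lands in the most significant byte, matching the sliding-window description in the comment.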
*/ - reta = (reta << 8) | (j * 0x11); + reta = (reta << 8) | (j * indices_multi); + if ((i & 3) == 3) { + if (i < 128) + IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); + else + IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), + reta); + } + } +} + +static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter, const u32 *seed) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vfreta = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + unsigned int pf_pool = adapter->num_vfs; + int i, j; + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), seed[i]); + + /* Fill out the redirection table */ + for (i = 0, j = 0; i < 64; i++, j++) { + if (j == rss_i) + j = 0; + vfreta = (vfreta << 8) | j; if ((i & 3) == 3) - IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); + IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool), + vfreta); } +} + +static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 mrqc = 0, rss_field = 0, vfmrqc = 0; + u32 rss_key[10]; + u32 rxcsum; /* Disable indicating checksum in descriptor, enables RSS hash */ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); @@ -3255,17 +3328,35 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) } /* Perform hash on these packet types */ - mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 | - IXGBE_MRQC_RSS_FIELD_IPV4_TCP | - IXGBE_MRQC_RSS_FIELD_IPV6 | - IXGBE_MRQC_RSS_FIELD_IPV6_TCP; + rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 | + IXGBE_MRQC_RSS_FIELD_IPV4_TCP | + IXGBE_MRQC_RSS_FIELD_IPV6 | + IXGBE_MRQC_RSS_FIELD_IPV6_TCP; if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) - mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; + rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) - mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; - - IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + if ((hw->mac.type >= ixgbe_mac_X550) && + (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { + unsigned int pf_pool = adapter->num_vfs; + + /* Enable VF RSS mode */ + mrqc |= IXGBE_MRQC_MULTIPLE_RSS; + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + + /* Setup RSS through the VF registers */ + ixgbe_setup_vfreta(adapter, rss_key); + vfmrqc = IXGBE_MRQC_RSSEN; + vfmrqc |= rss_field; + IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc); + } else { + ixgbe_setup_reta(adapter, rss_key); + mrqc |= rss_field; + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + } } /** @@ -3534,6 +3625,8 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: case ixgbe_mac_82598EB: /* * For VMDq support of different descriptor types or @@ -3657,6 +3750,8 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; @@ -3691,6 +3786,8 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; @@ -4112,6 +4209,8 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) /* Calculate delay value for device */ switch (hw->mac.type) { case ixgbe_mac_X540: + 
case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: dv_id = IXGBE_DV_X540(link, tc); break; default: @@ -4170,6 +4269,8 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) /* Calculate delay value for device */ switch (hw->mac.type) { case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: dv_id = IXGBE_LOW_DV_X540(tc); break; default: @@ -4308,29 +4409,26 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { - struct ixgbe_rx_buffer *rx_buffer; + struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; - rx_buffer = &rx_ring->rx_buffer_info[i]; if (rx_buffer->skb) { struct sk_buff *skb = rx_buffer->skb; - if (IXGBE_CB(skb)->page_released) { + if (IXGBE_CB(skb)->page_released) dma_unmap_page(dev, IXGBE_CB(skb)->dma, ixgbe_rx_bufsz(rx_ring), DMA_FROM_DEVICE); - IXGBE_CB(skb)->page_released = false; - } dev_kfree_skb(skb); rx_buffer->skb = NULL; } - if (rx_buffer->dma) - dma_unmap_page(dev, rx_buffer->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE); - rx_buffer->dma = 0; - if (rx_buffer->page) - __free_pages(rx_buffer->page, - ixgbe_rx_pg_order(rx_ring)); + + if (!rx_buffer->page) + continue; + + dma_unmap_page(dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring)); + rx_buffer->page = NULL; } @@ -4606,6 +4704,8 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: default: IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); @@ -4948,10 +5048,12 @@ void ixgbe_down(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); } - /* Disable the Tx DMA engine on 82599 and X540 */ + /* Disable the Tx DMA engine on 82599 and later MAC */ switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & ~IXGBE_DMATXCTL_TE)); @@ -5016,7 +5118,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) hw->subsystem_device_id = pdev->subsystem_device; /* Set common capability flags and settings */ - rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); + rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus()); adapter->ring_feature[RING_F_RSS].limit = rss; adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; @@ -5071,6 +5173,12 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) if (fwsm & IXGBE_FWSM_TS_ENABLED) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550: +#ifdef CONFIG_IXGBE_DCA + adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; +#endif + break; default: break; } @@ -5086,6 +5194,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) #ifdef CONFIG_IXGBE_DCB switch (hw->mac.type) { case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; break; @@ -5675,6 +5785,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: pci_wake_from_d3(pdev, !!wufc); break; default: @@ -5806,6 +5918,8 @@ void ixgbe_update_stats(struct 
ixgbe_adapter *adapter) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: hwstats->pxonrxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); break; @@ -5819,7 +5933,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); if ((hw->mac.type == ixgbe_mac_82599EB) || - (hw->mac.type == ixgbe_mac_X540)) { + (hw->mac.type == ixgbe_mac_X540) || + (hw->mac.type == ixgbe_mac_X550) || + (hw->mac.type == ixgbe_mac_X550EM_x)) { hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */ hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); @@ -5842,7 +5958,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); break; case ixgbe_mac_X540: - /* OS2BMC stats are X540 only*/ + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + /* OS2BMC stats are X540 and later */ hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); @@ -6110,6 +6228,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) } break; case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: case ixgbe_mac_82599EB: { u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); @@ -6221,6 +6341,10 @@ static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter) if (!adapter->num_vfs) return false; + /* resetting the PF is only needed for MAC before X550 */ + if (hw->mac.type >= ixgbe_mac_X550) + return false; + for (i = 0; i < adapter->num_vfs; i++) { for (j = 0; j < q_per_pool; j++) { u32 h, t; @@ -6256,6 +6380,66 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) } } +#ifdef CONFIG_PCI_IOV +static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter, + struct pci_dev *vfdev) +{ + if (!pci_wait_for_pending_transaction(vfdev)) + e_dev_warn("Issuing VFLR with pending transactions\n"); + + e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev)); + pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); + + msleep(100); +} + +static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct pci_dev *vfdev; + u32 gpc; + int pos; + unsigned short vf_id; + + if (!(netif_carrier_ok(adapter->netdev))) + return; + + gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC); + if (gpc) /* If incrementing then no need for the check below */ + return; + /* Check to see if a bad DMA write target from an errant or + * malicious VF has caused a PCIe error. If so then we can + * issue a VFLR to the offending VF(s) and then resume without + * requesting a full slot reset. 
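The reworked ixgbe_check_for_bad_vf() walks the VFs with standard config-space accessors (pci_get_device() and pci_read_config_word()) instead of poking the 82599 CIAA/CIAD debug registers, and ixgbe_issue_vf_flr() resets an offender through the standard PCIe function-level-reset bit rather than a magic config write. The ordering it follows — drain pending transactions, set the FLR bit, then allow the 100 ms recovery time the PCIe spec mandates — sketched with stand-in helpers (wait_for_pending_transactions() and set_initiate_flr_bit() are placeholders, not kernel API):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Placeholders for the PCI plumbing; the kernel side uses
 * pci_wait_for_pending_transaction() and pcie_capability_set_word(). */
static bool wait_for_pending_transactions(void) { return true; }
static void set_initiate_flr_bit(void) { puts("FLR initiated"); }

/* Order matters: drain outstanding requests, trigger FLR, then give
 * the function time to complete its reset before touching it again. */
static void issue_flr(void)
{
    if (!wait_for_pending_transactions())
        fprintf(stderr, "issuing FLR with pending transactions\n");

    set_initiate_flr_bit();
    usleep(100 * 1000);   /* mirrors the msleep(100) above */
}

int main(void)
{
    issue_flr();
    return 0;
}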
+ */ + + if (!pdev) + return; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return; + + /* get the device ID for the VF */ + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); + + /* check status reg for all VFs owned by this PF */ + vfdev = pci_get_device(pdev->vendor, vf_id, NULL); + while (vfdev) { + if (vfdev->is_virtfn && (vfdev->physfn == pdev)) { + u16 status_reg; + + pci_read_config_word(vfdev, PCI_STATUS, &status_reg); + if (status_reg & PCI_STATUS_REC_MASTER_ABORT) + /* issue VFLR */ + ixgbe_issue_vf_flr(adapter, vfdev); + } + + vfdev = pci_get_device(pdev->vendor, vf_id, vfdev); + } +} + static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) { u32 ssvpc; @@ -6276,6 +6460,17 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) e_warn(drv, "%u Spoofed packets detected\n", ssvpc); } +#else +static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter) +{ +} + +static void +ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter) +{ +} +#endif /* CONFIG_PCI_IOV */ + /** * ixgbe_watchdog_subtask - check and bring link up @@ -6296,6 +6491,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) else ixgbe_watchdog_link_is_down(adapter); + ixgbe_check_for_bad_vf(adapter); ixgbe_spoof_check(adapter); ixgbe_update_stats(adapter); @@ -6407,51 +6603,6 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); } -#ifdef CONFIG_PCI_IOV -static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) -{ - int vf; - struct ixgbe_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - u32 gpc; - u32 ciaa, ciad; - - gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC); - if (gpc) /* If incrementing then no need for the check below */ - return; - /* - * Check to see if a bad DMA write target from an errant or - * malicious VF has caused a PCIe error. If so then we can - * issue a VFLR to the offending VF(s) and then resume without - * requesting a full slot reset. 
- */ - - for (vf = 0; vf < adapter->num_vfs; vf++) { - ciaa = (vf << 16) | 0x80000000; - /* 32 bit read so align, we really want status at offset 6 */ - ciaa |= PCI_COMMAND; - IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa); - ciad = IXGBE_READ_REG(hw, IXGBE_CIAD_82599); - ciaa &= 0x7FFFFFFF; - /* disable debug mode asap after reading data */ - IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa); - /* Get the upper 16 bits which will be the PCI status reg */ - ciad >>= 16; - if (ciad & PCI_STATUS_REC_MASTER_ABORT) { - netdev_err(netdev, "VF %d Hung DMA\n", vf); - /* Issue VFLR */ - ciaa = (vf << 16) | 0x80000000; - ciaa |= 0xA8; - IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa); - ciad = 0x00008000; /* VFLR */ - IXGBE_WRITE_REG(hw, IXGBE_CIAD_82599, ciad); - ciaa &= 0x7FFFFFFF; - IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa); - } - } -} - -#endif /** * ixgbe_service_timer - Timer Call-back * @data: pointer to adapter cast into an unsigned long @@ -6460,7 +6611,6 @@ static void ixgbe_service_timer(unsigned long data) { struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; unsigned long next_event_offset; - bool ready = true; /* poll faster when waiting for link */ if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) @@ -6468,32 +6618,10 @@ static void ixgbe_service_timer(unsigned long data) else next_event_offset = HZ * 2; -#ifdef CONFIG_PCI_IOV - /* - * don't bother with SR-IOV VF DMA hang check if there are - * no VFs or the link is down - */ - if (!adapter->num_vfs || - (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) - goto normal_timer_service; - - /* If we have VFs allocated then we must check for DMA hangs */ - ixgbe_check_for_bad_vf(adapter); - next_event_offset = HZ / 50; - adapter->timer_event_accumulator++; - - if (adapter->timer_event_accumulator >= 100) - adapter->timer_event_accumulator = 0; - else - ready = false; - -normal_timer_service: -#endif /* Reset the timer */ mod_timer(&adapter->service_timer, next_event_offset + jiffies); - if (ready) - ixgbe_service_event_schedule(adapter); + ixgbe_service_event_schedule(adapter); } static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) @@ -6898,8 +7026,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { - /* notify HW of packet */ - ixgbe_write_tail(tx_ring, i); + writel(i, tx_ring->tail); + + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); } return; @@ -7197,12 +7329,8 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, * The minimum packet size for olinfo paylen is 17 so pad the skb * in order to meet this minimum size requirement. */ - if (unlikely(skb->len < 17)) { - if (skb_pad(skb, 17 - skb->len)) - return NETDEV_TX_OK; - skb->len = 17; - skb_set_tail_pointer(skb, 17); - } + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; tx_ring = ring ? 
ring : adapter->tx_ring[skb->queue_mapping]; @@ -7646,7 +7774,7 @@ static int ixgbe_set_features(struct net_device *netdev, static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr, + const unsigned char *addr, u16 vid, u16 flags) { /* guarantee we can provide a unique filter for the unicast address */ @@ -7655,7 +7783,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return -ENOMEM; } - return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags); + return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); } static int ixgbe_ndo_bridge_setlink(struct net_device *dev, @@ -7716,7 +7844,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, else mode = BRIDGE_MODE_VEPA; - return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0); } static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) @@ -7964,6 +8092,29 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, return is_wol_supported; } +/** + * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM + * @adapter: Pointer to adapter struct + */ +static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter) +{ +#ifdef CONFIG_OF + struct device_node *dp = pci_device_to_OF_node(adapter->pdev); + struct ixgbe_hw *hw = &adapter->hw; + const unsigned char *addr; + + addr = of_get_mac_address(dp); + if (addr) { + ether_addr_copy(hw->mac.perm_addr, addr); + return; + } +#endif /* CONFIG_OF */ + +#ifdef CONFIG_SPARC + ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr); +#endif /* CONFIG_SPARC */ +} + /** * ixgbe_probe - Device Initialization Routine * @pdev: PCI device information struct @@ -8046,7 +8197,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) SET_NETDEV_DEV(netdev, &pdev->dev); adapter = netdev_priv(netdev); - pci_set_drvdata(pdev, adapter); adapter->netdev = netdev; adapter->pdev = pdev; @@ -8104,6 +8254,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); break; default: @@ -8167,6 +8319,8 @@ skip_sriov: switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: netdev->features |= NETIF_F_SCTP_CSUM; netdev->hw_features |= NETIF_F_SCTP_CSUM | NETIF_F_NTUPLE; @@ -8229,6 +8383,8 @@ skip_sriov: goto err_sw_init; } + ixgbe_get_platform_mac_addr(adapter); + memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); if (!is_valid_ether_addr(netdev->dev_addr)) { @@ -8320,6 +8476,8 @@ skip_sriov: if (err) goto err_register; + pci_set_drvdata(pdev, adapter); + /* power down the optics for 82599 SFP+ fiber */ if (hw->mac.ops.disable_tx_laser) hw->mac.ops.disable_tx_laser(hw); @@ -8399,9 +8557,14 @@ err_dma: static void ixgbe_remove(struct pci_dev *pdev) { struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; + struct net_device *netdev; bool disable_dev; + /* if !adapter then we already cleaned up in probe */ + if (!adapter) + return; + + netdev = adapter->netdev; ixgbe_dbg_adapter_exit(adapter); set_bit(__IXGBE_REMOVING, &adapter->state); @@ -8523,6 +8686,12 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, case ixgbe_mac_X540: device_id = 
IXGBE_X540_VF_DEVICE_ID; break; + case ixgbe_mac_X550: + device_id = IXGBE_DEV_ID_X550_VF; + break; + case ixgbe_mac_X550EM_x: + device_id = IXGBE_DEV_ID_X550EM_X_VF; + break; default: device_id = 0; break; @@ -8542,8 +8711,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, * VFLR. Just clean up the AER in that case. */ if (vfdev) { - e_dev_err("Issuing VFLR to VF %d\n", vf); - pci_write_config_dword(vfdev, 0xA8, 0x00008000); + ixgbe_issue_vf_flr(adapter, vfdev); /* Free device reference count */ pci_dev_put(vfdev); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c index cc8f0128286c19583c743992e0f33eacad3625df..9993a471d668cea9ce8e491def02bc2d3531cff6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -305,6 +305,8 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); break; case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); break; default: @@ -426,6 +428,8 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) struct ixgbe_mbx_info *mbx = &hw->mbx; if (hw->mac.type != ixgbe_mac_82599EB && + hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && hw->mac.type != ixgbe_mac_X540) return; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 28b81ae09b5ae2288e8d05fac82176a8d504fd18..8a2be444113dd65044d30f7831a81ba1d118f58f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -43,12 +43,194 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); -static bool ixgbe_get_i2c_data(u32 *i2cctl); +static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl); static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); +/** + * ixgbe_out_i2c_byte_ack - Send I2C byte with ack + * @hw: pointer to the hardware structure + * @byte: byte to send + * + * Returns an error code on error. + **/ +static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte) +{ + s32 status; + + status = ixgbe_clock_out_i2c_byte(hw, byte); + if (status) + return status; + return ixgbe_get_i2c_ack(hw); +} + +/** + * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack + * @hw: pointer to the hardware structure + * @byte: pointer to a u8 to receive the byte + * + * Returns an error code on error. + **/ +static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte) +{ + s32 status; + + status = ixgbe_clock_in_i2c_byte(hw, byte); + if (status) + return status; + /* ACK */ + return ixgbe_clock_out_i2c_bit(hw, false); +} + +/** + * ixgbe_ones_comp_byte_add - Perform one's complement addition + * @add1: addend 1 + * @add2: addend 2 + * + * Returns one's complement 8-bit sum. 
+ **/ +static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) +{ + u16 sum = add1 + add2; + + sum = (sum & 0xFF) + (sum >> 8); + return sum & 0xFF; +} + +/** + * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. + **/ +s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val) +{ + u32 swfw_mask = hw->phy.phy_semaphore_mask; + int max_retry = 10; + int retry = 0; + u8 csum_byte; + u8 high_bits; + u8 low_bits; + u8 reg_high; + u8 csum; + + reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */ + csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); + csum = ~csum; + do { + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; + ixgbe_i2c_start(hw); + /* Device Address and write indication */ + if (ixgbe_out_i2c_byte_ack(hw, addr)) + goto fail; + /* Write bits 14:8 */ + if (ixgbe_out_i2c_byte_ack(hw, reg_high)) + goto fail; + /* Write bits 7:0 */ + if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) + goto fail; + /* Write csum */ + if (ixgbe_out_i2c_byte_ack(hw, csum)) + goto fail; + /* Re-start condition */ + ixgbe_i2c_start(hw); + /* Device Address and read indication */ + if (ixgbe_out_i2c_byte_ack(hw, addr | 1)) + goto fail; + /* Get upper bits */ + if (ixgbe_in_i2c_byte_ack(hw, &high_bits)) + goto fail; + /* Get low bits */ + if (ixgbe_in_i2c_byte_ack(hw, &low_bits)) + goto fail; + /* Get csum */ + if (ixgbe_clock_in_i2c_byte(hw, &csum_byte)) + goto fail; + /* NACK */ + if (ixgbe_clock_out_i2c_bit(hw, false)) + goto fail; + ixgbe_i2c_stop(hw); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + *val = (high_bits << 8) | low_bits; + return 0; + +fail: + ixgbe_i2c_bus_clear(hw); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + retry++; + if (retry < max_retry) + hw_dbg(hw, "I2C byte read combined error - Retry.\n"); + else + hw_dbg(hw, "I2C byte read combined error.\n"); + } while (retry < max_retry); + + return IXGBE_ERR_I2C; +} + +/** + * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error. 
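ixgbe_ones_comp_byte_add() above folds any carry out of bit 7 back into bit 0 (end-around carry), and the combined-I2C routines transmit the bitwise inverse of the running sum. The useful property is that the receiver can re-add every covered byte plus the checksum and expect the all-ones value 0xFF. A runnable check, using 0x12B0 — the CS4227 SPARE24_LSB offset defined later in this patch — purely as a sample register address:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* One's complement addition: fold the carry back into bit 0. */
static uint8_t ones_comp_add(uint8_t a, uint8_t b)
{
    uint16_t sum = (uint16_t)a + b;

    sum = (sum & 0xFF) + (sum >> 8);
    return (uint8_t)sum;
}

int main(void)
{
    uint16_t reg = 0x12B0;
    uint8_t reg_high = ((reg >> 7) & 0xFE) | 1;        /* 0x25, read request */
    uint8_t reg_low = reg & 0xFF;                      /* 0xB0 */
    uint8_t csum = ~ones_comp_add(reg_high, reg_low);  /* 0x2A */

    /* Receiver-side validation: covered bytes + checksum == 0xFF */
    assert(ones_comp_add(ones_comp_add(reg_high, reg_low), csum) == 0xFF);
    printf("csum = 0x%02X\n", csum);
    return 0;
}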
+ **/ +s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, + u8 addr, u16 reg, u16 val) +{ + int max_retry = 1; + int retry = 0; + u8 reg_high; + u8 csum; + + reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */ + csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); + csum = ixgbe_ones_comp_byte_add(csum, val >> 8); + csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF); + csum = ~csum; + do { + ixgbe_i2c_start(hw); + /* Device Address and write indication */ + if (ixgbe_out_i2c_byte_ack(hw, addr)) + goto fail; + /* Write bits 14:8 */ + if (ixgbe_out_i2c_byte_ack(hw, reg_high)) + goto fail; + /* Write bits 7:0 */ + if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) + goto fail; + /* Write data 15:8 */ + if (ixgbe_out_i2c_byte_ack(hw, val >> 8)) + goto fail; + /* Write data 7:0 */ + if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF)) + goto fail; + /* Write csum */ + if (ixgbe_out_i2c_byte_ack(hw, csum)) + goto fail; + ixgbe_i2c_stop(hw); + return 0; + +fail: + ixgbe_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + hw_dbg(hw, "I2C byte write combined error - Retry.\n"); + else + hw_dbg(hw, "I2C byte write combined error.\n"); + } while (retry < max_retry); + + return IXGBE_ERR_I2C; +} + /** * ixgbe_identify_phy_generic - Get physical layer module * @hw: pointer to hardware structure @@ -60,6 +242,15 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) u32 phy_addr; u16 ext_ability = 0; + if (!hw->phy.phy_semaphore_mask) { + hw->phy.lan_id = IXGBE_READ_REG(hw, IXGBE_STATUS) & + IXGBE_STATUS_LAN_ID_1; + if (hw->phy.lan_id) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + } + if (hw->phy.type == ixgbe_phy_unknown) { for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { hw->phy.mdio.prtad = phy_addr; @@ -315,12 +506,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data) { s32 status; - u16 gssr; - - if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) - gssr = IXGBE_GSSR_PHY1_SM; - else - gssr = IXGBE_GSSR_PHY0_SM; + u32 gssr = hw->phy.phy_semaphore_mask; if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type, @@ -418,7 +604,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data) { s32 status; - u16 gssr; + u32 gssr; if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) gssr = IXGBE_GSSR_PHY1_SM; @@ -576,6 +762,10 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, *speed |= IXGBE_LINK_SPEED_100_FULL; } + /* Internal PHY does not support 100 Mbps */ + if (hw->mac.type == ixgbe_mac_X550EM_x) + *speed &= ~IXGBE_LINK_SPEED_100_FULL; + return status; } @@ -632,6 +822,9 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, * @hw: pointer to hardware structure * * Restart autonegotiation and PHY and waits for completion. + * This function always returns success; this is necessary since + * it is called via a function pointer that could call other + * functions that could return an error.
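Both combined-I2C routines share the same error-handling shape: every protocol step that can be NACKed jumps to a cleanup label that clears the bus, and an outer do/while bounds the retries so a persistent bus fault degrades into a single IXGBE_ERR_I2C return. The shape in isolation (xfer_byte() and bus_clear() are deterministic stand-ins):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the individual bus operations; not driver code. */
static int attempts;
static bool xfer_byte(void) { return ++attempts > 3; } /* NACKs first 3 calls */
static void bus_clear(void) { puts("bus cleared"); }

static int do_transaction(int max_retry)
{
    int retry = 0;

    do {
        /* each protocol step bails to the cleanup path on NACK */
        if (!xfer_byte())
            goto fail;
        if (!xfer_byte())
            goto fail;
        return 0;               /* whole frame ACKed */

fail:
        bus_clear();            /* leave the bus in a sane state */
        retry++;
    } while (retry < max_retry);

    return -1;                  /* callers see a single error code */
}

int main(void)
{
    return do_transaction(10) ? 1 : 0;
}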
**/ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) { @@ -1462,15 +1655,10 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, s32 status; u32 max_retry = 10; u32 retry = 0; - u16 swfw_mask = 0; + u32 swfw_mask = hw->phy.phy_semaphore_mask; bool nack = true; *data = 0; - if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) - swfw_mask = IXGBE_GSSR_PHY1_SM; - else - swfw_mask = IXGBE_GSSR_PHY0_SM; - do { if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return IXGBE_ERR_SWFW_SYNC; @@ -1548,12 +1736,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, s32 status; u32 max_retry = 1; u32 retry = 0; - u16 swfw_mask = 0; - - if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) - swfw_mask = IXGBE_GSSR_PHY1_SM; - else - swfw_mask = IXGBE_GSSR_PHY0_SM; + u32 swfw_mask = hw->phy.phy_semaphore_mask; if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return IXGBE_ERR_SWFW_SYNC; @@ -1610,7 +1793,7 @@ fail: **/ static void ixgbe_i2c_start(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); /* Start condition must begin with data and clock high */ ixgbe_set_i2c_data(hw, &i2cctl, 1); @@ -1639,7 +1822,7 @@ static void ixgbe_i2c_start(struct ixgbe_hw *hw) **/ static void ixgbe_i2c_stop(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); /* Stop condition must begin with data low and clock high */ ixgbe_set_i2c_data(hw, &i2cctl, 0); @@ -1697,9 +1880,9 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) } /* Release SDA line (set high) */ - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - i2cctl |= IXGBE_I2C_DATA_OUT; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); return status; @@ -1715,7 +1898,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) { s32 status = 0; u32 i = 0; - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); u32 timeout = 10; bool ack = true; @@ -1728,8 +1911,8 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) /* Poll for ACK. 
Note that ACK in I2C spec is * transition from 1 to 0 */ for (i = 0; i < timeout; i++) { - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - ack = ixgbe_get_i2c_data(&i2cctl); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + ack = ixgbe_get_i2c_data(hw, &i2cctl); udelay(1); if (ack == 0) @@ -1758,15 +1941,15 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) **/ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); ixgbe_raise_i2c_clk(hw, &i2cctl); /* Minimum high period of clock is 4us */ udelay(IXGBE_I2C_T_HIGH); - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - *data = ixgbe_get_i2c_data(&i2cctl); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + *data = ixgbe_get_i2c_data(hw, &i2cctl); ixgbe_lower_i2c_clk(hw, &i2cctl); @@ -1786,7 +1969,7 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) { s32 status; - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); status = ixgbe_set_i2c_data(hw, &i2cctl, data); if (status == 0) { @@ -1822,14 +2005,14 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) u32 i2cctl_r = 0; for (i = 0; i < timeout; i++) { - *i2cctl |= IXGBE_I2C_CLK_OUT; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* SCL rise time (1000ns) */ udelay(IXGBE_I2C_T_RISE); - i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - if (i2cctl_r & IXGBE_I2C_CLK_IN) + i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw)) break; } } @@ -1844,9 +2027,9 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) { - *i2cctl &= ~IXGBE_I2C_CLK_OUT; + *i2cctl &= ~IXGBE_I2C_CLK_OUT_BY_MAC(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* SCL fall time (300ns) */ @@ -1864,19 +2047,19 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) { if (data) - *i2cctl |= IXGBE_I2C_DATA_OUT; + *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); else - *i2cctl &= ~IXGBE_I2C_DATA_OUT; + *i2cctl &= ~IXGBE_I2C_DATA_OUT_BY_MAC(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); /* Verify data was set correctly */ - *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - if (data != ixgbe_get_i2c_data(i2cctl)) { + *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + if (data != ixgbe_get_i2c_data(hw, i2cctl)) { hw_dbg(hw, "Error - I2C data was not set to %X.\n", data); return IXGBE_ERR_I2C; } @@ -1891,9 +2074,9 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) * * Returns the I2C data bit value **/ -static bool ixgbe_get_i2c_data(u32 *i2cctl) +static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) { - if (*i2cctl & IXGBE_I2C_DATA_IN) + if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw)) return true; return false; } @@ -1907,7 +2090,7 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl) **/ static void 
ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
 {
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
 u32 i;
 
 ixgbe_i2c_start(hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 54071ed17e3b3d589d8250f4536f622e796049dc..43464388128787116f4c1f6a37943f946fd9cdab 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -77,6 +77,11 @@
 #define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
 #define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
+#define IXGBE_CS4227 0xBE /* CS4227 address */
+#define IXGBE_CS4227_SPARE24_LSB 0x12B0 /* Reg to program EDC */
+#define IXGBE_CS4227_EDC_MODE_CX1 0x0002
+#define IXGBE_CS4227_EDC_MODE_SR 0x0004
+
 /* Flow control defines */
 #define IXGBE_TAF_SYM_PAUSE 0x400
 #define IXGBE_TAF_ASM_PAUSE 0x800
@@ -110,7 +115,6 @@
 /* SFP+ SFF-8472 Compliance code */
 #define IXGBE_SFF_SFF_8472_UNSUP 0x00
 
-s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
 s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
 s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
 s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
@@ -157,4 +161,8 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
 u8 *sff8472_data);
 s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
 u8 eeprom_data);
+s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val);
+s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 val);
 #endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 97c85b859536bf1bfd0aa7b3e323dfd7a28e00b1..c76ba90ecc6ecc44a6899fa487eda911b5a52d5a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -221,7 +221,8 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
 if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
 adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
- rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+ rss = min_t(int, ixgbe_max_rss_indices(adapter),
+ num_online_cpus());
 } else {
 rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
 }
@@ -618,6 +619,27 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
 return 0;
 }
 
+static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf,
+ u32 qde)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+ u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+ int i;
+
+ for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
+ u32 reg;
+
+ /* flush previous write */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* indicate to hardware that we want to set drop enable */
+ reg = IXGBE_QDE_WRITE | qde;
+ reg |= i << IXGBE_QDE_IDX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
+ }
+}
+
 static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 {
 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
@@ -647,15 +669,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
 
 /* force drop enable for all VF Rx queues */
- for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
- /* flush previous write */
- IXGBE_WRITE_FLUSH(hw);
-
- /* indicate to hardware that we want
to set drop enable */ - reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE; - reg |= i << IXGBE_QDE_IDX_SHIFT; - IXGBE_WRITE_REG(hw, IXGBE_QDE, reg); - } + ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE); /* enable receive for vf */ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); @@ -1079,52 +1093,86 @@ int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) return ixgbe_set_vf_mac(adapter, vf, mac); } +static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf, + u16 vlan, u8 qos) +{ + struct ixgbe_hw *hw = &adapter->hw; + int err = 0; + + if (adapter->vfinfo[vf].pf_vlan) + err = ixgbe_set_vf_vlan(adapter, false, + adapter->vfinfo[vf].pf_vlan, + vf); + if (err) + goto out; + ixgbe_set_vmvir(adapter, vlan, qos, vf); + ixgbe_set_vmolr(hw, vf, false); + if (adapter->vfinfo[vf].spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + adapter->vfinfo[vf].vlan_count++; + + /* enable hide vlan on X550 */ + if (hw->mac.type >= ixgbe_mac_X550) + ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE | + IXGBE_QDE_HIDE_VLAN); + + adapter->vfinfo[vf].pf_vlan = vlan; + adapter->vfinfo[vf].pf_qos = qos; + dev_info(&adapter->pdev->dev, + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__IXGBE_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF VLAN has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); + } + +out: + return err; +} + +static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + int err; + + err = ixgbe_set_vf_vlan(adapter, false, + adapter->vfinfo[vf].pf_vlan, vf); + ixgbe_clear_vmvir(adapter, vf); + ixgbe_set_vmolr(hw, vf, true); + hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); + if (adapter->vfinfo[vf].vlan_count) + adapter->vfinfo[vf].vlan_count--; + adapter->vfinfo[vf].pf_vlan = 0; + adapter->vfinfo[vf].pf_qos = 0; + + return err; +} + int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) { int err = 0; struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) return -EINVAL; if (vlan || qos) { + /* Check if there is already a port VLAN set, if so + * we have to delete the old one first before we + * can set the new one. The usage model had + * previously assumed the user would delete the + * old port VLAN before setting a new one but this + * is not necessarily the case. 
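ixgbe_write_qde() above issues one IXGBE_QDE write per queue in the VF's pool, with the queue index in the IDX field and the requested drop-enable/hide-VLAN flags in the low bits. A sketch of the values produced, assuming a hypothetical pool geometry of two queues per VF:

    #include <stdint.h>
    #include <stdio.h>

    #define QDE_ENABLE    0x00000001 /* from the ixgbe_type.h hunk below */
    #define QDE_HIDE_VLAN 0x00000002
    #define QDE_IDX_SHIFT 8
    #define QDE_WRITE     0x00010000

    int main(void)
    {
        uint32_t vf = 3, q_per_pool = 2;           /* assumed pool geometry */
        uint32_t qde = QDE_ENABLE | QDE_HIDE_VLAN; /* the X550 port-VLAN case */

        for (uint32_t i = vf * q_per_pool; i < (vf + 1) * q_per_pool; i++) {
            uint32_t reg = QDE_WRITE | qde | (i << QDE_IDX_SHIFT);

            /* prints 0x00010603 then 0x00010703 for queues 6 and 7 */
            printf("QDE <- 0x%08X for queue %u\n", reg, i);
        }
        return 0;
    }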
+ */ if (adapter->vfinfo[vf].pf_vlan) - err = ixgbe_set_vf_vlan(adapter, false, - adapter->vfinfo[vf].pf_vlan, - vf); - if (err) - goto out; - err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); + err = ixgbe_disable_port_vlan(adapter, vf); if (err) goto out; - ixgbe_set_vmvir(adapter, vlan, qos, vf); - ixgbe_set_vmolr(hw, vf, false); - if (adapter->vfinfo[vf].spoofchk_enabled) - hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); - adapter->vfinfo[vf].vlan_count++; - adapter->vfinfo[vf].pf_vlan = vlan; - adapter->vfinfo[vf].pf_qos = qos; - dev_info(&adapter->pdev->dev, - "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); - if (test_bit(__IXGBE_DOWN, &adapter->state)) { - dev_warn(&adapter->pdev->dev, - "The VF VLAN has been set," - " but the PF device is not up.\n"); - dev_warn(&adapter->pdev->dev, - "Bring the PF device up before" - " attempting to use the VF device.\n"); - } + err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos); } else { - err = ixgbe_set_vf_vlan(adapter, false, - adapter->vfinfo[vf].pf_vlan, vf); - ixgbe_clear_vmvir(adapter, vf); - ixgbe_set_vmolr(hw, vf, true); - hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); - if (adapter->vfinfo[vf].vlan_count) - adapter->vfinfo[vf].vlan_count--; - adapter->vfinfo[vf].pf_vlan = 0; - adapter->vfinfo[vf].pf_qos = 0; + err = ixgbe_disable_port_vlan(adapter, vf); } + out: return err; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index dfd55d83bc0335ad46defa9f22923c5125e256d0..d101b25dc4b6fdc88abff20fc761ba56a3126385 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -74,9 +74,22 @@ #define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558 #define IXGBE_DEV_ID_X540T1 0x1560 +#define IXGBE_DEV_ID_X550T 0x1563 +#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA +#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB +#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC +#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD +#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE +#define IXGBE_DEV_ID_X550_VF_HV 0x1564 +#define IXGBE_DEV_ID_X550_VF 0x1565 +#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 +#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9 + /* VF Device IDs */ #define IXGBE_DEV_ID_82599_VF 0x10ED #define IXGBE_DEV_ID_X540_VF 0x1515 +#define IXGBE_DEV_ID_X550_VF 0x1565 +#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 /* General Registers */ #define IXGBE_CTRL 0x00000 @@ -84,7 +97,8 @@ #define IXGBE_CTRL_EXT 0x00018 #define IXGBE_ESDP 0x00020 #define IXGBE_EODSDP 0x00028 -#define IXGBE_I2CCTL 0x00028 +#define IXGBE_I2CCTL_BY_MAC(_hw)((((_hw)->mac.type >= ixgbe_mac_X550) ? \ + 0x15F5C : 0x00028)) #define IXGBE_LEDCTL 0x00200 #define IXGBE_FRTIMER 0x00048 #define IXGBE_TCPTIMER 0x0004C @@ -112,10 +126,14 @@ #define IXGBE_VPDDIAG1 0x10208 /* I2CCTL Bit Masks */ -#define IXGBE_I2C_CLK_IN 0x00000001 -#define IXGBE_I2C_CLK_OUT 0x00000002 -#define IXGBE_I2C_DATA_IN 0x00000004 -#define IXGBE_I2C_DATA_OUT 0x00000008 +#define IXGBE_I2C_CLK_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \ + 0x00004000 : 0x00000001) +#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \ + 0x00000200 : 0x00000002) +#define IXGBE_I2C_DATA_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \ + 0x00001000 : 0x00000004) +#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? 
\ + 0x00000400 : 0x00000008) #define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500 #define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 @@ -290,8 +308,17 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_IMIRVP 0x05AC0 #define IXGBE_VMD_CTL 0x0581C #define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ +#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */ #define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ +/* Registers for setting up RSS on X550 with SRIOV + * _p - pool number (0..63) + * _i - index (0..10 for PFVFRSSRK, 0..15 for PFVFRETA) + */ +#define IXGBE_PFVFMRQC(_p) (0x03400 + ((_p) * 4)) +#define IXGBE_PFVFRSSRK(_i, _p) (0x018000 + ((_i) * 4) + ((_p) * 0x40)) +#define IXGBE_PFVFRETA(_i, _p) (0x019000 + ((_i) * 4) + ((_p) * 0x40)) + /* Flow Director registers */ #define IXGBE_FDIRCTRL 0x0EE00 #define IXGBE_FDIRHKEY 0x0EE68 @@ -725,6 +752,24 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_LDPCECL 0x0E820 #define IXGBE_LDPCECH 0x0E821 +/* MII clause 22/28 definitions */ +#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800 + +#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register */ +#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */ + +#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */ + +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s H Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s F Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */ + /* Management */ #define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ #define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ @@ -798,6 +843,12 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_PBACLR_82599 0x11068 #define IXGBE_CIAA_82599 0x11088 #define IXGBE_CIAD_82599 0x1108C +#define IXGBE_CIAA_X550 0x11508 +#define IXGBE_CIAD_X550 0x11510 +#define IXGBE_CIAA_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \ + IXGBE_CIAA_X550 : IXGBE_CIAA_82599)) +#define IXGBE_CIAD_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? 
\ + IXGBE_CIAD_X550 : IXGBE_CIAD_82599)) #define IXGBE_PICAUSE 0x110B0 #define IXGBE_PIENA 0x110B8 #define IXGBE_CDQ_MBR_82599 0x110B4 @@ -1120,6 +1171,13 @@ struct ixgbe_thermal_sensor_data { /* MDIO definitions */ +#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 +#define IXGBE_MDIO_PCS_DEV_TYPE 0x3 +#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 +#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ +#define IXGBE_TWINAX_DEV 1 + #define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ #define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */ @@ -1129,9 +1187,23 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010 -#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ -#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ -#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ +#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ +#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */ + +#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */ +#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */ +#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */ +#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */ + +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Stat Reg */ +#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Tx Dis Reg */ +#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Tx Dis */ /* MII clause 22/28 definitions */ #define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ @@ -1632,6 +1704,7 @@ enum { #define IXGBE_LINKS_TL_FAULT 0x00001000 #define IXGBE_LINKS_SIGNAL 0x00000F00 +#define IXGBE_LINKS_SPEED_NON_STD 0x08000000 #define IXGBE_LINKS_SPEED_82599 0x30000000 #define IXGBE_LINKS_SPEED_10G_82599 0x30000000 #define IXGBE_LINKS_SPEED_1G_82599 0x20000000 @@ -1674,12 +1747,14 @@ enum { #define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */ /* SW_FW_SYNC/GSSR definitions */ -#define IXGBE_GSSR_EEP_SM 0x0001 -#define IXGBE_GSSR_PHY0_SM 0x0002 -#define IXGBE_GSSR_PHY1_SM 0x0004 -#define IXGBE_GSSR_MAC_CSR_SM 0x0008 -#define IXGBE_GSSR_FLASH_SM 0x0010 -#define IXGBE_GSSR_SW_MNG_SM 0x0400 +#define IXGBE_GSSR_EEP_SM 0x0001 +#define IXGBE_GSSR_PHY0_SM 0x0002 +#define IXGBE_GSSR_PHY1_SM 0x0004 +#define IXGBE_GSSR_MAC_CSR_SM 0x0008 +#define IXGBE_GSSR_FLASH_SM 0x0010 +#define IXGBE_GSSR_SW_MNG_SM 0x0400 +#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */ +#define IXGBE_GSSR_I2C_MASK 0x1800 /* FW Status register bitmask */ #define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ @@ -1713,27 +1788,32 @@ enum { #define IXGBE_PBANUM_LENGTH 11 /* Checksum and EEPROM pointers */ -#define IXGBE_PBANUM_PTR_GUARD 0xFAFA -#define IXGBE_EEPROM_CHECKSUM 0x3F -#define IXGBE_EEPROM_SUM 0xBABA -#define IXGBE_PCIE_ANALOG_PTR 0x03 -#define 
IXGBE_ATLAS0_CONFIG_PTR 0x04 -#define IXGBE_PHY_PTR 0x04 -#define IXGBE_ATLAS1_CONFIG_PTR 0x05 -#define IXGBE_OPTION_ROM_PTR 0x05 -#define IXGBE_PCIE_GENERAL_PTR 0x06 -#define IXGBE_PCIE_CONFIG0_PTR 0x07 -#define IXGBE_PCIE_CONFIG1_PTR 0x08 -#define IXGBE_CORE0_PTR 0x09 -#define IXGBE_CORE1_PTR 0x0A -#define IXGBE_MAC0_PTR 0x0B -#define IXGBE_MAC1_PTR 0x0C -#define IXGBE_CSR0_CONFIG_PTR 0x0D -#define IXGBE_CSR1_CONFIG_PTR 0x0E -#define IXGBE_FW_PTR 0x0F -#define IXGBE_PBANUM0_PTR 0x15 -#define IXGBE_PBANUM1_PTR 0x16 -#define IXGBE_FREE_SPACE_PTR 0X3E +#define IXGBE_PBANUM_PTR_GUARD 0xFAFA +#define IXGBE_EEPROM_CHECKSUM 0x3F +#define IXGBE_EEPROM_SUM 0xBABA +#define IXGBE_PCIE_ANALOG_PTR 0x03 +#define IXGBE_ATLAS0_CONFIG_PTR 0x04 +#define IXGBE_PHY_PTR 0x04 +#define IXGBE_ATLAS1_CONFIG_PTR 0x05 +#define IXGBE_OPTION_ROM_PTR 0x05 +#define IXGBE_PCIE_GENERAL_PTR 0x06 +#define IXGBE_PCIE_CONFIG0_PTR 0x07 +#define IXGBE_PCIE_CONFIG1_PTR 0x08 +#define IXGBE_CORE0_PTR 0x09 +#define IXGBE_CORE1_PTR 0x0A +#define IXGBE_MAC0_PTR 0x0B +#define IXGBE_MAC1_PTR 0x0C +#define IXGBE_CSR0_CONFIG_PTR 0x0D +#define IXGBE_CSR1_CONFIG_PTR 0x0E +#define IXGBE_PCIE_ANALOG_PTR_X550 0x02 +#define IXGBE_SHADOW_RAM_SIZE_X550 0x4000 +#define IXGBE_IXGBE_PCIE_GENERAL_SIZE 0x24 +#define IXGBE_PCIE_CONFIG_SIZE 0x08 +#define IXGBE_EEPROM_LAST_WORD 0x41 +#define IXGBE_FW_PTR 0x0F +#define IXGBE_PBANUM0_PTR 0x15 +#define IXGBE_PBANUM1_PTR 0x16 +#define IXGBE_FREE_SPACE_PTR 0X3E /* External Thermal Sensor Config */ #define IXGBE_ETS_CFG 0x26 @@ -1994,12 +2074,14 @@ enum { #define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 #define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 #define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 +#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000 #define IXGBE_MRQC_L3L4TXSWEN 0x00008000 #define IXGBE_FWSM_TS_ENABLED 0x1 /* Queue Drop Enable */ #define IXGBE_QDE_ENABLE 0x00000001 +#define IXGBE_QDE_HIDE_VLAN 0x00000002 #define IXGBE_QDE_IDX_MASK 0x00007F00 #define IXGBE_QDE_IDX_SHIFT 8 #define IXGBE_QDE_WRITE 0x00010000 @@ -2289,18 +2371,32 @@ enum ixgbe_fdir_pballoc_type { #define IXGBE_FDIR_DROP_QUEUE 127 /* Manageablility Host Interface defines */ -#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ -#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ -#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */ +#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ +#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ +#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */ +#define IXGBE_HI_FLASH_ERASE_TIMEOUT 1000 /* Process Erase command limit */ +#define IXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */ +#define IXGBE_HI_FLASH_APPLY_TIMEOUT 0 /* Process Apply command limit */ /* CEM Support */ -#define FW_CEM_HDR_LEN 0x4 -#define FW_CEM_CMD_DRIVER_INFO 0xDD -#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 -#define FW_CEM_CMD_RESERVED 0x0 -#define FW_CEM_UNUSED_VER 0x0 -#define FW_CEM_MAX_RETRIES 3 -#define FW_CEM_RESP_STATUS_SUCCESS 0x1 +#define FW_CEM_HDR_LEN 0x4 +#define FW_CEM_CMD_DRIVER_INFO 0xDD +#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 +#define FW_CEM_CMD_RESERVED 0x0 +#define FW_CEM_UNUSED_VER 0x0 +#define FW_CEM_MAX_RETRIES 3 +#define FW_CEM_RESP_STATUS_SUCCESS 0x1 +#define FW_READ_SHADOW_RAM_CMD 0x31 +#define FW_READ_SHADOW_RAM_LEN 0x6 +#define FW_WRITE_SHADOW_RAM_CMD 0x33 +#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */ +#define FW_SHADOW_RAM_DUMP_CMD 0x36 +#define 
FW_SHADOW_RAM_DUMP_LEN 0 +#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */ +#define FW_NVM_DATA_OFFSET 3 +#define FW_MAX_READ_BUFFER_SIZE 1024 +#define FW_DISABLE_RXEN_CMD 0xDE +#define FW_DISABLE_RXEN_LEN 0x1 /* Host Interface Command Structures */ struct ixgbe_hic_hdr { @@ -2313,6 +2409,25 @@ struct ixgbe_hic_hdr { u8 checksum; }; +struct ixgbe_hic_hdr2_req { + u8 cmd; + u8 buf_lenh; + u8 buf_lenl; + u8 checksum; +}; + +struct ixgbe_hic_hdr2_rsp { + u8 cmd; + u8 buf_lenl; + u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ + u8 checksum; +}; + +union ixgbe_hic_hdr2 { + struct ixgbe_hic_hdr2_req req; + struct ixgbe_hic_hdr2_rsp rsp; +}; + struct ixgbe_hic_drv_info { struct ixgbe_hic_hdr hdr; u8 port_num; @@ -2324,6 +2439,32 @@ struct ixgbe_hic_drv_info { u16 pad2; /* end spacing to ensure length is mult. of dword2 */ }; +/* These need to be dword aligned */ +struct ixgbe_hic_read_shadow_ram { + union ixgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ixgbe_hic_write_shadow_ram { + union ixgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ixgbe_hic_disable_rxen { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 pad2; + u16 pad3; +}; + /* Transmit Descriptor - Advanced */ union ixgbe_adv_tx_desc { struct { @@ -2437,10 +2578,12 @@ struct ixgbe_adv_tx_context_desc { typedef u32 ixgbe_autoneg_advertised; /* Link speed */ typedef u32 ixgbe_link_speed; -#define IXGBE_LINK_SPEED_UNKNOWN 0 -#define IXGBE_LINK_SPEED_100_FULL 0x0008 -#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 -#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 +#define IXGBE_LINK_SPEED_UNKNOWN 0 +#define IXGBE_LINK_SPEED_100_FULL 0x0008 +#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 +#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400 +#define IXGBE_LINK_SPEED_5GB_FULL 0x0800 +#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 #define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ IXGBE_LINK_SPEED_10GB_FULL) #define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ @@ -2588,6 +2731,8 @@ enum ixgbe_mac_type { ixgbe_mac_82598EB, ixgbe_mac_82599EB, ixgbe_mac_X540, + ixgbe_mac_X550, + ixgbe_mac_X550EM_x, ixgbe_num_macs }; @@ -2596,6 +2741,9 @@ enum ixgbe_phy_type { ixgbe_phy_none, ixgbe_phy_tn, ixgbe_phy_aq, + ixgbe_phy_x550em_kr, + ixgbe_phy_x550em_kx4, + ixgbe_phy_x550em_ext_t, ixgbe_phy_cu_unknown, ixgbe_phy_qt, ixgbe_phy_xaui, @@ -2839,7 +2987,7 @@ struct ixgbe_eeprom_operations { s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *); s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); s32 (*update_checksum)(struct ixgbe_hw *); - u16 (*calc_checksum)(struct ixgbe_hw *); + s32 (*calc_checksum)(struct ixgbe_hw *); }; struct ixgbe_mac_operations { @@ -2861,8 +3009,8 @@ struct ixgbe_mac_operations { s32 (*disable_rx_buff)(struct ixgbe_hw *); s32 (*enable_rx_buff)(struct ixgbe_hw *); s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); - s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16); - void (*release_swfw_sync)(struct ixgbe_hw *, u16); + s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32); + void (*release_swfw_sync)(struct ixgbe_hw *, u32); s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *); s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool); @@ -2908,6 +3056,11 @@ struct ixgbe_mac_operations { s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); + + /* DMA Coalescing */ + s32 (*dmac_config)(struct ixgbe_hw 
*hw); + s32 (*dmac_update_tcs)(struct ixgbe_hw *hw); + s32 (*dmac_config_tcs)(struct ixgbe_hw *hw); }; struct ixgbe_phy_operations { @@ -2920,6 +3073,7 @@ struct ixgbe_phy_operations { s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *); s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16); s32 (*setup_link)(struct ixgbe_hw *); + s32 (*setup_internal_link)(struct ixgbe_hw *); s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); @@ -2928,6 +3082,8 @@ struct ixgbe_phy_operations { s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *); s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); + s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val); + s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val); s32 (*check_overtemp)(struct ixgbe_hw *); }; @@ -2980,6 +3136,8 @@ struct ixgbe_phy_info { bool sfp_setup_needed; u32 revision; enum ixgbe_media_type media_type; + u8 lan_id; + u32 phy_semaphore_mask; bool reset_disable; ixgbe_autoneg_advertised autoneg_advertised; enum ixgbe_smart_speed smart_speed; @@ -3086,4 +3244,71 @@ struct ixgbe_info { #define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF +#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? (0x4010) : (0x8010)) +#define IXGBE_KRM_LINK_CTRL_1(P) ((P == 0) ? (0x420C) : (0x820C)) +#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634)) +#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638)) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P == 0) ? (0x4B00) : (0x8B00)) +#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P == 0) ? (0x4E00) : (0x8E00)) +#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P == 0) ? (0x5520) : (0x9520)) +#define IXGBE_KRM_RX_ANA_CTL(P) ((P == 0) ? 
(0x5A00) : (0x9A00)) + +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9) +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11) + +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31) + +#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6) +#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15) +#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16) + +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2) + +#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16) + +#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3) +#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31) + +#define IXGBE_KX4_LINK_CNTL_1 0x4C +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX (1 << 16) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 (1 << 17) +#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX (1 << 24) +#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 (1 << 25) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE (1 << 29) +#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP (1 << 30) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART (1 << 31) + +#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144 +#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148 + +#define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT 0 +#define IXGBE_SB_IOSF_CTRL_ADDR_MASK 0xFF +#define IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT 18 +#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \ + (0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT) +#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT 20 +#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \ + (0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT) +#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28 +#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7 +#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31 +#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT) +#define IXGBE_SB_IOSF_TARGET_KR_PHY 0 +#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1 +#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2 +#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3 + #endif /* _IXGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index e88305d5d18dc20bae9eb66dd3ef5e41128d7411..ba54ff07b438cd1e42c33768f060334b7f310834 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -32,6 +32,7 @@ #include "ixgbe.h" #include "ixgbe_phy.h" +#include "ixgbe_x540.h" #define IXGBE_X540_MAX_TX_QUEUES 128 #define IXGBE_X540_MAX_RX_QUEUES 128 @@ -42,17 +43,15 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); -static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); -static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw 
*hw); -static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) +enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) { return ixgbe_media_type_copper; } -static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) +s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; @@ -76,9 +75,8 @@ static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) * @speed: new link speed * @autoneg_wait_to_complete: true when waiting for completion is needed **/ -static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete) { return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); @@ -92,7 +90,7 @@ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, * and clears all interrupts, perform a PHY reset, and perform a link (MAC) * reset. **/ -static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) +s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) { s32 status; u32 ctrl, i; @@ -179,7 +177,7 @@ mac_reset_top: * and the generation start_hw function. * Then performs revision-specific operations, if any. **/ -static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) +s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) { s32 ret_val; @@ -197,7 +195,7 @@ static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) * Initializes the EEPROM parameters ixgbe_eeprom_info within the * ixgbe_hw struct in order to set up EEPROM access. **/ -static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) +s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; u32 eec; @@ -316,7 +314,7 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, * * @hw: pointer to hardware structure **/ -static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) +static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) { u16 i; u16 j; @@ -324,6 +322,8 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) u16 length = 0; u16 pointer = 0; u16 word = 0; + u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM; + u16 ptr_start = IXGBE_PCIE_ANALOG_PTR; /* * Do not use hw->eeprom.ops.read because we do not want to take @@ -332,10 +332,10 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) */ /* Include 0x0-0x3F in the checksum */ - for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { - if (ixgbe_read_eerd_generic(hw, i, &word) != 0) { + for (i = 0; i < checksum_last_word; i++) { + if (ixgbe_read_eerd_generic(hw, i, &word)) { hw_dbg(hw, "EEPROM read failed\n"); - break; + return IXGBE_ERR_EEPROM; } checksum += word; } @@ -344,11 +344,11 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) * Include all data from pointers 0x3, 0x6-0xE. This excludes the * FW, PHY module, and PCIe Expansion/Option ROM pointers. 
*/ - for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { + for (i = ptr_start; i < IXGBE_FW_PTR; i++) { if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) continue; - if (ixgbe_read_eerd_generic(hw, i, &pointer) != 0) { + if (ixgbe_read_eerd_generic(hw, i, &pointer)) { hw_dbg(hw, "EEPROM read failed\n"); break; } @@ -358,8 +358,9 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) pointer >= hw->eeprom.word_size) continue; - if (ixgbe_read_eerd_generic(hw, pointer, &length) != 0) { + if (ixgbe_read_eerd_generic(hw, pointer, &length)) { hw_dbg(hw, "EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; break; } @@ -368,10 +369,10 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) (pointer + length) >= hw->eeprom.word_size) continue; - for (j = pointer+1; j <= pointer+length; j++) { - if (ixgbe_read_eerd_generic(hw, j, &word) != 0) { + for (j = pointer + 1; j <= pointer + length; j++) { + if (ixgbe_read_eerd_generic(hw, j, &word)) { hw_dbg(hw, "EEPROM read failed\n"); - break; + return IXGBE_ERR_EEPROM; } checksum += word; } @@ -379,7 +380,7 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) checksum = (u16)IXGBE_EEPROM_SUM - checksum; - return checksum; + return (s32)checksum; } /** @@ -410,23 +411,34 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) return IXGBE_ERR_SWFW_SYNC; - checksum = hw->eeprom.ops.calc_checksum(hw); + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + goto out; + + checksum = (u16)(status & 0xffff); /* Do not use hw->eeprom.ops.read because we do not want to take * the synchronization semaphores twice here. */ status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); + if (status) + goto out; - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + hw_dbg(hw, "Invalid EEPROM checksum"); + status = IXGBE_ERR_EEPROM_CHECKSUM; + } /* If the user cares, return the calculated checksum */ if (checksum_val) *checksum_val = checksum; - /* Verify read and calculated checksums are the same */ - if (read_checksum != checksum) - return IXGBE_ERR_EEPROM_CHECKSUM; +out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } @@ -457,15 +469,22 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) return IXGBE_ERR_SWFW_SYNC; - checksum = hw->eeprom.ops.calc_checksum(hw); + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + goto out; + + checksum = (u16)(status & 0xffff); /* Do not use hw->eeprom.ops.write because we do not want to * take the synchronization semaphores twice here. 
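The calc_checksum callback now returns s32 so that a negative value can carry an error code while a non-negative value carries the 16-bit checksum in its low bits. A minimal sketch of the caller-side decode that the validate and update paths above perform:

    #include <stdint.h>

    /* Negative return = error code; otherwise the checksum rides in the
     * low 16 bits, exactly as the validate/update callers unpack it. */
    static int32_t decode_checksum(int32_t status, uint16_t *csum)
    {
        if (status < 0)
            return status; /* propagate the error */
        *csum = (uint16_t)(status & 0xffff);
        return 0;
    }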
*/ status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum); - if (!status) - status = ixgbe_update_flash_X540(hw); + if (status) + goto out; + + status = ixgbe_update_flash_X540(hw); +out: hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } @@ -544,7 +563,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) * Acquires the SWFW semaphore thought the SW_FW_SYNC register for * the specified function (CSR, PHY0, PHY1, NVM, Flash) **/ -static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) +s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) { u32 swfw_sync; u32 swmask = mask; @@ -612,7 +631,7 @@ static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) * Releases the SWFW semaphore through the SW_FW_SYNC register * for the specified function (CSR, PHY0, PHY1, EVM, Flash) **/ -static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) +void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) { u32 swfw_sync; u32 swmask = mask; @@ -699,7 +718,7 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) * Devices that implement the version 2 interface: * X540 **/ -static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) +s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) { u32 macc_reg; u32 ledctl_reg; @@ -735,7 +754,7 @@ static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) * Devices that implement the version 2 interface: * X540 **/ -static s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) +s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) { u32 macc_reg; u32 ledctl_reg; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h new file mode 100644 index 0000000000000000000000000000000000000000..a1468b1f4d8af0c4d10b9fb609f2790262e99f66 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h @@ -0,0 +1,39 @@ +/******************************************************************************* + * + * Intel 10 Gigabit PCI Express Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include "ixgbe_type.h" + +s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw); +enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); +s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c new file mode 100644 index 0000000000000000000000000000000000000000..ffdd1231f419a5e422b6d1bbc7b111a87650669f --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -0,0 +1,1432 @@ +/******************************************************************************* + * + * Intel 10 Gigabit PCI Express Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+#include "ixgbe_x540.h"
+#include "ixgbe_type.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+/** ixgbe_identify_phy_x550em - Get PHY type based on device id
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
+{
+ u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_SFP:
+ /* set up for CS4227 usage */
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+ if (hw->bus.lan_id) {
+ esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
+ esdp |= IXGBE_ESDP_SDP1_DIR;
+ }
+ esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+
+ return ixgbe_identify_module_generic(hw);
+ case IXGBE_DEV_ID_X550EM_X_KX4:
+ hw->phy.type = ixgbe_phy_x550em_kx4;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ hw->phy.type = ixgbe_phy_x550em_kr;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ return ixgbe_identify_phy_generic(hw);
+ default:
+ break;
+ }
+ return 0;
+}
+
+static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data)
+{
+ return IXGBE_NOT_IMPLEMENTED;
+}
+
+static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ return IXGBE_NOT_IMPLEMENTED;
+}
+
+/** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 eec;
+ u16 eeprom_size;
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->semaphore_delay = 10;
+ eeprom->type = ixgbe_flash;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+ IXGBE_EEC_SIZE_SHIFT);
+ eeprom->word_size = 1 << (eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+ hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
+ eeprom->type, eeprom->word_size);
+ }
+
+ return 0;
+}
+
+/** ixgbe_read_iosf_sb_reg_x550 - Reads a value from the specified register
+ * of the IOSF device
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to read
+ * @device_type: 3 bit device type
+ * @data: Pointer to the data read from the register
+ **/
+s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 *data)
+{
+ u32 i, command, error;
+
+ command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
+ (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+
+ /* Write IOSF control register */
+ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+
+ /* Check every 10 usec to see if the address cycle completed.
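IOSF sideband access is indirect: the target register address and device type are packed into a single command word, written to IXGBE_SB_IOSF_INDIRECT_CTRL, and then the BUSY bit is polled until the cycle completes. A sketch of the command-word composition, with the shift values taken from the ixgbe_type.h hunk earlier in this patch:

    #include <stdint.h>

    #define SB_IOSF_CTRL_ADDR_SHIFT          0  /* from ixgbe_type.h above */
    #define SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28
    #define SB_IOSF_CTRL_BUSY                (1u << 31)
    #define SB_IOSF_TARGET_KR_PHY            0

    /* Compose the command word the driver writes before polling BUSY. */
    static uint32_t iosf_command(uint32_t reg_addr, uint32_t device_type)
    {
        return (reg_addr << SB_IOSF_CTRL_ADDR_SHIFT) |
               (device_type << SB_IOSF_CTRL_TARGET_SELECT_SHIFT);
    }

    /* e.g. iosf_command(0x420C, SB_IOSF_TARGET_KR_PHY) addresses
     * KRM_LINK_CTRL_1 of port 0 on the internal KR PHY. */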
+ * The SB IOSF BUSY bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usleep_range(10, 20); + + command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); + if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0) + break; + } + + if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { + error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> + IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; + hw_dbg(hw, "Failed to read, error %x\n", error); + return IXGBE_ERR_PHY; + } + + if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { + hw_dbg(hw, "Read timed out\n"); + return IXGBE_ERR_PHY; + } + + *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA); + + return 0; +} + +/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface + * command assuming that the semaphore is already obtained. + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. + **/ +s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + s32 status; + struct ixgbe_hic_read_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = cpu_to_be32(offset * 2); + /* one word */ + buffer.length = cpu_to_be16(sizeof(u16)); + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, false); + if (status) + return status; + + *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, + FW_NVM_DATA_OFFSET); + + return 0; +} + +/** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the hostif. + **/ +s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + struct ixgbe_hic_read_shadow_ram buffer; + u32 current_word = 0; + u16 words_to_read; + s32 status; + u32 i; + + /* Take semaphore for the entire operation. 
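Responses to these version-2 host interface commands pack the status and the upper buffer-length bits into one byte, per the ixgbe_hic_hdr2_rsp comment in the ixgbe_type.h hunk ("7-5: high bits of buf_len, 4-0: status"). A decode sketch, under the assumption that those three bits are buf_len bits 10:8:

    #include <stdint.h>

    struct hic_hdr2_rsp {        /* mirrors ixgbe_hic_hdr2_rsp above */
        uint8_t cmd;
        uint8_t buf_lenl;
        uint8_t buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */
        uint8_t checksum;
    };

    static uint16_t rsp_buf_len(const struct hic_hdr2_rsp *rsp)
    {
        return (uint16_t)(((rsp->buf_lenh_status >> 5) << 8) | rsp->buf_lenl);
    }

    static uint8_t rsp_status(const struct hic_hdr2_rsp *rsp)
    {
        return rsp->buf_lenh_status & 0x1F;
    }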
*/ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + if (status) { + hw_dbg(hw, "EEPROM read buffer - semaphore failed\n"); + return status; + } + + while (words) { + if (words > FW_MAX_READ_BUFFER_SIZE / 2) + words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; + else + words_to_read = words; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = cpu_to_be32((offset + current_word) * 2); + buffer.length = cpu_to_be16(words_to_read * 2); + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, + false); + if (status) { + hw_dbg(hw, "Host interface command failed\n"); + goto out; + } + + for (i = 0; i < words_to_read; i++) { + u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) + + 2 * i; + u32 value = IXGBE_READ_REG(hw, reg); + + data[current_word] = (u16)(value & 0xffff); + current_word++; + i++; + if (i < words_to_read) { + value >>= 16; + data[current_word] = (u16)(value & 0xffff); + current_word++; + } + } + words -= words_to_read; + } + +out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** ixgbe_checksum_ptr_x550 - Checksum one pointer region + * @hw: pointer to hardware structure + * @ptr: pointer offset in eeprom + * @size: size of section pointed by ptr, if 0 first word will be used as size + * @csum: address of checksum to update + * + * Returns error status for any failure + **/ +static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, + u16 size, u16 *csum, u16 *buffer, + u32 buffer_size) +{ + u16 buf[256]; + s32 status; + u16 length, bufsz, i, start; + u16 *local_buffer; + + bufsz = sizeof(buf) / sizeof(buf[0]); + + /* Read a chunk at the pointer location */ + if (!buffer) { + status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf); + if (status) { + hw_dbg(hw, "Failed to read EEPROM image\n"); + return status; + } + local_buffer = buf; + } else { + if (buffer_size < ptr) + return IXGBE_ERR_PARAM; + local_buffer = &buffer[ptr]; + } + + if (size) { + start = 0; + length = size; + } else { + start = 1; + length = local_buffer[0]; + + /* Skip pointer section if length is invalid. 
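Each 32-bit read of the FLEX_MNG response area yields two 16-bit EEPROM words, low half first, which is why the loop above bumps the index a second time inside the body. An equivalent of that unpack step in isolation:

    #include <stdint.h>

    /* Unpack 16-bit EEPROM words from 32-bit response registers, low
     * half first, as ixgbe_read_ee_hostif_buffer_X550() does above. */
    static void unpack_words(const uint32_t *regs, uint16_t *words,
                             unsigned int nwords)
    {
        for (unsigned int i = 0; i < nwords; i++) {
            uint32_t value = regs[i / 2];

            words[i] = (i & 1) ? (uint16_t)(value >> 16)
                               : (uint16_t)(value & 0xffff);
        }
    }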
*/
+ if (length == 0xFFFF || length == 0 ||
+ (ptr + length) >= hw->eeprom.word_size)
+ return 0;
+ }
+
+ if (buffer && ((u32)start + (u32)length > buffer_size))
+ return IXGBE_ERR_PARAM;
+
+ for (i = start; length; i++, length--) {
+ if (i == bufsz && !buffer) {
+ ptr += bufsz;
+ i = 0;
+ if (length < bufsz)
+ bufsz = length;
+
+ /* Read a chunk at the pointer location */
+ status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
+ bufsz, buf);
+ if (status) {
+ hw_dbg(hw, "Failed to read EEPROM image\n");
+ return status;
+ }
+ }
+ *csum += local_buffer[i];
+ }
+ return 0;
+}
+
+/** ixgbe_calc_checksum_X550 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ * @buffer: pointer to buffer containing EEPROM data to checksum, or NULL
+ * to read the data from hardware
+ * @buffer_size: size of buffer
+ *
+ * Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
+{
+ u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
+ u16 *local_buffer;
+ s32 status;
+ u16 checksum = 0;
+ u16 pointer, i, size;
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (!buffer) {
+ /* Read pointer area */
+ status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
+ IXGBE_EEPROM_LAST_WORD + 1,
+ eeprom_ptrs);
+ if (status) {
+ hw_dbg(hw, "Failed to read EEPROM image\n");
+ return status;
+ }
+ local_buffer = eeprom_ptrs;
+ } else {
+ if (buffer_size < IXGBE_EEPROM_LAST_WORD)
+ return IXGBE_ERR_PARAM;
+ local_buffer = buffer;
+ }
+
+ /* For X550 hardware include 0x0-0x41 in the checksum, skip the
+ * checksum word itself
+ */
+ for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
+ if (i != IXGBE_EEPROM_CHECKSUM)
+ checksum += local_buffer[i];
+
+ /* Include all data from pointers 0x3, 0x6-0xE. This excludes the
+ * FW, PHY module, and PCIe Expansion/Option ROM pointers.
+ */
+ for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
+ if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
+ continue;
+
+ pointer = local_buffer[i];
+
+ /* Skip pointer section if the pointer is invalid. */
+ if (pointer == 0xFFFF || pointer == 0 ||
+ pointer >= hw->eeprom.word_size)
+ continue;
+
+ switch (i) {
+ case IXGBE_PCIE_GENERAL_PTR:
+ size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
+ break;
+ case IXGBE_PCIE_CONFIG0_PTR:
+ case IXGBE_PCIE_CONFIG1_PTR:
+ size = IXGBE_PCIE_CONFIG_SIZE;
+ break;
+ default:
+ size = 0;
+ break;
+ }
+
+ status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
+ buffer, buffer_size);
+ if (status)
+ return status;
+ }
+
+ checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+ return (s32)checksum;
+}
+
+/** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ *
+ * Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
+{
+ return ixgbe_calc_checksum_X550(hw, NULL, 0);
+}
+
+/** ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the hostif.
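Both checksum routines finish with the same fold: the word stored at IXGBE_EEPROM_CHECKSUM is IXGBE_EEPROM_SUM minus the running sum, so summing the whole image including the stored word yields IXGBE_EEPROM_SUM (0xBABA) on a valid part. The fold in isolation:

    #include <stdint.h>

    #define EEPROM_SUM 0xBABA /* from ixgbe_type.h */

    /* The stored checksum word is chosen so that running_sum plus the
     * stored word equals EEPROM_SUM modulo 2^16. */
    static uint16_t fold_checksum(uint16_t running_sum)
    {
        return (uint16_t)(EEPROM_SUM - running_sum);
    }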
+ **/
+s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ s32 status = 0;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
+ status = ixgbe_read_ee_hostif_data_X550(hw, offset, data);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/** ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ **/
+s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+ s32 status;
+ u16 checksum;
+ u16 read_checksum = 0;
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+ if (status) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ return status;
+ }
+
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ return status;
+
+ checksum = (u16)(status & 0xffff);
+
+ status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
+ &read_checksum);
+ if (status)
+ return status;
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum) {
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
+ hw_dbg(hw, "Invalid EEPROM checksum");
+ }
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+
+ return status;
+}
+
+/** ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status;
+ struct ixgbe_hic_write_shadow_ram buffer;
+
+ buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
+ buffer.hdr.req.buf_lenh = 0;
+ buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
+ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ /* one word */
+ buffer.length = cpu_to_be16(sizeof(u16));
+ buffer.data = data;
+ buffer.address = cpu_to_be32(offset * 2);
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT, false);
+ return status;
+}
+
+/** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status = 0;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
+ status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ } else {
+ hw_dbg(hw, "write ee hostif failed to get semaphore");
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/** ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
+ * @hw: pointer to hardware structure
+ *
+ * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
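A shadow RAM write only becomes durable once the checksum is recomputed and the shadow RAM is dumped to flash, which is what ixgbe_update_eeprom_checksum_X550() below arranges. A hypothetical caller sequence over the functions added in this file; stdint types stand in for the driver's u16/s32:

    #include <stdint.h>

    struct ixgbe_hw; /* opaque here; defined in ixgbe_type.h */
    int32_t ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, uint16_t offset,
                                       uint16_t data);
    int32_t ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw);

    /* Hypothetical helper: write one shadow RAM word, then recompute the
     * checksum and trigger the shadow RAM dump to flash. */
    static int32_t update_word(struct ixgbe_hw *hw, uint16_t offset,
                               uint16_t data)
    {
        int32_t status = ixgbe_write_ee_hostif_X550(hw, offset, data);

        if (status)
            return status;
        return ixgbe_update_eeprom_checksum_X550(hw);
    }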
+ **/
+s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ union ixgbe_hic_hdr2 buffer;
+
+ buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
+ buffer.req.buf_lenh = 0;
+ buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
+ buffer.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT, false);
+ return status;
+}
+
+/** ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After writing EEPROM to shadow RAM using EEWR register, software calculates
+ * checksum and updates the EEPROM and instructs the hardware to update
+ * the flash.
+ **/
+s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u16 checksum = 0;
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
+ if (status) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ return status;
+ }
+
+ status = ixgbe_calc_eeprom_checksum_X550(hw);
+ if (status < 0)
+ return status;
+
+ checksum = (u16)(status & 0xffff);
+
+ status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
+ checksum);
+ if (status)
+ return status;
+
+ status = ixgbe_update_flash_X550(hw);
+
+ return status;
+}
+
+/** ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of words
+ * @data: word(s) to write to the EEPROM
+ *
+ * Write 16 bit word(s) to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ s32 status = 0;
+ u32 i = 0;
+
+ /* Take semaphore for the entire operation.
*/ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + if (status) { + hw_dbg(hw, "EEPROM write buffer - semaphore failed\n"); + return status; + } + + for (i = 0; i < words; i++) { + status = ixgbe_write_ee_hostif_data_X550(hw, offset + i, + data[i]); + if (status) { + hw_dbg(hw, "Eeprom buffered write failed\n"); + break; + } + } + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + + return status; +} + +/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers + * @hw: pointer to hardware structure + **/ +void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + /* CS4227 does not support autoneg, so disable the laser control + * functions for SFP+ fiber + */ + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) { + mac->ops.disable_tx_laser = NULL; + mac->ops.enable_tx_laser = NULL; + mac->ops.flap_tx_laser = NULL; + } +} + +/** ixgbe_setup_sfp_modules_X550em - Setup SFP module + * @hw: pointer to hardware structure + */ +s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) +{ + bool setup_linear; + u16 reg_slice, edc_mode; + s32 ret_val; + + switch (hw->phy.sfp_type) { + case ixgbe_sfp_type_unknown: + return 0; + case ixgbe_sfp_type_not_present: + return IXGBE_ERR_SFP_NOT_PRESENT; + case ixgbe_sfp_type_da_cu_core0: + case ixgbe_sfp_type_da_cu_core1: + setup_linear = true; + break; + case ixgbe_sfp_type_srlr_core0: + case ixgbe_sfp_type_srlr_core1: + case ixgbe_sfp_type_da_act_lmt_core0: + case ixgbe_sfp_type_da_act_lmt_core1: + case ixgbe_sfp_type_1g_sx_core0: + case ixgbe_sfp_type_1g_sx_core1: + setup_linear = false; + break; + default: + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + ixgbe_init_mac_link_ops_X550em(hw); + hw->phy.ops.reset = NULL; + + /* The CS4227 slice address is the base address + the port-pair reg + * offset. I.e. Slice 0 = 0x12B0 and slice 1 = 0x22B0. + */ + reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12); + + if (setup_linear) + edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; + else + edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; + + /* Configure CS4227 for connection type. 
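+ * If the write to the IXGBE_CS4227 address fails, one retry is attempted + * at the alternate address 0x80 below.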
*/ + ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227, reg_slice, + edc_mode); + + if (ret_val) + ret_val = hw->phy.ops.write_i2c_combined(hw, 0x80, reg_slice, + edc_mode); + + return ret_val; +} + +/** ixgbe_get_link_capabilities_X550em - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + **/ +s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + /* SFP */ + if (hw->phy.media_type == ixgbe_media_type_fiber) { + /* CS4227 SFP must not enable auto-negotiation */ + *autoneg = false; + + if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { + *speed = IXGBE_LINK_SPEED_1GB_FULL; + return 0; + } + + /* Link capabilities are based on SFP */ + if (hw->phy.multispeed_fiber) + *speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + else + *speed = IXGBE_LINK_SPEED_10GB_FULL; + } else { + *speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + } + return 0; +} + +/** ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the + * IOSF device + * + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Data to write to the register + **/ +s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data) +{ + u32 i, command, error; + + command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | + (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); + + /* Write IOSF control register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); + + /* Write IOSF data register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data); + + /* Check every 10 usec to see if the address cycle completed. + * The SB IOSF BUSY bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usleep_range(10, 20); + + command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); + if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0) + break; + } + + if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { + error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> + IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; + hw_dbg(hw, "Failed to write, error %x\n", error); + return IXGBE_ERR_PHY; + } + + if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { + hw_dbg(hw, "Write timed out\n"); + return IXGBE_ERR_PHY; + } + + return 0; +} + +/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. + * @hw: pointer to hardware structure + * @speed: the link speed to force + * + * Configures the integrated KR PHY to use iXFI mode. Used to connect an + * internal and external PHY at a specific speed, without autonegotiation. + **/ +static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) +{ + s32 status; + u32 reg_val; + + /* Disable AN and force speed to 10G Serial. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status) + return status; + + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + + /* Select forced link speed for internal PHY.
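+ * Only 10G and 1G serial are valid here; any other speed falls through to + * the IXGBE_ERR_LINK_SETUP return below.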
*/ + switch (*speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + break; + default: + /* Other link speeds are not supported by internal KR PHY. */ + return IXGBE_ERR_LINK_SETUP; + } + + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + /* Disable training protocol FSM. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status) + return status; + + reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + /* Disable Flex from training TXFFE. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status) + return status; + + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status) + return status; + + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + /* Enable override for coefficients. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status) + return status; + + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + /* Toggle port SW reset by AN reset. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status) + return status; + + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + return status; +} + +/** ixgbe_setup_kx4_x550em - Configure the KX4 PHY. + * @hw: pointer to hardware structure + * + * Configures the integrated KX4 PHY. + **/ +s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) +{ + s32 status; + u32 reg_val; + + status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1, + IXGBE_SB_IOSF_TARGET_KX4_PCS0 + + hw->bus.lan_id, &reg_val); + if (status) + return status; + + reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 | + IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX); + + reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE; + + /* Advertise 10G support.
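+ * In this link control register the KX4 capability bit advertises 10G and + * the KX capability bit advertises 1G.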
*/ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4; + + /* Advertise 1G support. */ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX; + + /* Restart auto-negotiation. */ + reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART; + status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1, + IXGBE_SB_IOSF_TARGET_KX4_PCS0 + + hw->bus.lan_id, reg_val); + + return status; +} + +/** ixgbe_setup_kr_x550em - Configure the KR PHY. + * @hw: pointer to hardware structure + * + * Configures the integrated KR PHY. + **/ +s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) +{ + s32 status; + u32 reg_val; + + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status) + return status; + + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ; + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC; + reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR | + IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX); + + /* Advertise 10G support. */ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR; + + /* Advertise 1G support. */ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX; + + /* Restart auto-negotiation. */ + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + return status; +} + +/** ixgbe_setup_internal_phy_x550em - Configure integrated KR PHY + * @hw: pointer to hardware structure + * + * Configures the integrated KR PHY to talk to the external PHY. The base + * driver will call this function when it gets notification via interrupt from + * the external PHY. This function forces the internal PHY into iXFI mode at + * the correct speed. + * + * A return of a non-zero value indicates an error, and the base driver should + * not report link up.
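+ * The LASI status register is checked first so that no reconfiguration is + * done when the external link state has not actually changed.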
+ **/ +s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw) +{ + s32 status; + u16 lasi, autoneg_status, speed; + ixgbe_link_speed force_speed; + + /* Verify that the external link status has changed */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_XENPAK_LASI_STATUS, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &lasi); + if (status) + return status; + + /* If there was no change in link status, we can just exit */ + if (!(lasi & IXGBE_XENPAK_LASI_LINK_STATUS_ALARM)) + return 0; + + /* we read this twice back to back to indicate current status */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (status) + return status; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (status) + return status; + + /* If link is not up, return an error so the caller treats link as down */ + if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) + return IXGBE_ERR_INVALID_LINK_SETTINGS; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &speed); + if (status) + return status; + + /* clear everything but the speed and duplex bits */ + speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK; + + switch (speed) { + case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL: + force_speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL: + force_speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + default: + /* Internal PHY does not support anything else */ + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + return ixgbe_setup_ixfi_x550em(hw, &force_speed); +} + +/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary.
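+ * For the SFP design the ESDP register is programmed below before the + * PHY/SFP identification runs.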
+ **/ +s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val; + u32 esdp; + + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) { + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + + if (hw->bus.lan_id) { + esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); + esdp |= IXGBE_ESDP_SDP1_DIR; + } + esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + } + + /* Identify the PHY or SFP module */ + ret_val = phy->ops.identify(hw); + + /* Setup function pointers based on detected SFP module and speeds */ + ixgbe_init_mac_link_ops_X550em(hw); + if (phy->sfp_type != ixgbe_sfp_type_unknown) + phy->ops.reset = NULL; + + /* Set function pointers based on phy type */ + switch (hw->phy.type) { + case ixgbe_phy_x550em_kx4: + phy->ops.setup_link = ixgbe_setup_kx4_x550em; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; + case ixgbe_phy_x550em_kr: + phy->ops.setup_link = ixgbe_setup_kr_x550em; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; + case ixgbe_phy_x550em_ext_t: + phy->ops.setup_internal_link = ixgbe_setup_internal_phy_x550em; + break; + default: + break; + } + return ret_val; +} + +/** ixgbe_get_media_type_X550em - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + * + */ +enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) +{ + enum ixgbe_media_type media_type; + + /* Detect if there is a copper PHY attached. */ + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_KX4: + media_type = ixgbe_media_type_backplane; + break; + case IXGBE_DEV_ID_X550EM_X_SFP: + media_type = ixgbe_media_type_fiber; + break; + case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_10G_T: + media_type = ixgbe_media_type_copper; + break; + default: + media_type = ixgbe_media_type_unknown; + break; + } + return media_type; +} + +/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
+ ** @hw: pointer to hardware structure + **/ +s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) +{ + s32 status; + u16 reg; + u32 retries = 2; + + do { + /* decrement retries counter and exit if we hit 0 */ + if (retries < 1) { + hw_dbg(hw, "External PHY not yet finished resetting.\n"); + return IXGBE_ERR_PHY; + } + retries--; + + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_TX_VENDOR_ALARMS_3, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &reg); + if (status) + return status; + + /* Verify PHY FW reset has completed */ + } while ((reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) != 1); + + /* Set port to low power mode */ + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &reg); + if (status) + return status; + + /* Enable the transmitter */ + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &reg); + if (status) + return status; + + reg &= ~IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE; + + status = hw->phy.ops.write_reg(hw, + IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + reg); + if (status) + return status; + + /* Un-stall the PHY FW */ + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &reg); + if (status) + return status; + + reg &= ~IXGBE_MDIO_POWER_UP_STALL; + + status = hw->phy.ops.write_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + return status; +} + +/** ixgbe_reset_hw_X550em - Perform hardware reset + ** @hw: pointer to hardware structure + ** + ** Resets the hardware by resetting the transmit and receive units, masks + ** and clears all interrupts, performs a PHY reset, and performs a link (MAC) + ** reset. + **/ +s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) +{ + ixgbe_link_speed link_speed; + s32 status; + u32 ctrl = 0; + u32 i; + bool link_up = false; + + /* Call adapter stop to disable Tx/Rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status) + return status; + + /* flush pending Tx transactions */ + ixgbe_clear_tx_pending(hw); + + /* PHY ops must be identified and initialized prior to reset */ + + /* Identify PHY and related function pointers */ + status = hw->phy.ops.init(hw); + + /* start the external PHY */ + if (hw->phy.type == ixgbe_phy_x550em_ext_t) { + status = ixgbe_init_ext_t_x550em(hw); + if (status) + return status; + } + + /* Setup SFP module if there is one present. */ + if (hw->phy.sfp_setup_needed) { + status = hw->mac.ops.setup_sfp(hw); + hw->phy.sfp_setup_needed = false; + } + + /* Reset PHY */ + if (!hw->phy.reset_disable && hw->phy.ops.reset) + hw->phy.ops.reset(hw); + +mac_reset_top: + /* Issue global reset to the MAC. Needs to be SW reset if link is up. + * If link reset is used when link is up, it might reset the PHY when + * mng is using it. If link is down or the flag to force full link + * reset is set, then perform link reset.
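+ * When IXGBE_FLAGS_DOUBLE_RESET_REQUIRED is set, a second pass is made + * through mac_reset_top below.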
+ */ + ctrl = IXGBE_CTRL_LNK_RST; + + if (!hw->force_full_reset) { + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (link_up) + ctrl = IXGBE_CTRL_RST; + } + + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll for reset bit to self-clear meaning reset is complete */ + for (i = 0; i < 10; i++) { + udelay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST_MASK)) + break; + } + + if (ctrl & IXGBE_CTRL_RST_MASK) { + status = IXGBE_ERR_RESET_FAILED; + hw_dbg(hw, "Reset polling failed to complete.\n"); + } + + msleep(50); + + /* Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to allow time + * for any pending HW events to complete. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = 128; + hw->mac.ops.init_rx_addrs(hw); + + return status; +} + +#define X550_COMMON_MAC \ + .init_hw = &ixgbe_init_hw_generic, \ + .start_hw = &ixgbe_start_hw_X540, \ + .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, \ + .enable_rx_dma = &ixgbe_enable_rx_dma_generic, \ + .get_mac_addr = &ixgbe_get_mac_addr_generic, \ + .get_device_caps = &ixgbe_get_device_caps_generic, \ + .stop_adapter = &ixgbe_stop_adapter_generic, \ + .get_bus_info = &ixgbe_get_bus_info_generic, \ + .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, \ + .read_analog_reg8 = NULL, \ + .write_analog_reg8 = NULL, \ + .set_rxpba = &ixgbe_set_rxpba_generic, \ + .check_link = &ixgbe_check_mac_link_generic, \ + .led_on = &ixgbe_led_on_generic, \ + .led_off = &ixgbe_led_off_generic, \ + .blink_led_start = &ixgbe_blink_led_start_X540, \ + .blink_led_stop = &ixgbe_blink_led_stop_X540, \ + .set_rar = &ixgbe_set_rar_generic, \ + .clear_rar = &ixgbe_clear_rar_generic, \ + .set_vmdq = &ixgbe_set_vmdq_generic, \ + .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, \ + .clear_vmdq = &ixgbe_clear_vmdq_generic, \ + .init_rx_addrs = &ixgbe_init_rx_addrs_generic, \ + .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, \ + .enable_mc = &ixgbe_enable_mc_generic, \ + .disable_mc = &ixgbe_disable_mc_generic, \ + .clear_vfta = &ixgbe_clear_vfta_generic, \ + .set_vfta = &ixgbe_set_vfta_generic, \ + .fc_enable = &ixgbe_fc_enable_generic, \ + .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, \ + .init_uta_tables = &ixgbe_init_uta_tables_generic, \ + .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ + .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, \ + .release_swfw_sync = &ixgbe_release_swfw_sync_X540, \ + .disable_rx_buff = &ixgbe_disable_rx_buff_generic, \ + .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \ + .get_thermal_sensor_data = NULL, \ + .init_thermal_sensor_thresh = NULL, \ + .prot_autoc_read = &prot_autoc_read_generic, \ + .prot_autoc_write = &prot_autoc_write_generic, \ + +static struct ixgbe_mac_operations mac_ops_X550 = { + X550_COMMON_MAC + .reset_hw = &ixgbe_reset_hw_X540, + .get_media_type = &ixgbe_get_media_type_X540, + .get_san_mac_addr =
&ixgbe_get_san_mac_addr_generic, + .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, + .setup_link = &ixgbe_setup_mac_link_X540, + .set_rxpba = &ixgbe_set_rxpba_generic, + .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, + .setup_sfp = NULL, +}; + +static struct ixgbe_mac_operations mac_ops_X550EM_x = { + X550_COMMON_MAC + .reset_hw = &ixgbe_reset_hw_X550em, + .get_media_type = &ixgbe_get_media_type_X550em, + .get_san_mac_addr = NULL, + .get_wwn_prefix = NULL, + .setup_link = NULL, /* defined later */ + .get_link_capabilities = &ixgbe_get_link_capabilities_X550em, + .setup_sfp = ixgbe_setup_sfp_modules_X550em, + +}; + +#define X550_COMMON_EEP \ + .read = &ixgbe_read_ee_hostif_X550, \ + .read_buffer = &ixgbe_read_ee_hostif_buffer_X550, \ + .write = &ixgbe_write_ee_hostif_X550, \ + .write_buffer = &ixgbe_write_ee_hostif_buffer_X550, \ + .validate_checksum = &ixgbe_validate_eeprom_checksum_X550, \ + .update_checksum = &ixgbe_update_eeprom_checksum_X550, \ + .calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \ + +static struct ixgbe_eeprom_operations eeprom_ops_X550 = { + X550_COMMON_EEP + .init_params = &ixgbe_init_eeprom_params_X550, +}; + +static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { + X550_COMMON_EEP + .init_params = &ixgbe_init_eeprom_params_X540, +}; + +#define X550_COMMON_PHY \ + .identify_sfp = &ixgbe_identify_module_generic, \ + .reset = NULL, \ + .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, \ + .read_i2c_byte = &ixgbe_read_i2c_byte_generic, \ + .write_i2c_byte = &ixgbe_write_i2c_byte_generic, \ + .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \ + .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \ + .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \ + .check_overtemp = &ixgbe_tn_check_overtemp, \ + .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, + +static struct ixgbe_phy_operations phy_ops_X550 = { + X550_COMMON_PHY + .init = NULL, + .identify = &ixgbe_identify_phy_generic, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, + .setup_link = &ixgbe_setup_phy_link_generic, + .read_i2c_combined = &ixgbe_read_i2c_combined_generic, + .write_i2c_combined = &ixgbe_write_i2c_combined_generic, +}; + +static struct ixgbe_phy_operations phy_ops_X550EM_x = { + X550_COMMON_PHY + .init = &ixgbe_init_phy_ops_X550em, + .identify = &ixgbe_identify_phy_x550em, + .read_reg = NULL, /* defined later */ + .write_reg = NULL, /* defined later */ + .setup_link = NULL, /* defined later */ +}; + +struct ixgbe_info ixgbe_X550_info = { + .mac = ixgbe_mac_X550, + .get_invariants = &ixgbe_get_invariants_X540, + .mac_ops = &mac_ops_X550, + .eeprom_ops = &eeprom_ops_X550, + .phy_ops = &phy_ops_X550, + .mbx_ops = &mbx_ops_generic, +}; + +struct ixgbe_info ixgbe_X550EM_x_info = { + .mac = ixgbe_mac_X550EM_x, + .get_invariants = &ixgbe_get_invariants_X540, + .mac_ops = &mac_ops_X550EM_x, + .eeprom_ops = &eeprom_ops_X550EM_x, + .phy_ops = &phy_ops_X550EM_x, + .mbx_ops = &mbx_ops_generic, +}; diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 05e4f32d84f7410ea1ccdc5212eaedb5fbcf40cb..7412d378b77b95cb6032d4293086715002d72893 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -31,6 +31,8 @@ /* Device IDs */ #define IXGBE_DEV_ID_82599_VF 0x10ED #define IXGBE_DEV_ID_X540_VF 0x1515 +#define IXGBE_DEV_ID_X550_VF 0x1565 +#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 #define IXGBE_VF_IRQ_CLEAR_MASK 7 
#define IXGBE_VF_MAX_TX_QUEUES 8 diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index ba96cb5b886d2880f83ebad953f75b05c71a3276..8c44ab25f3fa7100b3c0c351c75b0733b4c6bcd6 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -58,8 +58,9 @@ struct ixgbevf_tx_buffer { }; struct ixgbevf_rx_buffer { - struct sk_buff *skb; dma_addr_t dma; + struct page *page; + unsigned int page_offset; }; struct ixgbevf_stats { @@ -79,7 +80,6 @@ struct ixgbevf_tx_queue_stats { }; struct ixgbevf_rx_queue_stats { - u64 non_eop_descs; u64 alloc_rx_page_failed; u64 alloc_rx_buff_failed; u64 csum_err; @@ -92,9 +92,10 @@ struct ixgbevf_ring { void *desc; /* descriptor ring memory */ dma_addr_t dma; /* phys. address of descriptor ring */ unsigned int size; /* length in bytes */ - unsigned int count; /* amount of descriptors */ - unsigned int next_to_use; - unsigned int next_to_clean; + u16 count; /* amount of descriptors */ + u16 next_to_use; + u16 next_to_clean; + u16 next_to_alloc; union { struct ixgbevf_tx_buffer *tx_buffer_info; @@ -110,12 +111,11 @@ struct ixgbevf_ring { u64 hw_csum_rx_error; u8 __iomem *tail; + struct sk_buff *skb; u16 reg_idx; /* holds the special value that gets the hardware register * offset associated with this ring, which is different * for DCB and RSS modes */ - - u16 rx_buf_len; int queue_index; /* needed for multiqueue queue management */ }; @@ -134,12 +134,10 @@ struct ixgbevf_ring { /* Supported Rx Buffer Sizes */ #define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ -#define IXGBEVF_RXBUFFER_2K 2048 -#define IXGBEVF_RXBUFFER_4K 4096 -#define IXGBEVF_RXBUFFER_8K 8192 -#define IXGBEVF_RXBUFFER_10K 10240 +#define IXGBEVF_RXBUFFER_2048 2048 #define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 +#define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) @@ -307,6 +305,13 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) ((_eitr) ? 
(1000000000 / ((_eitr) * 256)) : 8) #define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG +/* ixgbevf_test_staterr - tests bits in Rx descriptor status and error fields */ +static inline __le32 ixgbevf_test_staterr(union ixgbe_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring) { u16 ntc = ring->next_to_clean; @@ -339,8 +344,10 @@ static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value) /* board specific private data structure */ struct ixgbevf_adapter { - struct timer_list watchdog_timer; + /* this field must be first, see ixgbevf_process_skb_fields */ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + + struct timer_list watchdog_timer; struct work_struct reset_task; struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; @@ -363,7 +370,6 @@ struct ixgbevf_adapter { struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */ u64 hw_csum_rx_error; u64 hw_rx_no_dma_resources; - u64 non_eop_descs; int num_msix_vectors; u32 alloc_rx_page_failed; u32 alloc_rx_buff_failed; @@ -373,7 +379,7 @@ struct ixgbevf_adapter { */ u32 flags; #define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1) -#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 1) + #define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2) struct msix_entry *msix_entries; @@ -423,18 +429,17 @@ enum ixbgevf_state_t { __IXGBEVF_WORK_INIT, }; -struct ixgbevf_cb { - struct sk_buff *prev; -}; -#define IXGBE_CB(skb) ((struct ixgbevf_cb *)(skb)->cb) - enum ixgbevf_boards { board_82599_vf, board_X540_vf, + board_X550_vf, + board_X550EM_x_vf, }; extern const struct ixgbevf_info ixgbevf_82599_vf_info; extern const struct ixgbevf_info ixgbevf_X540_vf_info; +extern const struct ixgbevf_info ixgbevf_X550_vf_info; +extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info; extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; /* needed by ethtool.c */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 030a219c85e363802644f1302dbf56c0ac709c8b..62a0d8e0f17da5ce75d48f9ff39fca5354c4447f 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -66,6 +66,8 @@ static char ixgbevf_copyright[] = static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_82599_vf] = &ixgbevf_82599_vf_info, [board_X540_vf] = &ixgbevf_X540_vf_info, + [board_X550_vf] = &ixgbevf_X550_vf_info, + [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info, }; /* ixgbevf_pci_tbl - PCI Device ID Table @@ -79,6 +81,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = { static const struct pci_device_id ixgbevf_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf }, /* required last entry */ {0, } }; @@ -143,21 +147,6 @@ u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) return value; } -static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring, - u32 val) -{ - rx_ring->next_to_use = val; - - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). 
- */ - wmb(); - ixgbevf_write_tail(rx_ring, val); -} - /** * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors * @adapter: pointer to adapter struct @@ -342,40 +331,13 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, return !!budget; } -/** - * ixgbevf_receive_skb - Send a completed packet up the stack - * @q_vector: structure containing interrupt and ring information - * @skb: packet to send up - * @status: hardware indication of status of receive - * @rx_desc: rx descriptor - **/ -static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, - struct sk_buff *skb, u8 status, - union ixgbe_adv_rx_desc *rx_desc) -{ - struct ixgbevf_adapter *adapter = q_vector->adapter; - bool is_vlan = (status & IXGBE_RXD_STAT_VP); - u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); - - if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans)) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); - - if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) - napi_gro_receive(&q_vector->napi, skb); - else - netif_rx(skb); -} - /** * ixgbevf_rx_skb - Helper function to determine proper Rx method * @q_vector: structure containing interrupt and ring information * @skb: packet to send up - * @status: hardware indication of status of receive - * @rx_desc: rx descriptor **/ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, - struct sk_buff *skb, u8 status, - union ixgbe_adv_rx_desc *rx_desc) + struct sk_buff *skb) { #ifdef CONFIG_NET_RX_BUSY_POLL skb_mark_napi_id(skb, &q_vector->napi); @@ -387,17 +349,17 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, } #endif /* CONFIG_NET_RX_BUSY_POLL */ - ixgbevf_receive_skb(q_vector, skb, status, rx_desc); + napi_gro_receive(&q_vector->napi, skb); } -/** - * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum - * @ring: pointer to Rx descriptor ring structure - * @status_err: hardware indication of status of receive +/* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum + * @ring: structure containing ring specific data + * @rx_desc: current Rx descriptor being processed * @skb: skb currently being received and modified - **/ + */ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, - u32 status_err, struct sk_buff *skb) + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) { skb_checksum_none_assert(skb); @@ -406,16 +368,16 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, return; /* if IP and error */ - if ((status_err & IXGBE_RXD_STAT_IPCS) && - (status_err & IXGBE_RXDADV_ERR_IPE)) { + if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) && + ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) { ring->rx_stats.csum_err++; return; } - if (!(status_err & IXGBE_RXD_STAT_L4CS)) + if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS)) return; - if (status_err & IXGBE_RXDADV_ERR_TCPE) { + if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { ring->rx_stats.csum_err++; return; } @@ -424,52 +386,408 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, skb->ip_summed = CHECKSUM_UNNECESSARY; } +/* ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the checksum, VLAN, protocol, and other fields within + * the skb.
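+ * The active_vlans bitmap is accessed through netdev_priv() here, which is + * why that field must remain first in the adapter structure.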
+ */ +static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + ixgbevf_rx_checksum(rx_ring, rx_desc, skb); + + if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { + u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); + unsigned long *active_vlans = netdev_priv(rx_ring->netdev); + + if (test_bit(vid & VLAN_VID_MASK, active_vlans)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +/** + * ixgbevf_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it returns true + * indicating that this is in fact a non-EOP buffer. + **/ +static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IXGBEVF_RX_DESC(rx_ring, ntc)); + + if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) + return false; + + return true; +} + +static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring, + struct ixgbevf_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma = bi->dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_page(); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_page(page); + + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = 0; + + return true; +} + /** * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on + * @cleaned_count: number of buffers to replace **/ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, - int cleaned_count) + u16 cleaned_count) { union ixgbe_adv_rx_desc *rx_desc; struct ixgbevf_rx_buffer *bi; unsigned int i = rx_ring->next_to_use; - while (cleaned_count--) { - rx_desc = IXGBEVF_RX_DESC(rx_ring, i); - bi = &rx_ring->rx_buffer_info[i]; + /* nothing to do or no valid netdev defined */ + if (!cleaned_count || !rx_ring->netdev) + return; - if (!bi->skb) { - struct sk_buff *skb; + rx_desc = IXGBEVF_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_buf_len); - if (!skb) - goto no_buffers; + do { + if (!ixgbevf_alloc_mapped_page(rx_ring, bi)) + break; - bi->skb = skb; + /* Refresh the desc even if pkt_addr didn't change + * because each write-back erases this info.
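+ * hdr_addr is cleared further down as well, since it overlays the + * write-back status word and stale status bits must not be misread.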
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); - bi->dma = dma_map_single(rx_ring->dev, skb->data, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, bi->dma)) { - dev_kfree_skb(skb); - bi->skb = NULL; - dev_err(rx_ring->dev, "Rx DMA map failed\n"); - break; - } + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IXGBEVF_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; } - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); - i++; - if (i == rx_ring->count) - i = 0; + /* clear the hdr_addr for the next_to_use descriptor */ + rx_desc->read.hdr_addr = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + ixgbevf_write_tail(rx_ring, i); } +} + +/* ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being adjusted + * + * This function is an ixgbevf specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, + struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; -no_buffers: - rx_ring->rx_stats.alloc_rx_buff_failed++; - if (rx_ring->next_to_use != i) - ixgbevf_release_rx_desc(rx_ring, i); + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/* ixgbevf_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Check for corrupted packet headers caused by senders on the local L2 + * embedded NIC switch not setting up their Tx Descriptors right. These + * should be very rare. + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. 
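+ * eth_skb_pad() consumes the skb on failure, so a true return here always + * means the skb is gone.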
+ */ +static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + /* verify that the packet does not have any known errors */ + if (unlikely(ixgbevf_test_staterr(rx_desc, + IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) { + struct net_device *netdev = rx_ring->netdev; + + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + ixgbevf_pull_tail(rx_ring, skb); + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/* ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + */ +static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, + struct ixgbevf_rx_buffer *old_buff) +{ + struct ixgbevf_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + new_buff->page = old_buff->page; + new_buff->dma = old_buff->dma; + new_buff->page_offset = old_buff->page_offset; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, + new_buff->page_offset, + IXGBEVF_RX_BUFSZ, + DMA_FROM_DEVICE); +} + +static inline bool ixgbevf_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; +} + +/* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. 
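+ * With pages below 8K the offset simply flips between the two buffer + * halves; with larger pages it advances by truesize until the page is + * exhausted.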
+ */ +static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, + struct ixgbevf_rx_buffer *rx_buffer, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = IXGBEVF_RX_BUFSZ; +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); +#endif + + if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* page is not reserved, we can reuse buffer as is */ + if (likely(!ixgbevf_page_is_reserved(page))) + return true; + + /* this page cannot be reused so discard it */ + put_page(page); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(ixgbevf_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ; + +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ)) + return false; + +#endif + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. + */ + atomic_inc(&page->_count); + + return true; +} + +static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct ixgbevf_rx_buffer *rx_buffer; + struct page *page; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + if (likely(!skb)) { + void *page_addr = page_address(page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + IXGBEVF_RX_HDR_SIZE); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return NULL; + } + + /* we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + IXGBEVF_RX_BUFSZ, + DMA_FROM_DEVICE); + + /* pull page into skb */ + if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + ixgbevf_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->dma = 0; + rx_buffer->page = NULL; + + return skb; } static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, @@ -484,78 +802,51 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, struct ixgbevf_ring *rx_ring, int budget) { - union ixgbe_adv_rx_desc *rx_desc, *next_rxd; - struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer; - struct sk_buff *skb; - unsigned int i; - u32 len, staterr; - int 
cleaned_count = 0; unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = ixgbevf_desc_unused(rx_ring); + struct sk_buff *skb = rx_ring->skb; - i = rx_ring->next_to_clean; - rx_desc = IXGBEVF_RX_DESC(rx_ring, i); - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - rx_buffer_info = &rx_ring->rx_buffer_info[i]; + while (likely(total_rx_packets < budget)) { + union ixgbe_adv_rx_desc *rx_desc; - while (staterr & IXGBE_RXD_STAT_DD) { - if (!budget) - break; - budget--; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) { + ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } - rmb(); /* read descriptor and rx_buffer_info after status DD */ - len = le16_to_cpu(rx_desc->wb.upper.length); - skb = rx_buffer_info->skb; - prefetch(skb->data - NET_IP_ALIGN); - rx_buffer_info->skb = NULL; + rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean); - if (rx_buffer_info->dma) { - dma_unmap_single(rx_ring->dev, rx_buffer_info->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_buffer_info->dma = 0; - skb_put(skb, len); - } + if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) + break; - i++; - if (i == rx_ring->count) - i = 0; + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * RXD_STAT_DD bit is set + */ + rmb(); - next_rxd = IXGBEVF_RX_DESC(rx_ring, i); - prefetch(next_rxd); - cleaned_count++; + /* retrieve a buffer from the ring */ + skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb); - next_buffer = &rx_ring->rx_buffer_info[i]; + /* exit if we failed to retrieve a buffer */ + if (!skb) + break; - if (!(staterr & IXGBE_RXD_STAT_EOP)) { - skb->next = next_buffer->skb; - IXGBE_CB(skb->next)->prev = skb; - rx_ring->rx_stats.non_eop_descs++; - goto next_desc; - } + cleaned_count++; - /* we should not be chaining buffers, if we did drop the skb */ - if (IXGBE_CB(skb)->prev) { - do { - struct sk_buff *this = skb; - skb = IXGBE_CB(skb)->prev; - dev_kfree_skb(this); - } while (skb); - goto next_desc; - } + /* fetch next buffer in frame if non-eop */ + if (ixgbevf_is_non_eop(rx_ring, rx_desc)) + continue; - /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { - dev_kfree_skb_irq(skb); - goto next_desc; + /* verify the packet layout is correct */ + if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; } - ixgbevf_rx_checksum(rx_ring, staterr, skb); - /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; - total_rx_packets++; - - skb->protocol = eth_type_trans(skb, rx_ring->netdev); /* Workaround hardware that can't do proper VEPA multicast * source pruning. 
@@ -565,32 +856,23 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, ether_addr_equal(rx_ring->netdev->dev_addr, eth_hdr(skb)->h_source)) { dev_kfree_skb_irq(skb); - goto next_desc; + continue; } - ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc); + /* populate checksum, VLAN, and protocol */ + ixgbevf_process_skb_fields(rx_ring, rx_desc, skb); -next_desc: - rx_desc->wb.upper.status_error = 0; + ixgbevf_rx_skb(q_vector, skb); - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) { - ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count); - cleaned_count = 0; - } - - /* use prefetched values */ - rx_desc = next_rxd; - rx_buffer_info = &rx_ring->rx_buffer_info[i]; + /* reset skb pointer */ + skb = NULL; - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + /* update budget accounting */ + total_rx_packets++; } - rx_ring->next_to_clean = i; - cleaned_count = ixgbevf_desc_unused(rx_ring); - - if (cleaned_count) - ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count); + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; @@ -634,12 +916,10 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) else per_ring_budget = budget; - adapter->flags |= IXGBE_FLAG_IN_NETPOLL; ixgbevf_for_each_ring(ring, q_vector->rx) clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring, per_ring_budget) < per_ring_budget); - adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; #ifdef CONFIG_NET_RX_BUSY_POLL ixgbevf_qv_unlock_napi(q_vector); @@ -1229,19 +1509,15 @@ static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) { - struct ixgbevf_ring *rx_ring; struct ixgbe_hw *hw = &adapter->hw; u32 srrctl; - rx_ring = adapter->rx_ring[index]; - srrctl = IXGBE_SRRCTL_DROP_EN; + srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; + srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; - srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> - IXGBE_SRRCTL_BSIZEPKT_SHIFT; - IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); } @@ -1260,40 +1536,6 @@ static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); } -static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; - int i; - u16 rx_buf_len; - - /* notify the PF of our intent to use this size of frame */ - ixgbevf_rlpml_set_vf(hw, max_frame); - - /* PF will allow an extra 4 bytes past for vlan tagged frames */ - max_frame += VLAN_HLEN; - - /* - * Allocate buffer sizes that fit well into 32K and - * take into account max frame size of 9.5K - */ - if ((hw->mac.type == ixgbe_mac_X540_vf) && - (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)) - rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; - else if (max_frame <= IXGBEVF_RXBUFFER_2K) - rx_buf_len = IXGBEVF_RXBUFFER_2K; - else if (max_frame <= IXGBEVF_RXBUFFER_4K) - rx_buf_len = IXGBEVF_RXBUFFER_4K; - else if (max_frame <= IXGBEVF_RXBUFFER_8K) - rx_buf_len = IXGBEVF_RXBUFFER_8K; - else - rx_buf_len = IXGBEVF_RXBUFFER_10K; - - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->rx_buf_len = rx_buf_len; -} - #define IXGBEVF_MAX_RX_DESC_POLL 10 static void ixgbevf_disable_rx_queue(struct 
ixgbevf_adapter *adapter, struct ixgbevf_ring *ring) @@ -1371,12 +1613,13 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, /* reset ntu and ntc to place SW in sync with hardwdare */ ring->next_to_clean = 0; ring->next_to_use = 0; + ring->next_to_alloc = 0; ixgbevf_configure_srrctl(adapter, reg_idx); - /* prevent DMA from exceeding buffer space available */ - rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; - rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN; + /* allow any size packet since we can handle overflow */ + rxdctl &= ~IXGBE_RXDCTL_RLPML_EN; + rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); @@ -1393,11 +1636,13 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) { int i; + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; ixgbevf_setup_psrtype(adapter); - /* set_rx_buffer_len must be called before ring initialization */ - ixgbevf_set_rx_buffer_len(adapter); + /* notify the PF of our intent to use this size of frame */ + ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ @@ -1702,32 +1947,32 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter) **/ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring) { + struct device *dev = rx_ring->dev; unsigned long size; unsigned int i; + /* Free Rx ring sk_buff */ + if (rx_ring->skb) { + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + } + + /* ring already cleared, nothing to do */ if (!rx_ring->rx_buffer_info) return; - /* Free all the Rx ring sk_buffs */ + /* Free all the Rx ring pages */ for (i = 0; i < rx_ring->count; i++) { - struct ixgbevf_rx_buffer *rx_buffer_info; + struct ixgbevf_rx_buffer *rx_buffer; - rx_buffer_info = &rx_ring->rx_buffer_info[i]; - if (rx_buffer_info->dma) { - dma_unmap_single(rx_ring->dev, rx_buffer_info->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_buffer_info->dma = 0; - } - if (rx_buffer_info->skb) { - struct sk_buff *skb = rx_buffer_info->skb; - rx_buffer_info->skb = NULL; - do { - struct sk_buff *this = skb; - skb = IXGBE_CB(skb)->prev; - dev_kfree_skb(this); - } while (skb); - } + rx_buffer = &rx_ring->rx_buffer_info[i]; + if (rx_buffer->dma) + dma_unmap_page(dev, rx_buffer->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + rx_buffer->dma = 0; + if (rx_buffer->page) + __free_page(rx_buffer->page); + rx_buffer->page = NULL; } size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; @@ -3274,6 +3519,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p) static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; @@ -3282,7 +3528,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; break; default: - if (adapter->hw.mac.type == ixgbe_mac_X540_vf) + if (adapter->hw.mac.type != ixgbe_mac_82599_vf) max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; break; } @@ -3291,17 +3537,35 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) if ((new_mtu < 68) || (max_frame > max_possible_frame)) return -EINVAL; - hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", + hw_dbg(hw, "changing MTU from %d to 
%d\n", netdev->mtu, new_mtu); /* must set new MTU before calling down or up */ netdev->mtu = new_mtu; - if (netif_running(netdev)) - ixgbevf_reinit_locked(adapter); + /* notify the PF of our intent to use this size of frame */ + ixgbevf_rlpml_set_vf(hw, max_frame); return 0; } +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void ixgbevf_netpoll(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + int i; + + /* if interface is down do nothing */ + if (test_bit(__IXGBEVF_DOWN, &adapter->state)) + return; + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbevf_msix_clean_rings(0, adapter->q_vector[i]); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -3438,6 +3702,9 @@ static const struct net_device_ops ixgbevf_netdev_ops = { #ifdef CONFIG_NET_RX_BUSY_POLL .ndo_busy_poll = ixgbevf_busy_poll_recv, #endif +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ixgbevf_netpoll, +#endif }; static void ixgbevf_assign_netdev_ops(struct net_device *dev) @@ -3465,6 +3732,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct ixgbe_hw *hw = NULL; const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; int err, pci_using_dac; + bool disable_dev = false; err = pci_enable_device(pdev); if (err) @@ -3499,7 +3767,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) SET_NETDEV_DEV(netdev, &pdev->dev); - pci_set_drvdata(pdev, netdev); adapter = netdev_priv(netdev); adapter->netdev = netdev; @@ -3588,16 +3855,28 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_register; + pci_set_drvdata(pdev, netdev); netif_carrier_off(netdev); ixgbevf_init_last_counter_stats(adapter); - /* print the MAC address */ - hw_dbg(hw, "%pM\n", netdev->dev_addr); + /* print the VF info */ + dev_info(&pdev->dev, "%pM\n", netdev->dev_addr); + dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type); - hw_dbg(hw, "MAC: %d\n", hw->mac.type); + switch (hw->mac.type) { + case ixgbe_mac_X550_vf: + dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n"); + break; + case ixgbe_mac_X540_vf: + dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n"); + break; + case ixgbe_mac_82599_vf: + default: + dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n"); + break; + } - hw_dbg(hw, "Intel(R) 82599 Virtual Function\n"); return 0; err_register: @@ -3606,12 +3885,13 @@ err_sw_init: ixgbevf_reset_interrupt_capability(adapter); iounmap(adapter->io_addr); err_ioremap: + disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); free_netdev(netdev); err_alloc_etherdev: pci_release_regions(pdev); err_pci_reg: err_dma: - if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) + if (!adapter || disable_dev) pci_disable_device(pdev); return err; } @@ -3628,7 +3908,13 @@ err_dma: static void ixgbevf_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); - struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbevf_adapter *adapter; + bool disable_dev; + + if (!netdev) + return; + + adapter = netdev_priv(netdev); set_bit(__IXGBEVF_REMOVING, &adapter->state); @@ -3648,9 +3934,10 @@ static void ixgbevf_remove(struct pci_dev *pdev) 
hw_dbg(&adapter->hw, "Remove complete\n"); + disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); free_netdev(netdev); - if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) + if (disable_dev) pci_disable_device(pdev); } diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 9cddd56d02c39305a69e13a25c2de88b4d92ccbb..cdb53be7d9958e4cb0f92456f459eacea122e038 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -617,3 +617,13 @@ const struct ixgbevf_info ixgbevf_X540_vf_info = { .mac = ixgbe_mac_X540_vf, .mac_ops = &ixgbevf_mac_ops, }; + +const struct ixgbevf_info ixgbevf_X550_vf_info = { + .mac = ixgbe_mac_X550_vf, + .mac_ops = &ixgbevf_mac_ops, +}; + +const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = { + .mac = ixgbe_mac_X550EM_x_vf, + .mac_ops = &ixgbevf_mac_ops, +}; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index aa8cc8dc25d15af5bf72621a0afb66297bac18e5..5b172427f459a1c30070fdf52856ca2f0116bedb 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -74,6 +74,8 @@ enum ixgbe_mac_type { ixgbe_mac_unknown = 0, ixgbe_mac_82599_vf, ixgbe_mac_X540_vf, + ixgbe_mac_X550_vf, + ixgbe_mac_X550EM_x_vf, ixgbe_num_macs }; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index ade067de168959b30c9e7eafc07ed40df0967ee0..96208f17bb53be6240ae9e8d0cd30cc09041163e 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -216,7 +216,7 @@ /* Various constants */ /* Coalescing */ -#define MVNETA_TXDONE_COAL_PKTS 16 +#define MVNETA_TXDONE_COAL_PKTS 1 #define MVNETA_RX_COAL_PKTS 32 #define MVNETA_RX_COAL_USEC 100 @@ -1721,6 +1721,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) u16 txq_id = skb_get_queue_mapping(skb); struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; struct mvneta_tx_desc *tx_desc; + int len = skb->len; int frags = 0; u32 tx_cmd; @@ -1788,7 +1789,7 @@ out: u64_stats_update_begin(&stats->syncp); stats->tx_packets++; - stats->tx_bytes += skb->len; + stats->tx_bytes += len; u64_stats_update_end(&stats->syncp); } else { dev->stats.tx_dropped++; @@ -2558,11 +2559,10 @@ static void mvneta_adjust_link(struct net_device *ndev) MVNETA_GMAC_FORCE_LINK_DOWN); mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); mvneta_port_up(pp); - netdev_info(pp->dev, "link up\n"); } else { mvneta_port_down(pp); - netdev_info(pp->dev, "link down\n"); } + phy_print_status(phydev); } } diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index c3b209cd06609fb85ffe3283d8c31028da8e6a60..af829c57840039e54e853bd5c4230ab0c6f17699 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -106,6 +106,7 @@ #define SDMA_CMD_ERD (1 << 7) /* Bit definitions of the Port Config Reg */ +#define PCR_DUPLEX_FULL (1 << 15) #define PCR_HS (1 << 12) #define PCR_EN (1 << 7) #define PCR_PM (1 << 0) @@ -113,11 +114,17 @@ /* Bit definitions of the Port Config Extend Reg */ #define PCXR_2BSM (1 << 28) #define PCXR_DSCP_EN (1 << 21) +#define PCXR_RMII_EN (1 << 20) +#define PCXR_AN_SPEED_DIS (1 << 19) +#define PCXR_SPEED_100 (1 << 18) #define PCXR_MFL_1518 (0 << 14) #define PCXR_MFL_1536 (1 << 14) #define PCXR_MFL_2048 (2 << 14) #define PCXR_MFL_64K (3 << 14) +#define PCXR_FLOWCTL_DIS (1 << 12) #define PCXR_FLP (1 << 11) +#define 
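
/*
 * Sketch (not part of the patch): the mvneta hunk above caches skb->len
 * before the frame is handed to the hardware.  Once the descriptor belongs
 * to the NIC the skb can be completed and freed at any moment (all the more
 * with MVNETA_TXDONE_COAL_PKTS lowered to 1), so the statistics update must
 * not dereference it afterwards.  The general shape (illustrative):
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        int len = skb->len;     /* sample before ownership is transferred */

        /* ... post skb to the TX ring; it may be freed from here on ... */

        dev->stats.tx_packets++;
        dev->stats.tx_bytes += len;     /* safe: no skb dereference */
        return NETDEV_TX_OK;
}
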
PCXR_AN_FLOWCTL_DIS (1 << 10) +#define PCXR_AN_DUPLEX_DIS (1 << 9) #define PCXR_PRIO_TX_OFF 3 #define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF) @@ -170,7 +177,6 @@ #define LINK_UP (1 << 3) /* Bit definitions for work to be done */ -#define WORK_LINK (1 << 0) #define WORK_TX_DONE (1 << 1) /* @@ -197,6 +203,9 @@ struct tx_desc { struct pxa168_eth_private { int port_num; /* User Ethernet port number */ int phy_addr; + int phy_speed; + int phy_duplex; + phy_interface_t phy_intf; int rx_resource_err; /* Rx ring resource error flag */ @@ -269,11 +278,11 @@ enum hash_table_entry { static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd); static int pxa168_init_hw(struct pxa168_eth_private *pep); +static int pxa168_init_phy(struct net_device *dev); static void eth_port_reset(struct net_device *dev); static void eth_port_start(struct net_device *dev); static int pxa168_eth_open(struct net_device *dev); static int pxa168_eth_stop(struct net_device *dev); -static int ethernet_phy_setup(struct net_device *dev); static inline u32 rdl(struct pxa168_eth_private *pep, int offset) { @@ -305,26 +314,6 @@ static void abort_dma(struct pxa168_eth_private *pep) netdev_err(pep->dev, "%s : DMA Stuck\n", __func__); } -static int ethernet_phy_get(struct pxa168_eth_private *pep) -{ - unsigned int reg_data; - - reg_data = rdl(pep, PHY_ADDRESS); - - return (reg_data >> (5 * pep->port_num)) & 0x1f; -} - -static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr) -{ - u32 reg_data; - int addr_shift = 5 * pep->port_num; - - reg_data = rdl(pep, PHY_ADDRESS); - reg_data &= ~(0x1f << addr_shift); - reg_data |= (phy_addr & 0x1f) << addr_shift; - wrl(pep, PHY_ADDRESS, reg_data); -} - static void rxq_refill(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); @@ -655,14 +644,7 @@ static void eth_port_start(struct net_device *dev) struct pxa168_eth_private *pep = netdev_priv(dev); int tx_curr_desc, rx_curr_desc; - /* Perform PHY reset, if there is a PHY. */ - if (pep->phy != NULL) { - struct ethtool_cmd cmd; - - pxa168_get_settings(pep->dev, &cmd); - phy_init_hw(pep->phy); - pxa168_set_settings(pep->dev, &cmd); - } + phy_start(pep->phy); /* Assignment of Tx CTRP of given queue */ tx_curr_desc = pep->tx_curr_desc_q; @@ -717,6 +699,8 @@ static void eth_port_reset(struct net_device *dev) val = rdl(pep, PORT_CONFIG); val &= ~PCR_EN; wrl(pep, PORT_CONFIG, val); + + phy_stop(pep->phy); } /* @@ -884,43 +868,9 @@ static int pxa168_eth_collect_events(struct pxa168_eth_private *pep, } if (icr & ICR_RXBUF) ret = 1; - if (icr & ICR_MII_CH) { - pep->work_todo |= WORK_LINK; - ret = 1; - } return ret; } -static void handle_link_event(struct pxa168_eth_private *pep) -{ - struct net_device *dev = pep->dev; - u32 port_status; - int speed; - int duplex; - int fc; - - port_status = rdl(pep, PORT_STATUS); - if (!(port_status & LINK_UP)) { - if (netif_carrier_ok(dev)) { - netdev_info(dev, "link down\n"); - netif_carrier_off(dev); - txq_reclaim(dev, 1); - } - return; - } - if (port_status & PORT_SPEED_100) - speed = 100; - else - speed = 10; - - duplex = (port_status & FULL_DUPLEX) ? 1 : 0; - fc = (port_status & FLOW_CONTROL_DISABLED) ? 0 : 1; - netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n", - speed, duplex ? "full" : "half", fc ? 
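
/*
 * Sketch (not part of the patch): the pxa168 rework in these hunks replaces
 * the driver's private link-interrupt handling with phylib, so the MAC
 * learns about speed/duplex/pause only through an adjust_link() callback.
 * The lifecycle, reduced to its skeleton (error handling trimmed, names
 * illustrative): phy_connect_direct() attaches the PHY and registers the
 * callback, phy_start()/phy_stop() bracket the running state.
 */
#include <linux/netdevice.h>
#include <linux/phy.h>

static void example_adjust_link(struct net_device *dev)
{
        struct phy_device *phydev = dev->phydev;

        /* mirror phydev->speed / phydev->duplex / phydev->pause into the
         * MAC configuration registers here, then log the transition */
        phy_print_status(phydev);
}

static int example_open(struct net_device *dev, struct mii_bus *bus, int addr)
{
        struct phy_device *phydev = mdiobus_scan(bus, addr);

        if (!phydev)
                return -ENODEV;
        if (phy_connect_direct(dev, phydev, example_adjust_link,
                               PHY_INTERFACE_MODE_MII))
                return -ENODEV;
        phy_start(phydev);      /* phy_stop() + phy_disconnect() on teardown */
        return 0;
}
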
"en" : "dis"); - if (!netif_carrier_ok(dev)) - netif_carrier_on(dev); -} - static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; @@ -978,8 +928,11 @@ static int set_port_config_ext(struct pxa168_eth_private *pep) skb_size = PCXR_MFL_64K; /* Extended Port Configuration */ - wrl(pep, - PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */ + wrl(pep, PORT_CONFIG_EXT, + PCXR_AN_SPEED_DIS | /* Disable HW AN */ + PCXR_AN_DUPLEX_DIS | + PCXR_AN_FLOWCTL_DIS | + PCXR_2BSM | /* Two byte prefix aligns IP hdr */ PCXR_DSCP_EN | /* Enable DSCP in IP */ skb_size | PCXR_FLP | /* do not force link pass */ PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */ @@ -987,6 +940,69 @@ static int set_port_config_ext(struct pxa168_eth_private *pep) return 0; } +static void pxa168_eth_adjust_link(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct phy_device *phy = pep->phy; + u32 cfg, cfg_o = rdl(pep, PORT_CONFIG); + u32 cfgext, cfgext_o = rdl(pep, PORT_CONFIG_EXT); + + cfg = cfg_o & ~PCR_DUPLEX_FULL; + cfgext = cfgext_o & ~(PCXR_SPEED_100 | PCXR_FLOWCTL_DIS | PCXR_RMII_EN); + + if (phy->interface == PHY_INTERFACE_MODE_RMII) + cfgext |= PCXR_RMII_EN; + if (phy->speed == SPEED_100) + cfgext |= PCXR_SPEED_100; + if (phy->duplex) + cfg |= PCR_DUPLEX_FULL; + if (!phy->pause) + cfgext |= PCXR_FLOWCTL_DIS; + + /* Bail out if there has nothing changed */ + if (cfg == cfg_o && cfgext == cfgext_o) + return; + + wrl(pep, PORT_CONFIG, cfg); + wrl(pep, PORT_CONFIG_EXT, cfgext); + + phy_print_status(phy); +} + +static int pxa168_init_phy(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct ethtool_cmd cmd; + int err; + + if (pep->phy) + return 0; + + pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr); + if (!pep->phy) + return -ENODEV; + + err = phy_connect_direct(dev, pep->phy, pxa168_eth_adjust_link, + pep->phy_intf); + if (err) + return err; + + err = pxa168_get_settings(dev, &cmd); + if (err) + return err; + + cmd.phy_address = pep->phy_addr; + cmd.speed = pep->phy_speed; + cmd.duplex = pep->phy_duplex; + cmd.advertising = PHY_BASIC_FEATURES; + cmd.autoneg = AUTONEG_ENABLE; + + if (cmd.speed != 0) + cmd.autoneg = AUTONEG_DISABLE; + + return pxa168_set_settings(dev, &cmd); +} + static int pxa168_init_hw(struct pxa168_eth_private *pep) { int err = 0; @@ -1133,6 +1149,10 @@ static int pxa168_eth_open(struct net_device *dev) struct pxa168_eth_private *pep = netdev_priv(dev); int err; + err = pxa168_init_phy(dev); + if (err) + return err; + err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev); if (err) { dev_err(&dev->dev, "can't assign irq\n"); @@ -1153,8 +1173,8 @@ static int pxa168_eth_open(struct net_device *dev) pep->rx_used_desc_q = 0; pep->rx_curr_desc_q = 0; netif_carrier_off(dev); - eth_port_start(dev); napi_enable(&pep->napi); + eth_port_start(dev); return 0; out_free_rx_skb: rxq_deinit(dev); @@ -1231,10 +1251,6 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget) struct net_device *dev = pep->dev; int work_done = 0; - if (unlikely(pep->work_todo & WORK_LINK)) { - pep->work_todo &= ~(WORK_LINK); - handle_link_event(pep); - } /* * We call txq_reclaim every time since in NAPI interupts are disabled * and due to this we miss the TX_DONE interrupt,which is not updated in @@ -1357,77 +1373,6 @@ static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, return -EOPNOTSUPP; } -static struct phy_device *phy_scan(struct 
pxa168_eth_private *pep, int phy_addr) -{ - struct mii_bus *bus = pep->smi_bus; - struct phy_device *phydev; - int start; - int num; - int i; - - if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) { - /* Scan entire range */ - start = ethernet_phy_get(pep); - num = 32; - } else { - /* Use phy addr specific to platform */ - start = phy_addr & 0x1f; - num = 1; - } - phydev = NULL; - for (i = 0; i < num; i++) { - int addr = (start + i) & 0x1f; - if (bus->phy_map[addr] == NULL) - mdiobus_scan(bus, addr); - - if (phydev == NULL) { - phydev = bus->phy_map[addr]; - if (phydev != NULL) - ethernet_phy_set_addr(pep, addr); - } - } - - return phydev; -} - -static void phy_init(struct pxa168_eth_private *pep) -{ - struct phy_device *phy = pep->phy; - - phy_attach(pep->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_MII); - - if (pep->pd && pep->pd->speed != 0) { - phy->autoneg = AUTONEG_DISABLE; - phy->advertising = 0; - phy->speed = pep->pd->speed; - phy->duplex = pep->pd->duplex; - } else { - phy->autoneg = AUTONEG_ENABLE; - phy->speed = 0; - phy->duplex = 0; - phy->supported &= PHY_BASIC_FEATURES; - phy->advertising = phy->supported | ADVERTISED_Autoneg; - } - - phy_start_aneg(phy); -} - -static int ethernet_phy_setup(struct net_device *dev) -{ - struct pxa168_eth_private *pep = netdev_priv(dev); - - if (pep->pd && pep->pd->init) - pep->pd->init(); - - pep->phy = phy_scan(pep, pep->phy_addr & 0x1f); - if (pep->phy != NULL) - phy_init(pep); - - update_hash_table_mac_address(pep, NULL, dev->dev_addr); - - return 0; -} - static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct pxa168_eth_private *pep = netdev_priv(dev); @@ -1505,16 +1450,14 @@ static int pxa168_eth_probe(struct platform_device *pdev) pep = netdev_priv(dev); pep->dev = dev; pep->clk = clk; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { - err = -ENODEV; - goto err_netdev; - } pep->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(pep->base)) { err = -ENOMEM; goto err_netdev; } + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); BUG_ON(!res); dev->irq = res->start; @@ -1552,13 +1495,23 @@ static int pxa168_eth_probe(struct platform_device *pdev) pep->port_num = pep->pd->port_number; pep->phy_addr = pep->pd->phy_addr; + pep->phy_speed = pep->pd->speed; + pep->phy_duplex = pep->pd->duplex; + pep->phy_intf = pep->pd->intf; + + if (pep->pd->init) + pep->pd->init(); } else if (pdev->dev.of_node) { of_property_read_u32(pdev->dev.of_node, "port-id", &pep->port_num); np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); - if (np) - of_property_read_u32(np, "reg", &pep->phy_addr); + if (!np) { + dev_err(&pdev->dev, "missing phy-handle\n"); + return -EINVAL; + } + of_property_read_u32(np, "reg", &pep->phy_addr); + pep->phy_intf = of_get_phy_mode(pdev->dev.of_node); } /* Hardware supports only 3 ports */ @@ -1587,11 +1540,8 @@ static int pxa168_eth_probe(struct platform_device *pdev) if (err) goto err_free_mdio; - pxa168_init_hw(pep); - err = ethernet_phy_setup(dev); - if (err) - goto err_mdiobus; SET_NETDEV_DEV(dev, &pdev->dev); + pxa168_init_hw(pep); err = register_netdev(dev); if (err) goto err_mdiobus; @@ -1621,13 +1571,13 @@ static int pxa168_eth_remove(struct platform_device *pdev) pep->htpr, pep->htpr_dma); pep->htpr = NULL; } + if (pep->phy) + phy_disconnect(pep->phy); if (pep->clk) { clk_disable(pep->clk); clk_put(pep->clk); pep->clk = NULL; } - if (pep->phy != NULL) - phy_detach(pep->phy); iounmap(pep->base); pep->base = NULL; diff --git 
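
/*
 * Sketch (not part of the patch): the probe path above now treats the
 * device-tree phy-handle as mandatory and also reads the PHY interface
 * mode, which adjust_link() later uses to set PCXR_RMII_EN.  The parsing
 * pattern in isolation (illustrative names):
 */
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>

static int example_parse_phy(struct platform_device *pdev,
                             u32 *phy_addr, int *phy_mode)
{
        struct device_node *np;

        np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
        if (!np)
                return -EINVAL;         /* no PHY node: refuse to probe */

        of_property_read_u32(np, "reg", phy_addr);      /* MDIO address */
        *phy_mode = of_get_phy_mode(pdev->dev.of_node); /* e.g. "rmii" */
        return 0;
}
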
a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 264eab7d3b26e0cb53774ae160ecdbe235be1f7b..7173836fe361962d5c88796ac34c07c9d5ecd166 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -3433,10 +3433,9 @@ static irqreturn_t skge_intr(int irq, void *dev_id) if (status & IS_HW_ERR) skge_error_irq(hw); - +out: skge_write32(hw, B0_IMSK, hw->intr_mask); skge_read32(hw, B0_IMSK); -out: spin_unlock(&hw->hw_lock); return IRQ_RETVAL(handled); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index bd3366267039bc233e526acf3926d4f8b6fb5638..867a6a3ef81f72c4c9da17ea861ab1bf30628833 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -1290,14 +1290,6 @@ static void rx_set_checksum(struct sky2_port *sky2) ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); } -/* - * Fixed initial key as seed to RSS. - */ -static const uint32_t rss_init_key[10] = { - 0x7c3351da, 0x51c5cf4e, 0x44adbdd1, 0xe8d38d18, 0x48897c43, - 0xb1d60e7e, 0x6a3dd760, 0x01a2e453, 0x16f46f13, 0x1a0e7b30 -}; - /* Enable/disable receive hash calculation (RSS) */ static void rx_set_rss(struct net_device *dev, netdev_features_t features) { @@ -1313,9 +1305,12 @@ static void rx_set_rss(struct net_device *dev, netdev_features_t features) /* Program RSS initial values */ if (features & NETIF_F_RXHASH) { + u32 rss_key[10]; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); for (i = 0; i < nkeys; i++) sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4), - rss_init_key[i]); + rss_key[i]); /* Need to turn on (undocumented) flag to make hashing work */ sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), @@ -1366,7 +1361,9 @@ static void sky2_rx_clean(struct sky2_port *sky2) { unsigned i; - memset(sky2->rx_le, 0, RX_LE_BYTES); + if (sky2->rx_le) + memset(sky2->rx_le, 0, RX_LE_BYTES); + for (i = 0; i < sky2->rx_pending; i++) { struct rx_ring_info *re = sky2->rx_ring + i; @@ -2419,6 +2416,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) imask = sky2_read32(hw, B0_IMSK); sky2_write32(hw, B0_IMSK, 0); + sky2_read32(hw, B0_IMSK); dev->trans_start = jiffies; /* prevent tx timeout */ napi_disable(&hw->napi); @@ -3487,8 +3485,8 @@ static void sky2_all_down(struct sky2_hw *hw) int i; if (hw->flags & SKY2_HW_IRQ_SETUP) { - sky2_read32(hw, B0_IMSK); sky2_write32(hw, B0_IMSK, 0); + sky2_read32(hw, B0_IMSK); synchronize_irq(hw->pdev->irq); napi_disable(&hw->napi); diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c index b0297da50304e95d83bb3ab5912172b9dc9752f9..963dd7e6d54780bc21233a016e6bed0ef48866da 100644 --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c @@ -76,22 +76,53 @@ void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr) mlx4_bitmap_free_range(bitmap, obj, 1, use_rr); } -u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align) +static unsigned long find_aligned_range(unsigned long *bitmap, + u32 start, u32 nbits, + int len, int align, u32 skip_mask) +{ + unsigned long end, i; + +again: + start = ALIGN(start, align); + + while ((start < nbits) && (test_bit(start, bitmap) || + (start & skip_mask))) + start += align; + + if (start >= nbits) + return -1; + + end = start+len; + if (end > nbits) + return -1; + + for (i = start + 1; i < end; i++) { + if (test_bit(i, bitmap) || ((u32)i & skip_mask)) { + start = i + 1; + goto again; + } + } + + return start; +} + +u32 
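
/*
 * Sketch (not part of the patch): find_aligned_range() above extends the
 * former bitmap_find_next_zero_area() search with a skip_mask: besides
 * being free and aligned, every index of the returned run must have no
 * bits in common with the mask.  The per-index test, restated:
 */
#include <linux/bitops.h>
#include <linux/types.h>

static bool example_index_ok(unsigned long *table, u32 i, u32 skip_mask)
{
        return !test_bit(i, table) && !(i & skip_mask);
}
/*
 * Worked example: cnt = 1, align = 1, skip_mask = 0x3, bits 0-3 busy.
 * Index 4 is the first one that is both free and has its two low bits
 * clear, so it is returned; 5, 6 and 7 would be rejected by the mask even
 * when free.  This lets callers reserve object numbers whose low bits
 * carry meaning (the QP reservation flags such as MLX4_RESERVE_A0_QP seen
 * later in this patch rely on such carving).
 */
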
mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, + int align, u32 skip_mask) { u32 obj; - if (likely(cnt == 1 && align == 1)) + if (likely(cnt == 1 && align == 1 && !skip_mask)) return mlx4_bitmap_alloc(bitmap); spin_lock(&bitmap->lock); - obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, - bitmap->last, cnt, align - 1); + obj = find_aligned_range(bitmap->table, bitmap->last, + bitmap->max, cnt, align, skip_mask); if (obj >= bitmap->max) { bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) & bitmap->mask; - obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, - 0, cnt, align - 1); + obj = find_aligned_range(bitmap->table, 0, bitmap->max, + cnt, align, skip_mask); } if (obj < bitmap->max) { @@ -118,6 +149,11 @@ u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap) return bitmap->avail; } +static u32 mlx4_bitmap_masked_value(struct mlx4_bitmap *bitmap, u32 obj) +{ + return obj & (bitmap->max + bitmap->reserved_top - 1); +} + void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt, int use_rr) { @@ -147,6 +183,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, bitmap->mask = mask; bitmap->reserved_top = reserved_top; bitmap->avail = num - reserved_top - reserved_bot; + bitmap->effective_len = bitmap->avail; spin_lock_init(&bitmap->lock); bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) * sizeof (long), GFP_KERNEL); @@ -163,6 +200,382 @@ void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap) kfree(bitmap->table); } +struct mlx4_zone_allocator { + struct list_head entries; + struct list_head prios; + u32 last_uid; + u32 mask; + /* protect the zone_allocator from concurrent accesses */ + spinlock_t lock; + enum mlx4_zone_alloc_flags flags; +}; + +struct mlx4_zone_entry { + struct list_head list; + struct list_head prio_list; + u32 uid; + struct mlx4_zone_allocator *allocator; + struct mlx4_bitmap *bitmap; + int use_rr; + int priority; + int offset; + enum mlx4_zone_flags flags; +}; + +struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags) +{ + struct mlx4_zone_allocator *zones = kmalloc(sizeof(*zones), GFP_KERNEL); + + if (NULL == zones) + return NULL; + + INIT_LIST_HEAD(&zones->entries); + INIT_LIST_HEAD(&zones->prios); + spin_lock_init(&zones->lock); + zones->last_uid = 0; + zones->mask = 0; + zones->flags = flags; + + return zones; +} + +int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc, + struct mlx4_bitmap *bitmap, + u32 flags, + int priority, + int offset, + u32 *puid) +{ + u32 mask = mlx4_bitmap_masked_value(bitmap, (u32)-1); + struct mlx4_zone_entry *it; + struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL); + + if (NULL == zone) + return -ENOMEM; + + zone->flags = flags; + zone->bitmap = bitmap; + zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? 
MLX4_USE_RR : 0; + zone->priority = priority; + zone->offset = offset; + + spin_lock(&zone_alloc->lock); + + zone->uid = zone_alloc->last_uid++; + zone->allocator = zone_alloc; + + if (zone_alloc->mask < mask) + zone_alloc->mask = mask; + + list_for_each_entry(it, &zone_alloc->prios, prio_list) + if (it->priority >= priority) + break; + + if (&it->prio_list == &zone_alloc->prios || it->priority > priority) + list_add_tail(&zone->prio_list, &it->prio_list); + list_add_tail(&zone->list, &it->list); + + spin_unlock(&zone_alloc->lock); + + *puid = zone->uid; + + return 0; +} + +/* Should be called under a lock */ +static int __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry) +{ + struct mlx4_zone_allocator *zone_alloc = entry->allocator; + + if (!list_empty(&entry->prio_list)) { + /* Check if we need to add an alternative node to the prio list */ + if (!list_is_last(&entry->list, &zone_alloc->entries)) { + struct mlx4_zone_entry *next = list_first_entry(&entry->list, + typeof(*next), + list); + + if (next->priority == entry->priority) + list_add_tail(&next->prio_list, &entry->prio_list); + } + + list_del(&entry->prio_list); + } + + list_del(&entry->list); + + if (zone_alloc->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP) { + u32 mask = 0; + struct mlx4_zone_entry *it; + + list_for_each_entry(it, &zone_alloc->prios, prio_list) { + u32 cur_mask = mlx4_bitmap_masked_value(it->bitmap, (u32)-1); + + if (mask < cur_mask) + mask = cur_mask; + } + zone_alloc->mask = mask; + } + + return 0; +} + +void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc) +{ + struct mlx4_zone_entry *zone, *tmp; + + spin_lock(&zone_alloc->lock); + + list_for_each_entry_safe(zone, tmp, &zone_alloc->entries, list) { + list_del(&zone->list); + list_del(&zone->prio_list); + kfree(zone); + } + + spin_unlock(&zone_alloc->lock); + kfree(zone_alloc); +} + +/* Should be called under a lock */ +static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count, + int align, u32 skip_mask, u32 *puid) +{ + u32 uid; + u32 res; + struct mlx4_zone_allocator *zone_alloc = zone->allocator; + struct mlx4_zone_entry *curr_node; + + res = mlx4_bitmap_alloc_range(zone->bitmap, count, + align, skip_mask); + + if (res != (u32)-1) { + res += zone->offset; + uid = zone->uid; + goto out; + } + + list_for_each_entry(curr_node, &zone_alloc->prios, prio_list) { + if (unlikely(curr_node->priority == zone->priority)) + break; + } + + if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO) { + struct mlx4_zone_entry *it = curr_node; + + list_for_each_entry_continue_reverse(it, &zone_alloc->entries, list) { + res = mlx4_bitmap_alloc_range(it->bitmap, count, + align, skip_mask); + if (res != (u32)-1) { + res += it->offset; + uid = it->uid; + goto out; + } + } + } + + if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO) { + struct mlx4_zone_entry *it = curr_node; + + list_for_each_entry_from(it, &zone_alloc->entries, list) { + if (unlikely(it == zone)) + continue; + + if (unlikely(it->priority != curr_node->priority)) + break; + + res = mlx4_bitmap_alloc_range(it->bitmap, count, + align, skip_mask); + if (res != (u32)-1) { + res += it->offset; + uid = it->uid; + goto out; + } + } + } + + if (zone->flags & MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO) { + if (list_is_last(&curr_node->prio_list, &zone_alloc->prios)) + goto out; + + curr_node = list_first_entry(&curr_node->prio_list, + typeof(*curr_node), + prio_list); + + list_for_each_entry_from(curr_node, &zone_alloc->entries, list) { + res = mlx4_bitmap_alloc_range(curr_node->bitmap, 
count, + align, skip_mask); + if (res != (u32)-1) { + res += curr_node->offset; + uid = curr_node->uid; + goto out; + } + } + } + +out: + if (NULL != puid && res != (u32)-1) + *puid = uid; + return res; +} + +/* Should be called under a lock */ +static void __mlx4_free_from_zone(struct mlx4_zone_entry *zone, u32 obj, + u32 count) +{ + mlx4_bitmap_free_range(zone->bitmap, obj - zone->offset, count, zone->use_rr); +} + +/* Should be called under a lock */ +static struct mlx4_zone_entry *__mlx4_find_zone_by_uid( + struct mlx4_zone_allocator *zones, u32 uid) +{ + struct mlx4_zone_entry *zone; + + list_for_each_entry(zone, &zones->entries, list) { + if (zone->uid == uid) + return zone; + } + + return NULL; +} + +struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid) +{ + struct mlx4_zone_entry *zone; + struct mlx4_bitmap *bitmap; + + spin_lock(&zones->lock); + + zone = __mlx4_find_zone_by_uid(zones, uid); + + bitmap = zone == NULL ? NULL : zone->bitmap; + + spin_unlock(&zones->lock); + + return bitmap; +} + +int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid) +{ + struct mlx4_zone_entry *zone; + int res; + + spin_lock(&zones->lock); + + zone = __mlx4_find_zone_by_uid(zones, uid); + + if (NULL == zone) { + res = -1; + goto out; + } + + res = __mlx4_zone_remove_one_entry(zone); + +out: + spin_unlock(&zones->lock); + kfree(zone); + + return res; +} + +/* Should be called under a lock */ +static struct mlx4_zone_entry *__mlx4_find_zone_by_uid_unique( + struct mlx4_zone_allocator *zones, u32 obj) +{ + struct mlx4_zone_entry *zone, *zone_candidate = NULL; + u32 dist = (u32)-1; + + /* Search for the smallest zone that this obj could be + * allocated from. This is done in order to handle + * situations when small bitmaps are allocated from bigger + * bitmaps (and the allocated space is marked as reserved in + * the bigger bitmap. 
+ */ + list_for_each_entry(zone, &zones->entries, list) { + if (obj >= zone->offset) { + u32 mobj = (obj - zone->offset) & zones->mask; + + if (mobj < zone->bitmap->max) { + u32 curr_dist = zone->bitmap->effective_len; + + if (curr_dist < dist) { + dist = curr_dist; + zone_candidate = zone; + } + } + } + } + + return zone_candidate; +} + +u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count, + int align, u32 skip_mask, u32 *puid) +{ + struct mlx4_zone_entry *zone; + int res = -1; + + spin_lock(&zones->lock); + + zone = __mlx4_find_zone_by_uid(zones, uid); + + if (NULL == zone) + goto out; + + res = __mlx4_alloc_from_zone(zone, count, align, skip_mask, puid); + +out: + spin_unlock(&zones->lock); + + return res; +} + +u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, u32 uid, u32 obj, u32 count) +{ + struct mlx4_zone_entry *zone; + int res = 0; + + spin_lock(&zones->lock); + + zone = __mlx4_find_zone_by_uid(zones, uid); + + if (NULL == zone) { + res = -1; + goto out; + } + + __mlx4_free_from_zone(zone, obj, count); + +out: + spin_unlock(&zones->lock); + + return res; +} + +u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count) +{ + struct mlx4_zone_entry *zone; + int res; + + if (!(zones->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP)) + return -EFAULT; + + spin_lock(&zones->lock); + + zone = __mlx4_find_zone_by_uid_unique(zones, obj); + + if (NULL == zone) { + res = -1; + goto out; + } + + __mlx4_free_from_zone(zone, obj, count); + res = 0; + +out: + spin_unlock(&zones->lock); + + return res; +} /* * Handling for queue buffers -- we allocate a bunch of memory and * register it in a memory region at HCA virtual address 0. If the diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index b16e1b95566f4db9708540742c4002233a429cb2..5c93d1451c449e328fe05505bc848a146af3df9e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -990,11 +990,11 @@ static struct mlx4_cmd_info cmd_info[] = { { .opcode = MLX4_CMD_CONFIG_DEV, .has_inbox = false, - .has_outbox = false, + .has_outbox = true, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, - .wrapper = mlx4_CMD_EPERM_wrapper + .wrapper = mlx4_CONFIG_DEV_wrapper }, { .opcode = MLX4_CMD_ALLOC_RES, @@ -1338,6 +1338,15 @@ static struct mlx4_cmd_info cmd_info[] = { .verify = NULL, .wrapper = mlx4_QUERY_IF_STAT_wrapper }, + { + .opcode = MLX4_CMD_ACCESS_REG, + .has_inbox = true, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_ACCESS_REG_wrapper, + }, /* Native multicast commands are not available for guests */ { .opcode = MLX4_CMD_QP_ATTACH, @@ -2108,50 +2117,52 @@ err_vhcr: int mlx4_cmd_init(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); + int flags = 0; + + if (!priv->cmd.initialized) { + mutex_init(&priv->cmd.hcr_mutex); + mutex_init(&priv->cmd.slave_cmd_mutex); + sema_init(&priv->cmd.poll_sem, 1); + priv->cmd.use_events = 0; + priv->cmd.toggle = 1; + priv->cmd.initialized = 1; + flags |= MLX4_CMD_CLEANUP_STRUCT; + } - mutex_init(&priv->cmd.hcr_mutex); - mutex_init(&priv->cmd.slave_cmd_mutex); - sema_init(&priv->cmd.poll_sem, 1); - priv->cmd.use_events = 0; - priv->cmd.toggle = 1; - - priv->cmd.hcr = NULL; - priv->mfunc.vhcr = NULL; - - if (!mlx4_is_slave(dev)) { + if (!mlx4_is_slave(dev) && !priv->cmd.hcr) { priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE, MLX4_HCR_SIZE); if 
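
/*
 * Sketch (not part of the patch): the zone-allocator API from this hunk
 * put together, assuming the declarations this series adds to mlx4.h
 * (error handling trimmed; flag/priority/offset choices are illustrative).
 * Several mlx4_bitmaps become zones of one allocator, kept sorted by
 * priority value, and a failed allocation may spill into other zones
 * according to the per-zone flags:
 */
static int example_zones(struct mlx4_bitmap *bm_a, struct mlx4_bitmap *bm_b)
{
        struct mlx4_zone_allocator *zones;
        u32 uid_a, uid_b, obj, owner;

        zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);
        if (!zones)
                return -ENOMEM;

        /* offset places each zone in the global object number space */
        mlx4_zone_add_one(zones, bm_a, MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO,
                          0 /* priority */, 0 /* offset */, &uid_a);
        mlx4_zone_add_one(zones, bm_b, MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO,
                          1, bm_a->max, &uid_b);

        /* 8 objects, aligned to 8, no skip mask; on exhaustion the zone
         * flags above decide where the search may continue, and *owner
         * reports which zone finally provided the range */
        obj = mlx4_zone_alloc_entries(zones, uid_b, 8, 8, 0, &owner);
        if (obj != (u32)-1)
                mlx4_zone_free_entries(zones, owner, obj, 8);

        mlx4_zone_allocator_destroy(zones);
        return 0;
}
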
(!priv->cmd.hcr) { mlx4_err(dev, "Couldn't map command register\n"); - return -ENOMEM; + goto err; } + flags |= MLX4_CMD_CLEANUP_HCR; } - if (mlx4_is_mfunc(dev)) { + if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) { priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE, &priv->mfunc.vhcr_dma, GFP_KERNEL); if (!priv->mfunc.vhcr) - goto err_hcr; + goto err; + + flags |= MLX4_CMD_CLEANUP_VHCR; } - priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev, - MLX4_MAILBOX_SIZE, - MLX4_MAILBOX_SIZE, 0); - if (!priv->cmd.pool) - goto err_vhcr; + if (!priv->cmd.pool) { + priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev, + MLX4_MAILBOX_SIZE, + MLX4_MAILBOX_SIZE, 0); + if (!priv->cmd.pool) + goto err; - return 0; + flags |= MLX4_CMD_CLEANUP_POOL; + } -err_vhcr: - if (mlx4_is_mfunc(dev)) - dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, - priv->mfunc.vhcr, priv->mfunc.vhcr_dma); - priv->mfunc.vhcr = NULL; + return 0; -err_hcr: - if (!mlx4_is_slave(dev)) - iounmap(priv->cmd.hcr); +err: + mlx4_cmd_cleanup(dev, flags); return -ENOMEM; } @@ -2175,18 +2186,28 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev) iounmap(priv->mfunc.comm); } -void mlx4_cmd_cleanup(struct mlx4_dev *dev) +void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask) { struct mlx4_priv *priv = mlx4_priv(dev); - pci_pool_destroy(priv->cmd.pool); + if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) { + pci_pool_destroy(priv->cmd.pool); + priv->cmd.pool = NULL; + } - if (!mlx4_is_slave(dev)) + if (!mlx4_is_slave(dev) && priv->cmd.hcr && + (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) { iounmap(priv->cmd.hcr); - if (mlx4_is_mfunc(dev)) + priv->cmd.hcr = NULL; + } + if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr && + (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) { dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, priv->mfunc.vhcr, priv->mfunc.vhcr_dma); - priv->mfunc.vhcr = NULL; + priv->mfunc.vhcr = NULL; + } + if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT)) + priv->cmd.initialized = 0; } /* diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 56022d6478370d9b8d71c84c3b2220fa9aa73180..e71f31387ac6c73b843fef3f023cf6150327fe09 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -52,6 +52,51 @@ #define MLX4_CQ_STATE_ARMED_SOL ( 6 << 8) #define MLX4_EQ_STATE_FIRED (10 << 8) +#define TASKLET_MAX_TIME 2 +#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME) + +void mlx4_cq_tasklet_cb(unsigned long data) +{ + unsigned long flags; + unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES; + struct mlx4_eq_tasklet *ctx = (struct mlx4_eq_tasklet *)data; + struct mlx4_cq *mcq, *temp; + + spin_lock_irqsave(&ctx->lock, flags); + list_splice_tail_init(&ctx->list, &ctx->process_list); + spin_unlock_irqrestore(&ctx->lock, flags); + + list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) { + list_del_init(&mcq->tasklet_ctx.list); + mcq->tasklet_ctx.comp(mcq); + if (atomic_dec_and_test(&mcq->refcount)) + complete(&mcq->free); + if (time_after(jiffies, end)) + break; + } + + if (!list_empty(&ctx->process_list)) + tasklet_schedule(&ctx->task); +} + +static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq) +{ + unsigned long flags; + struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv; + + spin_lock_irqsave(&tasklet_ctx->lock, flags); + /* When migrating CQs between EQs will be implemented, please note + * that you need to sync this point. 
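
/*
 * Sketch (not part of the patch): the completion tasklet above in generic
 * form.  The IRQ path only queues entries (the real code first splices
 * them off a spinlock-protected list), and the tasklet drains the queue
 * under a small time budget -- TASKLET_MAX_TIME is 2 ms here --
 * rescheduling itself when the budget runs out so one busy EQ cannot
 * monopolize softirq time (types and names illustrative):
 */
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/list.h>

struct example_work {
        struct list_head list;
        void (*comp)(struct example_work *w);
};

struct example_tasklet_ctx {
        struct list_head process_list;
        struct tasklet_struct task;
};

static void example_budgeted_cb(unsigned long data)
{
        struct example_tasklet_ctx *ctx = (struct example_tasklet_ctx *)data;
        unsigned long end = jiffies + msecs_to_jiffies(2);
        struct example_work *w, *tmp;

        list_for_each_entry_safe(w, tmp, &ctx->process_list, list) {
                list_del_init(&w->list);
                w->comp(w);
                if (time_after(jiffies, end))
                        break;                  /* budget exhausted */
        }

        if (!list_empty(&ctx->process_list))
                tasklet_schedule(&ctx->task);   /* resume on the next run */
}
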
It is possible that + * while migrating a CQ, completions on the old EQs could + * still arrive. + */ + if (list_empty_careful(&cq->tasklet_ctx.list)) { + atomic_inc(&cq->refcount); + list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list); + } + spin_unlock_irqrestore(&tasklet_ctx->lock, flags); +} + void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn) { struct mlx4_cq *cq; @@ -292,6 +337,11 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, cq->uar = uar; atomic_set(&cq->refcount, 1); init_completion(&cq->free); + cq->comp = mlx4_add_cq_to_tasklet; + cq->tasklet_ctx.priv = + &priv->eq_table.eq[cq->vector].tasklet_ctx; + INIT_LIST_HEAD(&cq->tasklet_ctx.list); + cq->irq = priv->eq_table.eq[cq->vector].irq; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c index 57dda95b67d8d4325e19f03b6d01f982cfc54cb7..999014413b1aa8543c0e261f0cf38943ac0ca8e1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c @@ -35,52 +35,6 @@ #include "mlx4_en.h" -int mlx4_en_timestamp_config(struct net_device *dev, int tx_type, int rx_filter) -{ - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_dev *mdev = priv->mdev; - int port_up = 0; - int err = 0; - - if (priv->hwtstamp_config.tx_type == tx_type && - priv->hwtstamp_config.rx_filter == rx_filter) - return 0; - - mutex_lock(&mdev->state_lock); - if (priv->port_up) { - port_up = 1; - mlx4_en_stop_port(dev, 1); - } - - mlx4_en_free_resources(priv); - - en_warn(priv, "Changing Time Stamp configuration\n"); - - priv->hwtstamp_config.tx_type = tx_type; - priv->hwtstamp_config.rx_filter = rx_filter; - - if (rx_filter != HWTSTAMP_FILTER_NONE) - dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; - else - dev->features |= NETIF_F_HW_VLAN_CTAG_RX; - - err = mlx4_en_alloc_resources(priv); - if (err) { - en_err(priv, "Failed reallocating port resources\n"); - goto out; - } - if (port_up) { - err = mlx4_en_start_port(dev); - if (err) - en_err(priv, "Failed starting port\n"); - } - -out: - mutex_unlock(&mdev->state_lock); - netdev_features_change(dev); - return err; -} - /* mlx4_en_read_clock - read raw cycle counter (to be used by time counter) */ static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index ae83da9cd18a3f870ab51bc3ff9cb207b6f0432e..90e0f045a6bc2ae29071e286dcb6360254997ceb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -114,7 +115,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = { "tso_packets", "xmit_more", "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed", - "rx_csum_good", "rx_csum_none", "tx_chksum_offload", + "rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload", /* packet statistics */ "broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3", @@ -374,7 +375,302 @@ static void mlx4_en_get_strings(struct net_device *dev, } } -static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static u32 mlx4_en_autoneg_get(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + u32 autoneg = AUTONEG_DISABLE; + + if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) && + (priv->port_state.flags & MLX4_EN_PORT_ANE)) + autoneg = 
AUTONEG_ENABLE; + + return autoneg; +} + +static u32 ptys_get_supported_port(struct mlx4_ptys_reg *ptys_reg) +{ + u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap); + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T) + | MLX4_PROT_MASK(MLX4_1000BASE_T) + | MLX4_PROT_MASK(MLX4_100BASE_TX))) { + return SUPPORTED_TP; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR) + | MLX4_PROT_MASK(MLX4_10GBASE_SR) + | MLX4_PROT_MASK(MLX4_56GBASE_SR4) + | MLX4_PROT_MASK(MLX4_40GBASE_CR4) + | MLX4_PROT_MASK(MLX4_40GBASE_SR4) + | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) { + return SUPPORTED_FIBRE; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4) + | MLX4_PROT_MASK(MLX4_40GBASE_KR4) + | MLX4_PROT_MASK(MLX4_20GBASE_KR2) + | MLX4_PROT_MASK(MLX4_10GBASE_KR) + | MLX4_PROT_MASK(MLX4_10GBASE_KX4) + | MLX4_PROT_MASK(MLX4_1000BASE_KX))) { + return SUPPORTED_Backplane; + } + return 0; +} + +static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg) +{ + u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_oper); + + if (!eth_proto) /* link down */ + eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap); + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T) + | MLX4_PROT_MASK(MLX4_1000BASE_T) + | MLX4_PROT_MASK(MLX4_100BASE_TX))) { + return PORT_TP; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_SR) + | MLX4_PROT_MASK(MLX4_56GBASE_SR4) + | MLX4_PROT_MASK(MLX4_40GBASE_SR4) + | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) { + return PORT_FIBRE; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR) + | MLX4_PROT_MASK(MLX4_56GBASE_CR4) + | MLX4_PROT_MASK(MLX4_40GBASE_CR4))) { + return PORT_DA; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4) + | MLX4_PROT_MASK(MLX4_40GBASE_KR4) + | MLX4_PROT_MASK(MLX4_20GBASE_KR2) + | MLX4_PROT_MASK(MLX4_10GBASE_KR) + | MLX4_PROT_MASK(MLX4_10GBASE_KX4) + | MLX4_PROT_MASK(MLX4_1000BASE_KX))) { + return PORT_NONE; + } + return PORT_OTHER; +} + +#define MLX4_LINK_MODES_SZ \ + (FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8) + +enum ethtool_report { + SUPPORTED = 0, + ADVERTISED = 1, + SPEED = 2 +}; + +/* Translates mlx4 link mode to equivalent ethtool Link modes/speed */ +static u32 ptys2ethtool_map[MLX4_LINK_MODES_SZ][3] = { + [MLX4_100BASE_TX] = { + SUPPORTED_100baseT_Full, + ADVERTISED_100baseT_Full, + SPEED_100 + }, + + [MLX4_1000BASE_T] = { + SUPPORTED_1000baseT_Full, + ADVERTISED_1000baseT_Full, + SPEED_1000 + }, + [MLX4_1000BASE_CX_SGMII] = { + SUPPORTED_1000baseKX_Full, + ADVERTISED_1000baseKX_Full, + SPEED_1000 + }, + [MLX4_1000BASE_KX] = { + SUPPORTED_1000baseKX_Full, + ADVERTISED_1000baseKX_Full, + SPEED_1000 + }, + + [MLX4_10GBASE_T] = { + SUPPORTED_10000baseT_Full, + ADVERTISED_10000baseT_Full, + SPEED_10000 + }, + [MLX4_10GBASE_CX4] = { + SUPPORTED_10000baseKX4_Full, + ADVERTISED_10000baseKX4_Full, + SPEED_10000 + }, + [MLX4_10GBASE_KX4] = { + SUPPORTED_10000baseKX4_Full, + ADVERTISED_10000baseKX4_Full, + SPEED_10000 + }, + [MLX4_10GBASE_KR] = { + SUPPORTED_10000baseKR_Full, + ADVERTISED_10000baseKR_Full, + SPEED_10000 + }, + [MLX4_10GBASE_CR] = { + SUPPORTED_10000baseKR_Full, + ADVERTISED_10000baseKR_Full, + SPEED_10000 + }, + [MLX4_10GBASE_SR] = { + SUPPORTED_10000baseKR_Full, + ADVERTISED_10000baseKR_Full, + SPEED_10000 + }, + + [MLX4_20GBASE_KR2] = { + SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full, + ADVERTISED_20000baseMLD2_Full | ADVERTISED_20000baseKR2_Full, + SPEED_20000 + }, + + [MLX4_40GBASE_CR4] = { + SUPPORTED_40000baseCR4_Full, + ADVERTISED_40000baseCR4_Full, + SPEED_40000 + }, + [MLX4_40GBASE_KR4] = { + 
SUPPORTED_40000baseKR4_Full, + ADVERTISED_40000baseKR4_Full, + SPEED_40000 + }, + [MLX4_40GBASE_SR4] = { + SUPPORTED_40000baseSR4_Full, + ADVERTISED_40000baseSR4_Full, + SPEED_40000 + }, + + [MLX4_56GBASE_KR4] = { + SUPPORTED_56000baseKR4_Full, + ADVERTISED_56000baseKR4_Full, + SPEED_56000 + }, + [MLX4_56GBASE_CR4] = { + SUPPORTED_56000baseCR4_Full, + ADVERTISED_56000baseCR4_Full, + SPEED_56000 + }, + [MLX4_56GBASE_SR4] = { + SUPPORTED_56000baseSR4_Full, + ADVERTISED_56000baseSR4_Full, + SPEED_56000 + }, +}; + +static u32 ptys2ethtool_link_modes(u32 eth_proto, enum ethtool_report report) +{ + int i; + u32 link_modes = 0; + + for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { + if (eth_proto & MLX4_PROT_MASK(i)) + link_modes |= ptys2ethtool_map[i][report]; + } + return link_modes; +} + +static u32 ethtool2ptys_link_modes(u32 link_modes, enum ethtool_report report) +{ + int i; + u32 ptys_modes = 0; + + for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { + if (ptys2ethtool_map[i][report] & link_modes) + ptys_modes |= 1 << i; + } + return ptys_modes; +} + +/* Convert actual speed (SPEED_XXX) to ptys link modes */ +static u32 speed2ptys_link_modes(u32 speed) +{ + int i; + u32 ptys_modes = 0; + + for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { + if (ptys2ethtool_map[i][SPEED] == speed) + ptys_modes |= 1 << i; + } + return ptys_modes; +} + +static int ethtool_get_ptys_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_ptys_reg ptys_reg; + u32 eth_proto; + int ret; + + memset(&ptys_reg, 0, sizeof(ptys_reg)); + ptys_reg.local_port = priv->port; + ptys_reg.proto_mask = MLX4_PTYS_EN; + ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, + MLX4_ACCESS_REG_QUERY, &ptys_reg); + if (ret) { + en_warn(priv, "Failed to run mlx4_ACCESS_PTYS_REG status(%x)", + ret); + return ret; + } + en_dbg(DRV, priv, "ptys_reg.proto_mask %x\n", + ptys_reg.proto_mask); + en_dbg(DRV, priv, "ptys_reg.eth_proto_cap %x\n", + be32_to_cpu(ptys_reg.eth_proto_cap)); + en_dbg(DRV, priv, "ptys_reg.eth_proto_admin %x\n", + be32_to_cpu(ptys_reg.eth_proto_admin)); + en_dbg(DRV, priv, "ptys_reg.eth_proto_oper %x\n", + be32_to_cpu(ptys_reg.eth_proto_oper)); + en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n", + be32_to_cpu(ptys_reg.eth_proto_lp_adv)); + + cmd->supported = 0; + cmd->advertising = 0; + + cmd->supported |= ptys_get_supported_port(&ptys_reg); + + eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap); + cmd->supported |= ptys2ethtool_link_modes(eth_proto, SUPPORTED); + + eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin); + cmd->advertising |= ptys2ethtool_link_modes(eth_proto, ADVERTISED); + + cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + cmd->advertising |= (priv->prof->tx_pause) ? ADVERTISED_Pause : 0; + + cmd->advertising |= (priv->prof->tx_pause ^ priv->prof->rx_pause) ? + ADVERTISED_Asym_Pause : 0; + + cmd->port = ptys_get_active_port(&ptys_reg); + cmd->transceiver = (SUPPORTED_TP & cmd->supported) ? + XCVR_EXTERNAL : XCVR_INTERNAL; + + if (mlx4_en_autoneg_get(dev)) { + cmd->supported |= SUPPORTED_Autoneg; + cmd->advertising |= ADVERTISED_Autoneg; + } + + cmd->autoneg = (priv->port_state.flags & MLX4_EN_PORT_ANC) ? + AUTONEG_ENABLE : AUTONEG_DISABLE; + + eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv); + cmd->lp_advertising = ptys2ethtool_link_modes(eth_proto, ADVERTISED); + + cmd->lp_advertising |= (priv->port_state.flags & MLX4_EN_PORT_ANC) ? 
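
/*
 * Sketch (not part of the patch): the ptys2ethtool_map table above makes
 * both translation directions a scan over the 32 PTYS bit positions.  For
 * instance, a capability word with MLX4_40GBASE_CR4 and MLX4_10GBASE_SR
 * set folds into SUPPORTED_40000baseCR4_Full | SUPPORTED_10000baseKR_Full
 * (the table maps SR onto the KR ethtool bit, there being no dedicated
 * 10GBASE-SR define in this scheme):
 */
static u32 example_translate_caps(void)
{
        return ptys2ethtool_link_modes(MLX4_PROT_MASK(MLX4_40GBASE_CR4) |
                                       MLX4_PROT_MASK(MLX4_10GBASE_SR),
                                       SUPPORTED);
}
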
+ ADVERTISED_Autoneg : 0; + + cmd->phy_address = 0; + cmd->mdio_support = 0; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + cmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + + return ret; +} + +static void ethtool_get_default_settings(struct net_device *dev, + struct ethtool_cmd *cmd) { struct mlx4_en_priv *priv = netdev_priv(dev); int trans_type; @@ -382,18 +678,7 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->autoneg = AUTONEG_DISABLE; cmd->supported = SUPPORTED_10000baseT_Full; cmd->advertising = ADVERTISED_10000baseT_Full; - - if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) - return -ENOMEM; - - trans_type = priv->port_state.transciver; - if (netif_carrier_ok(dev)) { - ethtool_cmd_speed_set(cmd, priv->port_state.link_speed); - cmd->duplex = DUPLEX_FULL; - } else { - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); - cmd->duplex = DUPLEX_UNKNOWN; - } + trans_type = priv->port_state.transceiver; if (trans_type > 0 && trans_type <= 0xC) { cmd->port = PORT_FIBRE; @@ -409,17 +694,118 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->port = -1; cmd->transceiver = -1; } +} + +static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int ret = -EINVAL; + + if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) + return -ENOMEM; + + en_dbg(DRV, priv, "query port state.flags ANC(%x) ANE(%x)\n", + priv->port_state.flags & MLX4_EN_PORT_ANC, + priv->port_state.flags & MLX4_EN_PORT_ANE); + + if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) + ret = ethtool_get_ptys_settings(dev, cmd); + if (ret) /* ETH PROT CRTL is not supported or PTYS CMD failed */ + ethtool_get_default_settings(dev, cmd); + + if (netif_carrier_ok(dev)) { + ethtool_cmd_speed_set(cmd, priv->port_state.link_speed); + cmd->duplex = DUPLEX_FULL; + } else { + ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->duplex = DUPLEX_UNKNOWN; + } return 0; } +/* Calculate PTYS admin according ethtool speed (SPEED_XXX) */ +static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed, + __be32 proto_cap) +{ + __be32 proto_admin = 0; + + if (!speed) { /* Speed = 0 ==> Reset Link modes */ + proto_admin = proto_cap; + en_info(priv, "Speed was set to 0, Reset advertised Link Modes to default (%x)\n", + be32_to_cpu(proto_cap)); + } else { + u32 ptys_link_modes = speed2ptys_link_modes(speed); + + proto_admin = cpu_to_be32(ptys_link_modes) & proto_cap; + en_info(priv, "Setting Speed to %d\n", speed); + } + return proto_admin; +} + static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { - if ((cmd->autoneg == AUTONEG_ENABLE) || - (ethtool_cmd_speed(cmd) != SPEED_10000) || - (cmd->duplex != DUPLEX_FULL)) + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_ptys_reg ptys_reg; + __be32 proto_admin; + int ret; + + u32 ptys_adv = ethtool2ptys_link_modes(cmd->advertising, ADVERTISED); + int speed = ethtool_cmd_speed(cmd); + + en_dbg(DRV, priv, "Set Speed=%d adv=0x%x autoneg=%d duplex=%d\n", + speed, cmd->advertising, cmd->autoneg, cmd->duplex); + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) || + (cmd->duplex == DUPLEX_HALF)) return -EINVAL; - /* Nothing to change */ + memset(&ptys_reg, 0, sizeof(ptys_reg)); + ptys_reg.local_port = priv->port; + ptys_reg.proto_mask = MLX4_PTYS_EN; + ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, + MLX4_ACCESS_REG_QUERY, &ptys_reg); + if (ret) { + en_warn(priv, "Failed 
to QUERY mlx4_ACCESS_PTYS_REG status(%x)\n", + ret); + return 0; + } + + proto_admin = cpu_to_be32(ptys_adv); + if (speed >= 0 && speed != priv->port_state.link_speed) + /* If speed was set then speed decides :-) */ + proto_admin = speed_set_ptys_admin(priv, speed, + ptys_reg.eth_proto_cap); + + proto_admin &= ptys_reg.eth_proto_cap; + + if (proto_admin == ptys_reg.eth_proto_admin) + return 0; /* Nothing to change */ + + if (!proto_admin) { + en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n"); + return -EINVAL; /* nothing to change due to bad input */ + } + + en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n", + be32_to_cpu(proto_admin)); + + ptys_reg.eth_proto_admin = proto_admin; + ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, MLX4_ACCESS_REG_WRITE, + &ptys_reg); + if (ret) { + en_warn(priv, "Failed to write mlx4_ACCESS_PTYS_REG eth_proto_admin(0x%x) status(0x%x)", + be32_to_cpu(ptys_reg.eth_proto_admin), ret); + return ret; + } + + en_warn(priv, "Port link mode changed, restarting port...\n"); + mutex_lock(&priv->mdev->state_lock); + if (priv->port_up) { + mlx4_en_stop_port(dev, 1); + if (mlx4_en_start_port(dev)) + en_err(priv, "Failed restarting port %d\n", priv->port); + } + mutex_unlock(&priv->mdev->state_lock); return 0; } @@ -587,7 +973,34 @@ static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev) return priv->rx_ring_num; } -static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key) +static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev) +{ + return MLX4_EN_RSS_KEY_SIZE; +} + +static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + /* check if requested function is supported by the device */ + if ((hfunc == ETH_RSS_HASH_TOP && + !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) || + (hfunc == ETH_RSS_HASH_XOR && + !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))) + return -EINVAL; + + priv->rss_hash_fn = hfunc; + if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH)) + en_warn(priv, + "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n"); + if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH)) + en_warn(priv, + "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n"); + return 0; +} + +static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key, + u8 *hfunc) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_rss_map *rss_map = &priv->rss_map; @@ -596,17 +1009,23 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key) int err = 0; rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num; + rss_rings = 1 << ilog2(rss_rings); while (n--) { + if (!ring_index) + break; ring_index[n] = rss_map->qps[n % rss_rings].qpn - rss_map->base_qpn; } - + if (key) + memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE); + if (hfunc) + *hfunc = priv->rss_hash_fn; return err; } static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index, - const u8 *key) + const u8 *key, const u8 hfunc) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; @@ -619,6 +1038,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index, * between rings */ for (i = 0; i < priv->rx_ring_num; i++) { + if (!ring_index) + continue; if (i > 0 && !ring_index[i] && !rss_rings) rss_rings = i; @@ -633,13 +1054,22 @@ static int 
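
/*
 * Sketch (not part of the patch): mlx4_en_get_rxfh/mlx4_en_set_rxfh above
 * implement the extended ethtool RSS interface, in which the indirection
 * table, the hash key (MLX4_EN_RSS_KEY_SIZE bytes) and the hash function
 * travel together and each part is optional: a NULL table, a NULL key or
 * ETH_RSS_HASH_NO_CHANGE means "leave that part alone".  A caller that
 * only switches the hash function (illustrative):
 */
static int example_set_hfunc_only(struct net_device *dev)
{
        const struct ethtool_ops *ops = dev->ethtool_ops;

        /* no indirection table, no key: just request the XOR function,
         * which mlx4_en_check_rxfh_func() validates against device caps */
        return ops->set_rxfh(dev, NULL, NULL, ETH_RSS_HASH_XOR);
}
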
mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index, if (!is_power_of_2(rss_rings)) return -EINVAL; + if (hfunc != ETH_RSS_HASH_NO_CHANGE) { + err = mlx4_en_check_rxfh_func(dev, hfunc); + if (err) + return err; + } + mutex_lock(&mdev->state_lock); if (priv->port_up) { port_up = 1; mlx4_en_stop_port(dev, 1); } - priv->prof->rss_rings = rss_rings; + if (ring_index) + priv->prof->rss_rings = rss_rings; + if (key) + memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE); if (port_up) { err = mlx4_en_start_port(dev); @@ -1309,6 +1739,86 @@ static int mlx4_en_set_tunable(struct net_device *dev, return ret; } +static int mlx4_en_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int ret; + u8 data[4]; + + /* Read first 2 bytes to get Module & REV ID */ + ret = mlx4_get_module_info(mdev->dev, priv->port, + 0/*offset*/, 2/*size*/, data); + if (ret < 2) + return -EIO; + + switch (data[0] /* identifier */) { + case MLX4_MODULE_ID_QSFP: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case MLX4_MODULE_ID_QSFP_PLUS: + if (data[1] >= 0x3) { /* revision id */ + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + } + break; + case MLX4_MODULE_ID_QSFP28: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + break; + case MLX4_MODULE_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + default: + return -ENOSYS; + } + + return 0; +} + +static int mlx4_en_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int offset = ee->offset; + int i = 0, ret; + + if (ee->len == 0) + return -EINVAL; + + memset(data, 0, ee->len); + + while (i < ee->len) { + en_dbg(DRV, priv, + "mlx4_get_module_info i(%d) offset(%d) len(%d)\n", + i, offset, ee->len - i); + + ret = mlx4_get_module_info(mdev->dev, priv->port, + offset, ee->len - i, data + i); + + if (!ret) /* Done reading */ + return 0; + + if (ret < 0) { + en_err(priv, + "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n", + i, offset, ee->len - i, ret); + return 0; + } + + i += ret; + offset += ret; + } + return 0; +} const struct ethtool_ops mlx4_en_ethtool_ops = { .get_drvinfo = mlx4_en_get_drvinfo, @@ -1332,6 +1842,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = { .get_rxnfc = mlx4_en_get_rxnfc, .set_rxnfc = mlx4_en_set_rxnfc, .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size, + .get_rxfh_key_size = mlx4_en_get_rxfh_key_size, .get_rxfh = mlx4_en_get_rxfh, .set_rxfh = mlx4_en_set_rxfh, .get_channels = mlx4_en_get_channels, @@ -1341,6 +1852,8 @@ const struct ethtool_ops mlx4_en_ethtool_ops = { .get_priv_flags = mlx4_en_get_priv_flags, .get_tunable = mlx4_en_get_tunable, .set_tunable = mlx4_en_set_tunable, + .get_module_info = mlx4_en_get_module_info, + .get_module_eeprom = mlx4_en_get_module_eeprom }; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 2091ae88615d3fde397da0e1e04f442c96012483..9f16f754137bf2a10c9324632c3ef76b40d5eda0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -221,15 +221,12 @@ static 
void *mlx4_en_add(struct mlx4_dev *dev) { struct mlx4_en_dev *mdev; int i; - int err; printk_once(KERN_INFO "%s", mlx4_en_version); mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); - if (!mdev) { - err = -ENOMEM; + if (!mdev) goto err_free_res; - } if (mlx4_pd_alloc(dev, &mdev->priv_pdn)) goto err_free_dev; @@ -264,8 +261,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev) } /* Build device profile according to supplied module parameters */ - err = mlx4_en_get_profile(mdev); - if (err) { + if (mlx4_en_get_profile(mdev)) { mlx4_err(mdev, "Bad module parameters, aborting\n"); goto err_mr; } @@ -286,10 +282,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev) * Note: we cannot use the shared workqueue because of deadlocks caused * by the rtnl lock */ mdev->workqueue = create_singlethread_workqueue("mlx4_en"); - if (!mdev->workqueue) { - err = -ENOMEM; + if (!mdev->workqueue) goto err_mr; - } /* At this stage all non-port specific tasks are complete: * mark the card state as up */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 4d69e382b4e5d07ab22fe327276c867099a12ca4..6ff214de1111b8f7127740314dc2b23b6f12a5ec 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -575,7 +575,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv) struct mlx4_mac_entry *entry; int index = 0; int err = 0; - u64 reg_id; + u64 reg_id = 0; int *qpn = &priv->base_qpn; u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); @@ -595,7 +595,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv) return 0; } - err = mlx4_qp_reserve_range(dev, 1, 1, qpn); + err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP); en_dbg(DRV, priv, "Reserved qp %d\n", *qpn); if (err) { en_err(priv, "Failed to reserve qp for mac registration\n"); @@ -1843,8 +1843,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) } local_bh_enable(); - while (test_bit(NAPI_STATE_SCHED, &cq->napi.state)) - msleep(1); + napi_synchronize(&cq->napi); mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); mlx4_en_deactivate_cq(priv, cq); @@ -1894,6 +1893,7 @@ static void mlx4_en_clear_stats(struct net_device *dev) priv->rx_ring[i]->packets = 0; priv->rx_ring[i]->csum_ok = 0; priv->rx_ring[i]->csum_none = 0; + priv->rx_ring[i]->csum_complete = 0; } } @@ -1974,15 +1974,8 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) { struct mlx4_en_port_profile *prof = priv->prof; int i; - int err; int node; - err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn); - if (err) { - en_err(priv, "failed reserving range for TX rings\n"); - return err; - } - /* Create tx Rings */ for (i = 0; i < priv->tx_ring_num; i++) { node = cpu_to_node(i % num_online_cpus()); @@ -1991,7 +1984,6 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) goto err; if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], - priv->base_tx_qpn + i, prof->tx_ring_size, TXBB_SIZE, node, i)) goto err; @@ -2157,7 +2149,7 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) return -ERANGE; } - if (mlx4_en_timestamp_config(dev, config.tx_type, config.rx_filter)) { + if (mlx4_en_reset_config(dev, config, dev->features)) { config.tx_type = HWTSTAMP_TX_OFF; config.rx_filter = HWTSTAMP_FILTER_NONE; } @@ -2190,6 +2182,16 @@ static int mlx4_en_set_features(struct net_device *netdev, netdev_features_t features) { struct mlx4_en_priv *priv = netdev_priv(netdev); + int ret = 0; + + if (DEV_FEATURE_CHANGED(netdev, 
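
/*
 * Sketch (not part of the patch): DEV_FEATURE_CHANGED used below is
 * defined in mlx4_en.h, outside this hunk; it is assumed here to reduce
 * to an XOR-and-mask test -- "does this feature bit differ between the
 * current and the requested feature sets":
 */
#define EXAMPLE_FEATURE_CHANGED(netdev, new_features, feature) \
        (((netdev)->features ^ (new_features)) & (feature))
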
features, NETIF_F_HW_VLAN_CTAG_RX)) { + en_info(priv, "Turn %s RX vlan strip offload\n", + (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF"); + ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config, + features); + if (ret) + return ret; + } if (features & NETIF_F_LOOPBACK) priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); @@ -2249,7 +2251,7 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st #define PORT_ID_BYTE_LEN 8 static int mlx4_en_get_phys_port_id(struct net_device *dev, - struct netdev_phys_port_id *ppid) + struct netdev_phys_item_id *ppid) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_dev *mdev = priv->mdev->dev; @@ -2455,6 +2457,21 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv = netdev_priv(dev); memset(priv, 0, sizeof(struct mlx4_en_priv)); + spin_lock_init(&priv->stats_lock); + INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); + INIT_WORK(&priv->watchdog_task, mlx4_en_restart); + INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); + INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); + INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); +#ifdef CONFIG_MLX4_EN_VXLAN + INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); + INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); +#endif +#ifdef CONFIG_RFS_ACCEL + INIT_LIST_HEAD(&priv->filters); + spin_lock_init(&priv->filters_lock); +#endif + priv->dev = dev; priv->mdev = mdev; priv->ddev = &mdev->pdev->dev; @@ -2468,6 +2485,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; priv->tx_ring_num = prof->tx_ring_num; priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK; + netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key)); priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS, GFP_KERNEL); @@ -2486,16 +2504,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->cqe_size = mdev->dev->caps.cqe_size; priv->mac_index = -1; priv->msg_enable = MLX4_EN_MSG_LEVEL; - spin_lock_init(&priv->stats_lock); - INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); - INIT_WORK(&priv->watchdog_task, mlx4_en_restart); - INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); - INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); - INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); -#ifdef CONFIG_MLX4_EN_VXLAN - INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); - INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); -#endif #ifdef CONFIG_MLX4_EN_DCB if (!mlx4_is_slave(priv->mdev->dev)) { if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) { @@ -2513,6 +2521,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, /* Query for default mac and max mtu */ priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; + if (mdev->dev->caps.rx_checksum_flags_port[priv->port] & + MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP) + priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP; + /* Set default MAC */ dev->addr_len = ETH_ALEN; mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); @@ -2538,11 +2550,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, if (err) goto out; -#ifdef CONFIG_RFS_ACCEL - INIT_LIST_HEAD(&priv->filters); - spin_lock_init(&priv->filters_lock); -#endif - /* Initialize time stamping config */ priv->hwtstamp_config.flags = 0; priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; @@ -2583,15 +2590,28 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int 
port, dev->features = dev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; - dev->hw_features |= NETIF_F_LOOPBACK; + dev->hw_features |= NETIF_F_LOOPBACK | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; if (mdev->dev->caps.steering_mode == - MLX4_STEERING_MODE_DEVICE_MANAGED) + MLX4_STEERING_MODE_DEVICE_MANAGED && + mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC) dev->hw_features |= NETIF_F_NTUPLE; if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) dev->priv_flags |= IFF_UNICAST_FLT; + /* Setting a default hash function value */ + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) { + priv->rss_hash_fn = ETH_RSS_HASH_TOP; + } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) { + priv->rss_hash_fn = ETH_RSS_HASH_XOR; + } else { + en_warn(priv, + "No RSS hash capabilities exposed, using Toeplitz\n"); + priv->rss_hash_fn = ETH_RSS_HASH_TOP; + } + mdev->pndev[port] = dev; netif_carrier_off(dev); @@ -2650,3 +2670,79 @@ out: return err; } +int mlx4_en_reset_config(struct net_device *dev, + struct hwtstamp_config ts_config, + netdev_features_t features) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int port_up = 0; + int err = 0; + + if (priv->hwtstamp_config.tx_type == ts_config.tx_type && + priv->hwtstamp_config.rx_filter == ts_config.rx_filter && + !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) + return 0; /* Nothing to change */ + + if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && + (features & NETIF_F_HW_VLAN_CTAG_RX) && + (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) { + en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n"); + return -EINVAL; + } + + mutex_lock(&mdev->state_lock); + if (priv->port_up) { + port_up = 1; + mlx4_en_stop_port(dev, 1); + } + + mlx4_en_free_resources(priv); + + en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n", + ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX)); + + priv->hwtstamp_config.tx_type = ts_config.tx_type; + priv->hwtstamp_config.rx_filter = ts_config.rx_filter; + + if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + dev->features |= NETIF_F_HW_VLAN_CTAG_RX; + else + dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; + } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) { + /* RX time-stamping is OFF, update the RX vlan offload + * to the latest wanted state + */ + if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX) + dev->features |= NETIF_F_HW_VLAN_CTAG_RX; + else + dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; + } + + /* RX vlan offload and RX time-stamping can't co-exist ! 
+	 * Regardless of the caller's choice, turn off RX VLAN offload
+	 * when RX time-stamping is ON
+	 */
+	if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
+		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+			en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
+		dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+	}
+
+	err = mlx4_en_alloc_resources(priv);
+	if (err) {
+		en_err(priv, "Failed reallocating port resources\n");
+		goto out;
+	}
+	if (port_up) {
+		err = mlx4_en_start_port(dev);
+		if (err)
+			en_err(priv, "Failed starting port\n");
+	}
+
+out:
+	mutex_unlock(&mdev->state_lock);
+	netdev_features_change(dev);
+	return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 0a0261d128b9b200369b2fd34d602681ea25c363..6cb80072af6c16b33f832a06a67d7e379ecb2f1f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -91,21 +91,37 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
	 * already synchronized, no need in locking */
	state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
	switch (qport_context->link_speed & MLX4_EN_SPEED_MASK) {
+	case MLX4_EN_100M_SPEED:
+		state->link_speed = SPEED_100;
+		break;
	case MLX4_EN_1G_SPEED:
-		state->link_speed = 1000;
+		state->link_speed = SPEED_1000;
		break;
	case MLX4_EN_10G_SPEED_XAUI:
	case MLX4_EN_10G_SPEED_XFI:
-		state->link_speed = 10000;
+		state->link_speed = SPEED_10000;
+		break;
+	case MLX4_EN_20G_SPEED:
+		state->link_speed = SPEED_20000;
		break;
	case MLX4_EN_40G_SPEED:
-		state->link_speed = 40000;
+		state->link_speed = SPEED_40000;
+		break;
+	case MLX4_EN_56G_SPEED:
+		state->link_speed = SPEED_56000;
		break;
	default:
		state->link_speed = -1;
		break;
	}
-	state->transciver = qport_context->transceiver;
+
+	state->transceiver = qport_context->transceiver;
+
+	state->flags = 0; /* Reset and recalculate the port flags */
+	state->flags |= (qport_context->link_up & MLX4_EN_ANC_MASK) ?
+		MLX4_EN_PORT_ANC : 0;
+	state->flags |= (qport_context->autoneg & MLX4_EN_AUTONEG_MASK) ?
+		MLX4_EN_PORT_ANE : 0;
 
 out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
@@ -139,11 +155,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
	stats->rx_bytes = 0;
	priv->port_stats.rx_chksum_good = 0;
	priv->port_stats.rx_chksum_none = 0;
+	priv->port_stats.rx_chksum_complete = 0;
	for (i = 0; i < priv->rx_ring_num; i++) {
		stats->rx_packets += priv->rx_ring[i]->packets;
		stats->rx_bytes += priv->rx_ring[i]->bytes;
		priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
		priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
+		priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
	}
	stats->tx_packets = 0;
	stats->tx_bytes = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h
index 745090b49d9efe2792ec010314752473f697eb11..040da4b16b1c65ee3448598550ee6b5726ccdb76 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.h
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h
@@ -53,22 +53,49 @@ enum {
	MLX4_MCAST_ENABLE = 2,
 };
 
+enum mlx4_link_mode {
+	MLX4_1000BASE_CX_SGMII	= 0,
+	MLX4_1000BASE_KX	= 1,
+	MLX4_10GBASE_CX4	= 2,
+	MLX4_10GBASE_KX4	= 3,
+	MLX4_10GBASE_KR		= 4,
+	MLX4_20GBASE_KR2	= 5,
+	MLX4_40GBASE_CR4	= 6,
+	MLX4_40GBASE_KR4	= 7,
+	MLX4_56GBASE_KR4	= 8,
+	MLX4_10GBASE_CR		= 12,
+	MLX4_10GBASE_SR		= 13,
+	MLX4_40GBASE_SR4	= 15,
+	MLX4_56GBASE_CR4	= 17,
+	MLX4_56GBASE_SR4	= 18,
+	MLX4_100BASE_TX		= 24,
+	MLX4_1000BASE_T		= 25,
+	MLX4_10GBASE_T		= 26,
+};
+
+#define MLX4_PROT_MASK(link_mode) (1 << link_mode)
+
+#include <net/ip.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#endif
+
 #include "mlx4_en.h"
 
 static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
@@ -74,7 +78,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
	page_alloc->page_size = PAGE_SIZE << order;
	page_alloc->page = page;
	page_alloc->dma = dma;
-	page_alloc->page_offset = frag_info->frag_align;
+	page_alloc->page_offset = 0;
	/* Not doing get_page() for each frag is a big win
	 * on asymetric workloads. Note we can not use atomic_set().
	 */
@@ -119,7 +123,6 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
 
 out:
	while (i--) {
-		frag_info = &priv->frag_info[i];
		if (page_alloc[i].page != ring_alloc[i].page) {
			dma_unmap_page(priv->ddev, page_alloc[i].dma,
				page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
@@ -157,7 +160,7 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
 
		if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
-				     frag_info, GFP_KERNEL))
+				     frag_info, GFP_KERNEL | __GFP_COLD))
			goto out;
	}
	return 0;
@@ -269,7 +272,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size,
-						    GFP_KERNEL)) {
+						    GFP_KERNEL | __GFP_COLD)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					en_err(priv, "Failed to allocate enough rx buffers\n");
					return -ENOMEM;
@@ -636,13 +639,94 @@ static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
	int index = ring->prod & ring->size_mask;
 
	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
-		if (mlx4_en_prepare_rx_desc(priv, ring, index, GFP_ATOMIC))
+		if (mlx4_en_prepare_rx_desc(priv, ring, index,
+					    GFP_ATOMIC | __GFP_COLD))
			break;
		ring->prod++;
		index = ring->prod & ring->size_mask;
	}
 }
 
+/* When the hardware doesn't strip the VLAN header, we need to calculate
+ * the checksum over it and add it to the hardware's checksum calculation
+ */
+static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
+					 struct vlan_hdr *vlanh)
+{
+	return csum_add(hw_checksum, *(__wsum *)vlanh);
+}
+
+/* Although the stack expects a checksum which doesn't include the pseudo
+ * header, the HW adds it. To address that, we subtract the pseudo
+ * header checksum from the checksum value provided by the HW.
+ */
+static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
+				struct iphdr *iph)
+{
+	__u16 length_for_csum = 0;
+	__wsum csum_pseudo_header = 0;
+
+	length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
+	csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
+						length_for_csum, iph->protocol, 0);
+	skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+/* In IPv6 packets, besides subtracting the pseudo header checksum,
+ * we also compute and add the IP header checksum which
+ * is not added by the HW.
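+ * The result is that skb->csum covers the IPv6 header plus payload,
+ * which is what CHECKSUM_COMPLETE consumers expect; e.g. for a TCP
+ * segment the value ends up as roughly
+ *   hw_checksum - pseudo header csum + csum of the 40-byte IPv6 header.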
+ */ +static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, + struct ipv6hdr *ipv6h) +{ + __wsum csum_pseudo_hdr = 0; + + if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS) + return -1; + hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8)); + + csum_pseudo_hdr = csum_partial(&ipv6h->saddr, + sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); + csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len); + csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr)); + + skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr); + skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0)); + return 0; +} +#endif +static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, + int hwtstamp_rx_filter) +{ + __wsum hw_checksum = 0; + + void *hdr = (u8 *)va + sizeof(struct ethhdr); + + hw_checksum = csum_unfold((__force __sum16)cqe->checksum); + + if (((struct ethhdr *)va)->h_proto == htons(ETH_P_8021Q) && + hwtstamp_rx_filter != HWTSTAMP_FILTER_NONE) { + /* next protocol non IPv4 or IPv6 */ + if (((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto + != htons(ETH_P_IP) && + ((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto + != htons(ETH_P_IPV6)) + return -1; + hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr); + hdr += sizeof(struct vlan_hdr); + } + + if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4)) + get_fixed_ipv4_csum(hw_checksum, skb, hdr); +#if IS_ENABLED(CONFIG_IPV6) + else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) + if (get_fixed_ipv6_csum(hw_checksum, skb, hdr)) + return -1; +#endif + return 0; +} + int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) { struct mlx4_en_priv *priv = netdev_priv(dev); @@ -744,73 +828,96 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL)); if (likely(dev->features & NETIF_F_RXCSUM)) { - if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && - (cqe->checksum == cpu_to_be16(0xffff))) { - ring->csum_ok++; - /* This packet is eligible for GRO if it is: - * - DIX Ethernet (type interpretation) - * - TCP/IP (v4) - * - without IP options - * - not an IP fragment - * - no LLS polling in progress - */ - if (!mlx4_en_cq_busy_polling(cq) && - (dev->features & NETIF_F_GRO)) { - struct sk_buff *gro_skb = napi_get_frags(&cq->napi); - if (!gro_skb) - goto next; - - nr = mlx4_en_complete_rx_desc(priv, - rx_desc, frags, gro_skb, - length); - if (!nr) - goto next; + if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | + MLX4_CQE_STATUS_UDP)) { + if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && + cqe->checksum == cpu_to_be16(0xffff)) { + ip_summed = CHECKSUM_UNNECESSARY; + ring->csum_ok++; + } else { + ip_summed = CHECKSUM_NONE; + ring->csum_none++; + } + } else { + if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP && + (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | + MLX4_CQE_STATUS_IPV6))) { + ip_summed = CHECKSUM_COMPLETE; + ring->csum_complete++; + } else { + ip_summed = CHECKSUM_NONE; + ring->csum_none++; + } + } + } else { + ip_summed = CHECKSUM_NONE; + ring->csum_none++; + } - skb_shinfo(gro_skb)->nr_frags = nr; - gro_skb->len = length; - gro_skb->data_len = length; - gro_skb->ip_summed = CHECKSUM_UNNECESSARY; + /* This packet is eligible for GRO if it is: + * - DIX Ethernet (type interpretation) + * - TCP/IP (v4) + * - without IP options + * - not an IP fragment + * - no LLS polling in 
progress + */ + if (!mlx4_en_cq_busy_polling(cq) && + (dev->features & NETIF_F_GRO)) { + struct sk_buff *gro_skb = napi_get_frags(&cq->napi); + if (!gro_skb) + goto next; + + nr = mlx4_en_complete_rx_desc(priv, + rx_desc, frags, gro_skb, + length); + if (!nr) + goto next; + + if (ip_summed == CHECKSUM_COMPLETE) { + void *va = skb_frag_address(skb_shinfo(gro_skb)->frags); + if (check_csum(cqe, gro_skb, va, ring->hwtstamp_rx_filter)) { + ip_summed = CHECKSUM_NONE; + ring->csum_none++; + ring->csum_complete--; + } + } - if (l2_tunnel) - gro_skb->csum_level = 1; - if ((cqe->vlan_my_qpn & - cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) && - (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { - u16 vid = be16_to_cpu(cqe->sl_vid); + skb_shinfo(gro_skb)->nr_frags = nr; + gro_skb->len = length; + gro_skb->data_len = length; + gro_skb->ip_summed = ip_summed; - __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid); - } + if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY) + gro_skb->csum_level = 1; - if (dev->features & NETIF_F_RXHASH) - skb_set_hash(gro_skb, - be32_to_cpu(cqe->immed_rss_invalid), - PKT_HASH_TYPE_L3); + if ((cqe->vlan_my_qpn & + cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) && + (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { + u16 vid = be16_to_cpu(cqe->sl_vid); - skb_record_rx_queue(gro_skb, cq->ring); - skb_mark_napi_id(gro_skb, &cq->napi); + __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid); + } - if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { - timestamp = mlx4_en_get_cqe_ts(cqe); - mlx4_en_fill_hwtstamps(mdev, - skb_hwtstamps(gro_skb), - timestamp); - } + if (dev->features & NETIF_F_RXHASH) + skb_set_hash(gro_skb, + be32_to_cpu(cqe->immed_rss_invalid), + PKT_HASH_TYPE_L3); - napi_gro_frags(&cq->napi); - goto next; - } + skb_record_rx_queue(gro_skb, cq->ring); + skb_mark_napi_id(gro_skb, &cq->napi); - /* GRO not possible, complete processing here */ - ip_summed = CHECKSUM_UNNECESSARY; - } else { - ip_summed = CHECKSUM_NONE; - ring->csum_none++; + if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { + timestamp = mlx4_en_get_cqe_ts(cqe); + mlx4_en_fill_hwtstamps(mdev, + skb_hwtstamps(gro_skb), + timestamp); } - } else { - ip_summed = CHECKSUM_NONE; - ring->csum_none++; + + napi_gro_frags(&cq->napi); + goto next; } + /* GRO not possible, complete processing here */ skb = mlx4_en_rx_skb(priv, rx_desc, frags, length); if (!skb) { priv->stats.rx_dropped++; @@ -822,6 +929,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud goto next; } + if (ip_summed == CHECKSUM_COMPLETE) { + if (check_csum(cqe, skb, skb->data, ring->hwtstamp_rx_filter)) { + ip_summed = CHECKSUM_NONE; + ring->csum_complete--; + ring->csum_none++; + } + } + skb->ip_summed = ip_summed; skb->protocol = eth_type_trans(skb, dev); skb_record_rx_queue(skb, cq->ring); @@ -879,8 +994,8 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq) struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); struct mlx4_en_priv *priv = netdev_priv(cq->dev); - if (priv->port_up) - napi_schedule(&cq->napi); + if (likely(priv->port_up)) + napi_schedule_irqoff(&cq->napi); else mlx4_en_arm_cq(priv, cq); } @@ -910,20 +1025,18 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) cpu_curr = smp_processor_id(); aff = irq_desc_get_irq_data(cq->irq_desc)->affinity; - if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) { - /* Current cpu is not according to smp_irq_affinity - - * probably affinity changed. 
need to stop this NAPI - * poll, and restart it on the right CPU - */ - napi_complete(napi); - mlx4_en_arm_cq(priv, cq); - return 0; - } - } else { - /* Done for now */ - napi_complete(napi); - mlx4_en_arm_cq(priv, cq); + if (likely(cpumask_test_cpu(cpu_curr, aff))) + return budget; + + /* Current cpu is not according to smp_irq_affinity - + * probably affinity changed. need to stop this NAPI + * poll, and restart it on the right CPU + */ + done = 0; } + /* Done for now */ + napi_complete_done(napi, done); + mlx4_en_arm_cq(priv, cq); return done; } @@ -946,15 +1059,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev) (eff_mtu > buf_size + frag_sizes[i]) ? frag_sizes[i] : eff_mtu - buf_size; priv->frag_info[i].frag_prefix_size = buf_size; - if (!i) { - priv->frag_info[i].frag_align = NET_IP_ALIGN; - priv->frag_info[i].frag_stride = - ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES); - } else { - priv->frag_info[i].frag_align = 0; - priv->frag_info[i].frag_stride = - ALIGN(frag_sizes[i], SMP_CACHE_BYTES); - } + priv->frag_info[i].frag_stride = ALIGN(frag_sizes[i], + SMP_CACHE_BYTES); buf_size += priv->frag_info[i].frag_size; i++; } @@ -967,11 +1073,10 @@ void mlx4_en_calc_rx_buf(struct net_device *dev) eff_mtu, priv->num_frags); for (i = 0; i < priv->num_frags; i++) { en_err(priv, - " frag:%d - size:%d prefix:%d align:%d stride:%d\n", + " frag:%d - size:%d prefix:%d stride:%d\n", i, priv->frag_info[i].frag_size, priv->frag_info[i].frag_prefix_size, - priv->frag_info[i].frag_align, priv->frag_info[i].frag_stride); } } @@ -1026,7 +1131,8 @@ int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv) int err; u32 qpn; - err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn); + err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn, + MLX4_RESERVE_A0_QP); if (err) { en_err(priv, "Failed reserving drop qpn\n"); return err; @@ -1065,14 +1171,11 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) int i, qpn; int err = 0; int good_qps = 0; - static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 0x1983A2FC, - 0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 0xA74499AD, - 0x593D56D9, 0xF3253C06, 0x2ADC1FFC}; en_dbg(DRV, priv, "Configuring rss steering\n"); err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num, priv->rx_ring_num, - &rss_map->base_qpn); + &rss_map->base_qpn, 0); if (err) { en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num); return err; @@ -1122,9 +1225,19 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) rss_context->flags = rss_mask; rss_context->hash_fn = MLX4_RSS_HASH_TOP; - for (i = 0; i < 10; i++) - rss_context->rss_key[i] = cpu_to_be32(rsskey[i]); - + if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) { + rss_context->hash_fn = MLX4_RSS_HASH_XOR; + } else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) { + rss_context->hash_fn = MLX4_RSS_HASH_TOP; + memcpy(rss_context->rss_key, priv->rss_key, + MLX4_EN_RSS_KEY_SIZE); + netdev_rss_key_fill(rss_context->rss_key, + MLX4_EN_RSS_KEY_SIZE); + } else { + en_err(priv, "Unknown RSS hash function requested\n"); + err = -EINVAL; + goto indir_err; + } err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context, &rss_map->indir_qp, &rss_map->indir_state); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index 49d5afc7cfb84cfcd413b73d6e7ff9b938802256..2d8ee66138e8ad48cb72daa773a67c1f421e4cac 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -129,11 +129,15 @@ static int mlx4_en_test_speed(struct 
mlx4_en_priv *priv) if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) return -ENOMEM; - /* The device supports 1G, 10G and 40G speeds */ - if (priv->port_state.link_speed != 1000 && - priv->port_state.link_speed != 10000 && - priv->port_state.link_speed != 40000) + /* The device supports 100M, 1G, 10G, 20G, 40G and 56G speed */ + if (priv->port_state.link_speed != SPEED_100 && + priv->port_state.link_speed != SPEED_1000 && + priv->port_state.link_speed != SPEED_10000 && + priv->port_state.link_speed != SPEED_20000 && + priv->port_state.link_speed != SPEED_40000 && + priv->port_state.link_speed != SPEED_56000) return priv->port_state.link_speed; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 454d9fea640ecb0aa849b8d8106695d017ca3309..a308d41e4de08678107b185df2c7b8b7df3b39a7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -46,7 +46,7 @@ #include "mlx4_en.h" int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, - struct mlx4_en_tx_ring **pring, int qpn, u32 size, + struct mlx4_en_tx_ring **pring, u32 size, u16 stride, int node, int queue_index) { struct mlx4_en_dev *mdev = priv->mdev; @@ -112,11 +112,17 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, ring, ring->buf, ring->size, ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); - ring->qpn = qpn; + err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn, + MLX4_RESERVE_ETH_BF_QP); + if (err) { + en_err(priv, "failed reserving qp for TX ring\n"); + goto err_map; + } + err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL); if (err) { en_err(priv, "Failed allocating qp %d\n", ring->qpn); - goto err_map; + goto err_reserve; } ring->qp.event = mlx4_en_sqp_event; @@ -143,6 +149,8 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, *pring = ring; return 0; +err_reserve: + mlx4_qp_release_range(mdev->dev, ring->qpn, 1); err_map: mlx4_en_unmap_buffer(&ring->wqres.buf); err_hwq_res: @@ -479,8 +487,8 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq) struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); struct mlx4_en_priv *priv = netdev_priv(cq->dev); - if (priv->port_up) - napi_schedule(&cq->napi); + if (likely(priv->port_up)) + napi_schedule_irqoff(&cq->napi); else mlx4_en_arm_cq(priv, cq); } diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 49290a4059039a72c86e119c59d2d03ec6c3eb4c..3d275fbaf0eb05df05159a06fed22812469b6200 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -450,7 +450,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_eqe *eqe; - int cqn; + int cqn = -1; int eqes_found = 0; int set_ci = 0; int port; @@ -758,6 +758,13 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) eq_set_ci(eq, 1); + /* cqn is 24bit wide but is initialized such that its higher bits + * are ones too. Thus, if we got any event, cqn's high bits should be off + * and we need to schedule the tasklet. 
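+	 * For example, cqn stays at -1 (all bits set) when no event
+	 * updated it, so (cqn & ~0xffffff) is non-zero and the tasklet is
+	 * skipped; an event overwrites cqn with a 24-bit value, clearing
+	 * bits 24-31 so that the tasklet_schedule() below runs.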
+ */ + if (!(cqn & ~0xffffff)) + tasklet_schedule(&eq->tasklet_ctx.task); + return eqes_found; } @@ -971,6 +978,12 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent, eq->cons_index = 0; + INIT_LIST_HEAD(&eq->tasklet_ctx.list); + INIT_LIST_HEAD(&eq->tasklet_ctx.process_list); + spin_lock_init(&eq->tasklet_ctx.lock); + tasklet_init(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb, + (unsigned long)&eq->tasklet_ctx); + return err; err_out_free_mtt: @@ -1027,6 +1040,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev, } } synchronize_irq(eq->irq); + tasklet_disable(&eq->tasklet_ctx.task); mlx4_mtt_cleanup(dev, &eq->mtt); for (i = 0; i < npages; ++i) @@ -1123,8 +1137,12 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) goto err_out_free; } - err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs, - dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0); + err = mlx4_bitmap_init(&priv->eq_table.bitmap, + roundup_pow_of_two(dev->caps.num_eqs), + dev->caps.num_eqs - 1, + dev->caps.reserved_eqs, + roundup_pow_of_two(dev->caps.num_eqs) - + dev->caps.num_eqs); if (err) goto err_out_free; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 2e88a235e26b47b5fc85ad2a6c5ea4a494997e14..ef3b95bac2adc46a7bf976bda2a3f51988aa2ab5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -139,7 +139,13 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [10] = "TCP/IP offloads/flow-steering for VXLAN support", [11] = "MAD DEMUX (Secure-Host) support", [12] = "Large cache line (>64B) CQE stride support", - [13] = "Large cache line (>64B) EQE stride support" + [13] = "Large cache line (>64B) EQE stride support", + [14] = "Ethernet protocol control support", + [15] = "Ethernet Backplane autoneg support", + [16] = "CONFIG DEV support", + [17] = "Asymmetric EQs support", + [18] = "More than 80 VFs support", + [19] = "Performance optimized for limited rule configuration flow steering support" }; int i; @@ -174,6 +180,61 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg) return err; } +int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + u8 in_modifier; + u8 field; + u16 field16; + int err; + +#define QUERY_FUNC_BUS_OFFSET 0x00 +#define QUERY_FUNC_DEVICE_OFFSET 0x01 +#define QUERY_FUNC_FUNCTION_OFFSET 0x01 +#define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET 0x03 +#define QUERY_FUNC_RSVD_EQS_OFFSET 0x04 +#define QUERY_FUNC_MAX_EQ_OFFSET 0x06 +#define QUERY_FUNC_RSVD_UARS_OFFSET 0x0b + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + in_modifier = slave; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0, + MLX4_CMD_QUERY_FUNC, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + goto out; + + MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET); + func->bus = field & 0xf; + MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET); + func->device = field & 0xf1; + MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET); + func->function = field & 0x7; + MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET); + func->physical_function = field & 0xf; + MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET); + func->rsvd_eqs = field16 & 0xffff; + MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET); + func->max_eq = field16 & 0xffff; + MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET); + func->rsvd_uars = field & 0x0f; + + mlx4_dbg(dev, 
"Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n", + func->bus, func->device, func->function, func->physical_function, + func->max_eq, func->rsvd_eqs, func->rsvd_uars); + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -184,6 +245,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, u8 field, port; u32 size, proxy_qp, qkey; int err = 0; + struct mlx4_func func; #define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 @@ -205,10 +267,16 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68 +#define QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET 0x6c + #define QUERY_FUNC_CAP_FMR_FLAG 0x80 #define QUERY_FUNC_CAP_FLAG_RDMA 0x40 #define QUERY_FUNC_CAP_FLAG_ETH 0x80 #define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10 +#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX 0x04 + +#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG (1UL << 31) +#define QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG (1UL << 30) /* when opcode modifier = 1 */ #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 @@ -228,6 +296,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, #define QUERY_FUNC_CAP_VF_ENABLE_QP0 0x08 #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80 +#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31) if (vhcr->op_modifier == 1) { struct mlx4_active_ports actv_ports = @@ -277,7 +346,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, mlx4_get_active_ports(dev, slave); /* enable rdma and ethernet interfaces, and new quota locations */ field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA | - QUERY_FUNC_CAP_FLAG_QUOTAS); + QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX); MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); field = min( @@ -306,11 +375,24 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, size = dev->caps.num_cqs; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP); - size = dev->caps.num_eqs; - MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); - - size = dev->caps.reserved_eqs; - MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) || + mlx4_QUERY_FUNC(dev, &func, slave)) { + size = vhcr->in_modifier & + QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ? + dev->caps.num_eqs : + rounddown_pow_of_two(dev->caps.num_eqs); + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); + size = dev->caps.reserved_eqs; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); + } else { + size = vhcr->in_modifier & + QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ? 
+ func.max_eq : + rounddown_pow_of_two(func.max_eq); + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); + size = func.rsvd_eqs; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); + } size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave]; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); @@ -326,13 +408,16 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP); + size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG | + QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET); } else err = -EINVAL; return err; } -int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, +int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, struct mlx4_func_cap *func_cap) { struct mlx4_cmd_mailbox *mailbox; @@ -340,14 +425,17 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, u8 field, op_modifier; u32 size, qkey; int err = 0, quotas = 0; + u32 in_modifier; op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */ + in_modifier = op_modifier ? gen_or_port : + QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); - err = mlx4_cmd_box(dev, 0, mailbox->dma, gen_or_port, op_modifier, + err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier, MLX4_CMD_QUERY_FUNC_CAP, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (err) @@ -415,6 +503,19 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); func_cap->reserved_eq = size & 0xFFFFFF; + func_cap->extra_flags = 0; + + /* Mailbox data from 0x6c and onward should only be treated if + * QUERY_FUNC_CAP_FLAG_VALID_MAILBOX is set in func_cap->flags + */ + if (func_cap->flags & QUERY_FUNC_CAP_FLAG_VALID_MAILBOX) { + MLX4_GET(size, outbox, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET); + if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG) + func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_BF_RES_QP; + if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG) + func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_A0_RES_QP; + } + goto out; } @@ -519,6 +620,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21 #define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22 #define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23 +#define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET 0x26 #define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27 #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29 #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b @@ -560,6 +662,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 #define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a +#define QUERY_DEV_CAP_ETH_PROT_CTRL_OFFSET 0x7a #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 @@ -571,11 +674,15 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90 #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94 +#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET 0x94 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 +#define 
QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c #define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d #define QUERY_DEV_CAP_VXLAN 0x9e #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0 +#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET 0xa8 +#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET 0xac dev_cap->flags2 = 0; mailbox = mlx4_alloc_cmd_mailbox(dev); @@ -605,7 +712,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET); dev_cap->max_mpts = 1 << (field & 0x3f); MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET); - dev_cap->reserved_eqs = field & 0xf; + dev_cap->reserved_eqs = 1 << (field & 0xf); MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET); dev_cap->max_eqs = 1 << (field & 0xf); MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET); @@ -616,6 +723,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->reserved_mrws = 1 << (field & 0xf); MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET); dev_cap->max_mtt_seg = 1 << (field & 0x3f); + MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET); + dev_cap->num_sys_eqs = size & 0xfff; MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET); dev_cap->max_requester_per_qp = 1 << (field & 0x3f); MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET); @@ -737,15 +846,22 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); dev_cap->max_rq_desc_sz = size; MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); + if (field & (1 << 5)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL; if (field & (1 << 6)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE; if (field & (1 << 7)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE; - MLX4_GET(dev_cap->bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); + if (field & 0x20) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV; MLX4_GET(dev_cap->reserved_lkey, outbox, QUERY_DEV_CAP_RSVD_LKEY_OFFSET); + MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET); + if (field32 & (1 << 0)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP; MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC); if (field & 1<<6) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN; @@ -763,6 +879,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) if (field32 & (1 << 0)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX; + MLX4_GET(dev_cap->dmfs_high_rate_qpn_base, outbox, + QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET); + dev_cap->dmfs_high_rate_qpn_base &= MGM_QPN_MASK; + MLX4_GET(dev_cap->dmfs_high_rate_qpn_range, outbox, + QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET); + dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK; + MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); if (field32 & (1 << 16)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP; @@ -770,62 +893,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL; if (field32 & (1 << 20)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM; + if (field32 & (1 << 21)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS; - if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { - for (i = 1; i <= dev_cap->num_ports; ++i) { - MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); - dev_cap->max_vl[i] = field >> 4; - MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); - dev_cap->ib_mtu[i] = field >> 4; - 
dev_cap->max_port_width[i] = field & 0xf; - MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); - dev_cap->max_gids[i] = 1 << (field & 0xf); - MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET); - dev_cap->max_pkeys[i] = 1 << (field & 0xf); - } - } else { -#define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00 -#define QUERY_PORT_MTU_OFFSET 0x01 -#define QUERY_PORT_ETH_MTU_OFFSET 0x02 -#define QUERY_PORT_WIDTH_OFFSET 0x06 -#define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07 -#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a -#define QUERY_PORT_MAX_VL_OFFSET 0x0b -#define QUERY_PORT_MAC_OFFSET 0x10 -#define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18 -#define QUERY_PORT_WAVELENGTH_OFFSET 0x1c -#define QUERY_PORT_TRANS_CODE_OFFSET 0x20 - - for (i = 1; i <= dev_cap->num_ports; ++i) { - err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); - if (err) - goto out; - - MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET); - dev_cap->supported_port_types[i] = field & 3; - dev_cap->suggested_type[i] = (field >> 3) & 1; - dev_cap->default_sense[i] = (field >> 4) & 1; - MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); - dev_cap->ib_mtu[i] = field & 0xf; - MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); - dev_cap->max_port_width[i] = field & 0xf; - MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET); - dev_cap->max_gids[i] = 1 << (field >> 4); - dev_cap->max_pkeys[i] = 1 << (field & 0xf); - MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); - dev_cap->max_vl[i] = field & 0xf; - MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET); - dev_cap->log_max_macs[i] = field & 0xf; - dev_cap->log_max_vlans[i] = field >> 4; - MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET); - MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET); - MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET); - dev_cap->trans_type[i] = field32 >> 24; - dev_cap->vendor_oui[i] = field32 & 0xffffff; - MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET); - MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET); - } + for (i = 1; i <= dev_cap->num_ports; i++) { + err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i); + if (err) + goto out; } mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n", @@ -836,8 +910,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) * we can't use any EQs whose doorbell falls on that page, * even if the EQ itself isn't reserved. 
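	 * (This is why reserved_eqs is raised below to at least
	 * reserved_uars * 4 when the firmware reports no sys EQs.)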
*/ - dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4, - dev_cap->reserved_eqs); + if (dev_cap->num_sys_eqs == 0) + dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4, + dev_cap->reserved_eqs); + else + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS; mlx4_dbg(dev, "Max ICM size %lld MB\n", (unsigned long long) dev_cap->max_icm_sz >> 20); @@ -847,8 +924,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz); mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz); - mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", - dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz); + mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n", + dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs, + dev_cap->eqc_entry_sz); mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n", dev_cap->reserved_mrws, dev_cap->reserved_mtts); mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n", @@ -858,8 +936,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz); mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n", - dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1], - dev_cap->max_port_width[1]); + dev_cap->local_ca_ack_delay, 128 << dev_cap->port_cap[1].ib_mtu, + dev_cap->port_cap[1].max_port_width); mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n", dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n", @@ -867,6 +945,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz); mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters); mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz); + mlx4_dbg(dev, "DMFS high rate steer QPn base: %d\n", + dev_cap->dmfs_high_rate_qpn_base); + mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n", + dev_cap->dmfs_high_rate_qpn_range); dump_dev_cap_flags(dev, dev_cap->flags); dump_dev_cap_flags2(dev, dev_cap->flags2); @@ -876,6 +958,89 @@ out: return err; } +int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + u8 field; + u32 field32; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + if (err) + goto out; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); + port_cap->max_vl = field >> 4; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); + port_cap->ib_mtu = field >> 4; + port_cap->max_port_width = field & 0xf; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); + port_cap->max_gids = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET); + port_cap->max_pkeys = 1 << (field & 0xf); + } else { +#define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00 +#define QUERY_PORT_MTU_OFFSET 0x01 +#define QUERY_PORT_ETH_MTU_OFFSET 0x02 +#define QUERY_PORT_WIDTH_OFFSET 0x06 +#define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07 +#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a +#define 
QUERY_PORT_MAX_VL_OFFSET 0x0b +#define QUERY_PORT_MAC_OFFSET 0x10 +#define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18 +#define QUERY_PORT_WAVELENGTH_OFFSET 0x1c +#define QUERY_PORT_TRANS_CODE_OFFSET 0x20 + + err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + if (err) + goto out; + + MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET); + port_cap->supported_port_types = field & 3; + port_cap->suggested_type = (field >> 3) & 1; + port_cap->default_sense = (field >> 4) & 1; + port_cap->dmfs_optimized_state = (field >> 5) & 1; + MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); + port_cap->ib_mtu = field & 0xf; + MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); + port_cap->max_port_width = field & 0xf; + MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET); + port_cap->max_gids = 1 << (field >> 4); + port_cap->max_pkeys = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); + port_cap->max_vl = field & 0xf; + MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET); + port_cap->log_max_macs = field & 0xf; + port_cap->log_max_vlans = field >> 4; + MLX4_GET(port_cap->eth_mtu, outbox, QUERY_PORT_ETH_MTU_OFFSET); + MLX4_GET(port_cap->def_mac, outbox, QUERY_PORT_MAC_OFFSET); + MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET); + port_cap->trans_type = field32 >> 24; + port_cap->vendor_oui = field32 & 0xffffff; + MLX4_GET(port_cap->wavelength, outbox, QUERY_PORT_WAVELENGTH_OFFSET); + MLX4_GET(port_cap->trans_code, outbox, QUERY_PORT_TRANS_CODE_OFFSET); + } + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +#define DEV_CAP_EXT_2_FLAG_VLAN_CONTROL (1 << 26) +#define DEV_CAP_EXT_2_FLAG_80_VFS (1 << 21) +#define DEV_CAP_EXT_2_FLAG_FSM (1 << 20) + int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -885,7 +1050,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, u64 flags; int err = 0; u8 field; - u32 bmme_flags; + u32 bmme_flags, field32; int real_port; int slave_port; int first_port; @@ -956,6 +1121,12 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, field &= ~0x80; MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); + /* turn off host side virt features (VST, FSM, etc) for guests */ + MLX4_GET(field32, outbox->buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); + field32 &= ~(DEV_CAP_EXT_2_FLAG_VLAN_CONTROL | DEV_CAP_EXT_2_FLAG_80_VFS | + DEV_CAP_EXT_2_FLAG_FSM); + MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); + return 0; } @@ -1374,6 +1545,12 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) struct mlx4_cmd_mailbox *mailbox; __be32 *inbox; int err; + static const u8 a0_dmfs_hw_steering[] = { + [MLX4_STEERING_DMFS_A0_DEFAULT] = 0, + [MLX4_STEERING_DMFS_A0_DYNAMIC] = 1, + [MLX4_STEERING_DMFS_A0_STATIC] = 2, + [MLX4_STEERING_DMFS_A0_DISABLE] = 3 + }; #define INIT_HCA_IN_SIZE 0x200 #define INIT_HCA_VERSION_OFFSET 0x000 @@ -1394,6 +1571,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) #define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67) +#define INIT_HCA_NUM_SYS_EQS_OFFSET (INIT_HCA_QPC_OFFSET + 0x6a) #define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70) #define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77) #define INIT_HCA_MCAST_OFFSET 0x0c0 @@ -1406,6 +1584,7 @@ 
int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) #define INIT_HCA_FS_PARAM_OFFSET 0x1d0 #define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00) #define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12) +#define INIT_HCA_FS_A0_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x18) #define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b) #define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21) #define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22) @@ -1497,6 +1676,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET); MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET); MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET); + MLX4_PUT(inbox, param->num_sys_eqs, INIT_HCA_NUM_SYS_EQS_OFFSET); MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET); MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET); @@ -1515,8 +1695,11 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) /* Enable Ethernet flow steering * with udp unicast and tcp unicast */ - MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN), - INIT_HCA_FS_ETH_BITS_OFFSET); + if (dev->caps.dmfs_high_steer_mode != + MLX4_STEERING_DMFS_A0_STATIC) + MLX4_PUT(inbox, + (u8)(MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN), + INIT_HCA_FS_ETH_BITS_OFFSET); MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET); /* Enable IPoIB flow steering @@ -1526,6 +1709,13 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) INIT_HCA_FS_IB_BITS_OFFSET); MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, INIT_HCA_FS_IB_NUM_ADDRS_OFFSET); + + if (dev->caps.dmfs_high_steer_mode != + MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) + MLX4_PUT(inbox, + ((u8)(a0_dmfs_hw_steering[dev->caps.dmfs_high_steer_mode] + << 6)), + INIT_HCA_FS_A0_OFFSET); } else { MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); MLX4_PUT(inbox, param->log_mc_entry_sz, @@ -1576,6 +1766,12 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, u32 dword_field; int err; u8 byte_field; + static const u8 a0_dmfs_query_hw_steering[] = { + [0] = MLX4_STEERING_DMFS_A0_DEFAULT, + [1] = MLX4_STEERING_DMFS_A0_DYNAMIC, + [2] = MLX4_STEERING_DMFS_A0_STATIC, + [3] = MLX4_STEERING_DMFS_A0_DISABLE + }; #define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04 #define QUERY_HCA_CORE_CLOCK_OFFSET 0x0c @@ -1607,6 +1803,7 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET); MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET); MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET); + MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); @@ -1627,6 +1824,10 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); MLX4_GET(param->log_mc_table_sz, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); + MLX4_GET(byte_field, outbox, + INIT_HCA_FS_A0_OFFSET); + param->dmfs_high_steer_mode = + a0_dmfs_query_hw_steering[(byte_field >> 6) & 3]; } else { MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); MLX4_GET(param->log_mc_entry_sz, outbox, @@ -1841,14 +2042,18 @@ int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) struct mlx4_config_dev { __be32 update_flags; - __be32 rsdv1[3]; + __be32 rsvd1[3]; __be16 vxlan_udp_dport; __be16 rsvd2; + __be32 
rsvd3[27]; + __be16 rsvd4; + u8 rsvd5; + u8 rx_checksum_val; }; #define MLX4_VXLAN_UDP_DPORT (1 << 0) -static int mlx4_CONFIG_DEV(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) +static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) { int err; struct mlx4_cmd_mailbox *mailbox; @@ -1866,6 +2071,77 @@ static int mlx4_CONFIG_DEV(struct mlx4_dev *dev, struct mlx4_config_dev *config_ return err; } +static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) +{ + int err; + struct mlx4_cmd_mailbox *mailbox; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + + if (!err) + memcpy(config_dev, mailbox->buf, sizeof(*config_dev)); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +/* Conversion between the HW values and the actual functionality. + * The value represented by the array index, + * and the functionality determined by the flags. + */ +static const u8 config_dev_csum_flags[] = { + [0] = 0, + [1] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP, + [2] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP | + MLX4_RX_CSUM_MODE_L4, + [3] = MLX4_RX_CSUM_MODE_L4 | + MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP | + MLX4_RX_CSUM_MODE_MULTI_VLAN +}; + +int mlx4_config_dev_retrieval(struct mlx4_dev *dev, + struct mlx4_config_dev_params *params) +{ + struct mlx4_config_dev config_dev; + int err; + u8 csum_mask; + +#define CONFIG_DEV_RX_CSUM_MODE_MASK 0x7 +#define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET 0 +#define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET 4 + + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV)) + return -ENOTSUPP; + + err = mlx4_CONFIG_DEV_get(dev, &config_dev); + if (err) + return err; + + csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) & + CONFIG_DEV_RX_CSUM_MODE_MASK; + + if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0])) + return -EINVAL; + params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask]; + + csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) & + CONFIG_DEV_RX_CSUM_MODE_MASK; + + if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0])) + return -EINVAL; + params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask]; + + params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval); + int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port) { struct mlx4_config_dev config_dev; @@ -1874,7 +2150,7 @@ int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port) config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT); config_dev.vxlan_udp_dport = udp_port; - return mlx4_CONFIG_DEV(dev, &config_dev); + return mlx4_CONFIG_DEV_set(dev, &config_dev); } EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port); @@ -2144,3 +2420,142 @@ out: mlx4_free_cmd_mailbox(dev, mailbox); return err; } + +/* Access Reg commands */ +enum mlx4_access_reg_masks { + MLX4_ACCESS_REG_STATUS_MASK = 0x7f, + MLX4_ACCESS_REG_METHOD_MASK = 0x7f, + MLX4_ACCESS_REG_LEN_MASK = 0x7ff +}; + +struct mlx4_access_reg { + __be16 constant1; + u8 status; + u8 resrvd1; + __be16 reg_id; + u8 method; + u8 constant2; + __be32 resrvd2[2]; + __be16 len_const; + __be16 resrvd3; +#define MLX4_ACCESS_REG_HEADER_SIZE (20) + u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE]; +} __attribute__((__packed__)); + 
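+
+/* Illustrative note, inferred from mlx4_ACCESS_REG() below rather than
+ * from any spec: len_const carries the transfer length in dwords,
+ * (reg_len / 4) + 1, in its low 11 bits, with a constant 0x3 in bits
+ * 12-13; e.g. a 64-byte access would be encoded as
+ * (64 / 4 + 1) | (0x3 << 12) = 0x3011.
+ */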
+/**
+ * mlx4_ACCESS_REG - Generic access reg command.
+ * @dev: mlx4_dev.
+ * @reg_id: register ID to access.
+ * @method: Access method Read/Write.
+ * @reg_len: register length to Read/Write in bytes.
+ * @reg_data: reg_data pointer to Read/Write From/To.
+ *
+ * Access ConnectX registers FW command.
+ * On success, copies the outbox mlx4_access_reg data field into
+ * reg_data and returns 0; otherwise returns a negative error code.
+ */
+static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id,
+			   enum mlx4_access_reg_method method,
+			   u16 reg_len, void *reg_data)
+{
+	struct mlx4_cmd_mailbox *inbox, *outbox;
+	struct mlx4_access_reg *inbuf, *outbuf;
+	int err;
+
+	inbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(inbox))
+		return PTR_ERR(inbox);
+
+	outbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(outbox)) {
+		mlx4_free_cmd_mailbox(dev, inbox);
+		return PTR_ERR(outbox);
+	}
+
+	inbuf = inbox->buf;
+	outbuf = outbox->buf;
+
+	inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4);
+	inbuf->constant2 = 0x1;
+	inbuf->reg_id = cpu_to_be16(reg_id);
+	inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK;
+
+	reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data)));
+	inbuf->len_const =
+		cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) |
+			    ((0x3) << 12));
+
+	memcpy(inbuf->reg_data, reg_data, reg_len);
+	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0,
+			   MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
+			   MLX4_CMD_WRAPPED);
+	if (err)
+		goto out;
+
+	if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) {
+		err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK;
+		mlx4_err(dev,
+			 "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n",
+			 reg_id, err);
+		goto out;
+	}
+
+	memcpy(reg_data, outbuf->reg_data, reg_len);
+out:
+	mlx4_free_cmd_mailbox(dev, inbox);
+	mlx4_free_cmd_mailbox(dev, outbox);
+	return err;
+}
+
+/* ConnectX register IDs */
+enum mlx4_reg_id {
+	MLX4_REG_ID_PTYS = 0x5004,
+};
+
+/**
+ * mlx4_ACCESS_PTYS_REG - Access PTYS (Port Type and Speed)
+ * register
+ * @dev: mlx4_dev.
+ * @method: Access method Read/Write.
+ * @ptys_reg: PTYS register data pointer.
+ *
+ * Access ConnectX PTYS register, to Read/Write Port Type/Speed
+ * configuration.
+ * Returns 0 on success or a negative error code.
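+ *
+ * Usage sketch (MLX4_ACCESS_REG_QUERY and the eth_proto_admin field are
+ * assumed from the PTYS register layout; they are not defined in this
+ * hunk):
+ *
+ *	struct mlx4_ptys_reg ptys_reg = { .local_port = port };
+ *	err = mlx4_ACCESS_PTYS_REG(dev, MLX4_ACCESS_REG_QUERY, &ptys_reg);
+ *	if (!err)
+ *		proto_admin = be32_to_cpu(ptys_reg.eth_proto_admin);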
+ */ +int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev, + enum mlx4_access_reg_method method, + struct mlx4_ptys_reg *ptys_reg) +{ + return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS, + method, sizeof(*ptys_reg), ptys_reg); +} +EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG); + +int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_access_reg *inbuf = inbox->buf; + u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK; + u16 reg_id = be16_to_cpu(inbuf->reg_id); + + if (slave != mlx4_master_func_num(dev) && + method == MLX4_ACCESS_REG_WRITE) + return -EPERM; + + if (reg_id == MLX4_REG_ID_PTYS) { + struct mlx4_ptys_reg *ptys_reg = + (struct mlx4_ptys_reg *)inbuf->reg_data; + + ptys_reg->local_port = + mlx4_slave_convert_port(dev, slave, + ptys_reg->local_port); + } + + return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier, + 0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 9b835aecac96c5aa87f3bbc7b256f2fa4da164b7..794e2826609a32af4eb3205dc8dfb137dbaabc37 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -43,6 +43,26 @@ struct mlx4_mod_stat_cfg { u8 log_pg_sz_m; }; +struct mlx4_port_cap { + u8 supported_port_types; + u8 suggested_type; + u8 default_sense; + u8 log_max_macs; + u8 log_max_vlans; + int ib_mtu; + int max_port_width; + int max_vl; + int max_gids; + int max_pkeys; + u64 def_mac; + u16 eth_mtu; + int trans_type; + int vendor_oui; + u16 wavelength; + u64 trans_code; + u8 dmfs_optimized_state; +}; + struct mlx4_dev_cap { int max_srq_sz; int max_qp_sz; @@ -56,6 +76,7 @@ struct mlx4_dev_cap { int max_mpts; int reserved_eqs; int max_eqs; + int num_sys_eqs; int reserved_mtts; int max_mrw_sz; int reserved_mrws; @@ -66,17 +87,6 @@ struct mlx4_dev_cap { int local_ca_ack_delay; int num_ports; u32 max_msg_sz; - int ib_mtu[MLX4_MAX_PORTS + 1]; - int max_port_width[MLX4_MAX_PORTS + 1]; - int max_vl[MLX4_MAX_PORTS + 1]; - int max_gids[MLX4_MAX_PORTS + 1]; - int max_pkeys[MLX4_MAX_PORTS + 1]; - u64 def_mac[MLX4_MAX_PORTS + 1]; - u16 eth_mtu[MLX4_MAX_PORTS + 1]; - int trans_type[MLX4_MAX_PORTS + 1]; - int vendor_oui[MLX4_MAX_PORTS + 1]; - u16 wavelength[MLX4_MAX_PORTS + 1]; - u64 trans_code[MLX4_MAX_PORTS + 1]; u16 stat_rate_support; int fs_log_max_ucast_qp_range_size; int fs_max_num_qp_per_entry; @@ -114,12 +124,10 @@ struct mlx4_dev_cap { u64 max_icm_sz; int max_gso_sz; int max_rss_tbl_sz; - u8 supported_port_types[MLX4_MAX_PORTS + 1]; - u8 suggested_type[MLX4_MAX_PORTS + 1]; - u8 default_sense[MLX4_MAX_PORTS + 1]; - u8 log_max_macs[MLX4_MAX_PORTS + 1]; - u8 log_max_vlans[MLX4_MAX_PORTS + 1]; u32 max_counters; + u32 dmfs_high_rate_qpn_base; + u32 dmfs_high_rate_qpn_range; + struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1]; }; struct mlx4_func_cap { @@ -143,6 +151,17 @@ struct mlx4_func_cap { u8 port_flags; u8 flags1; u64 phys_port_id; + u32 extra_flags; +}; + +struct mlx4_func { + int bus; + int device; + int function; + int physical_function; + int rsvd_eqs; + int max_eq; + int rsvd_uars; }; struct mlx4_adapter { @@ -170,6 +189,7 @@ struct mlx4_init_hca_param { u8 log_num_srqs; u8 log_num_cqs; u8 log_num_eqs; + u16 num_sys_eqs; u8 log_rd_per_qp; u8 log_mc_table_sz; u8 log_mpt_sz; @@ -177,6 +197,7 @@ struct mlx4_init_hca_param { u8 mw_enabled; /* Enable memory windows */ u8 
uar_page_sz; /* log pg sz in 4k chunks */ u8 steering_mode; /* for QUERY_HCA */ + u8 dmfs_high_steer_mode; /* for QUERY_HCA */ u64 dev_cap_enabled; u16 cqe_size; /* For use only when CQE stride feature enabled */ u16 eqe_size; /* For use only when EQE stride feature enabled */ @@ -204,13 +225,15 @@ struct mlx4_set_ib_param { }; int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap); -int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, +int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap); +int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, struct mlx4_func_cap *func_cap); int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd); +int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave); int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm); int mlx4_UNMAP_FA(struct mlx4_dev *dev); int mlx4_RUN_FW(struct mlx4_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 90de6e1ad06e9eb0bf002d9c1f87a5d80019fffe..e25436b24ce7a50b4de4f7c425099bb6dc7bd2a8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -105,7 +105,8 @@ MODULE_PARM_DESC(enable_64b_cqe_eqe, "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)"); #define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \ - MLX4_FUNC_CAP_EQE_CQE_STRIDE) + MLX4_FUNC_CAP_EQE_CQE_STRIDE | \ + MLX4_FUNC_CAP_DMFS_A0_STATIC) static char mlx4_version[] = DRV_NAME ": Mellanox ConnectX core driver v" @@ -197,6 +198,29 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev) dev->caps.port_mask[i] = dev->caps.port_type[i]; } +enum { + MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0, +}; + +static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) +{ + int err = 0; + struct mlx4_func func; + + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { + err = mlx4_QUERY_FUNC(dev, &func, 0); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + return err; + } + dev_cap->max_eqs = func.max_eq; + dev_cap->reserved_eqs = func.rsvd_eqs; + dev_cap->reserved_uars = func.rsvd_uars; + err |= MLX4_QUERY_FUNC_NUM_SYS_EQS; + } + return err; +} + static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev) { struct mlx4_caps *dev_cap = &dev->caps; @@ -231,6 +255,46 @@ static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev) } } +static int _mlx4_dev_port(struct mlx4_dev *dev, int port, + struct mlx4_port_cap *port_cap) +{ + dev->caps.vl_cap[port] = port_cap->max_vl; + dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu; + dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids; + dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys; + /* set gid and pkey table operating lengths by default + * to non-sriov values + */ + dev->caps.gid_table_len[port] = port_cap->max_gids; + dev->caps.pkey_table_len[port] = port_cap->max_pkeys; + dev->caps.port_width_cap[port] = port_cap->max_port_width; + dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu; + dev->caps.def_mac[port] = port_cap->def_mac; + dev->caps.supported_type[port] = port_cap->supported_port_types; + dev->caps.suggested_type[port] = port_cap->suggested_type; + dev->caps.default_sense[port] = port_cap->default_sense; + dev->caps.trans_type[port] = port_cap->trans_type; + dev->caps.vendor_oui[port] = port_cap->vendor_oui; + 
dev->caps.wavelength[port] = port_cap->wavelength; + dev->caps.trans_code[port] = port_cap->trans_code; + + return 0; +} + +static int mlx4_dev_port(struct mlx4_dev *dev, int port, + struct mlx4_port_cap *port_cap) +{ + int err = 0; + + err = mlx4_QUERY_PORT(dev, port, port_cap); + + if (err) + mlx4_err(dev, "QUERY_PORT command failed.\n"); + + return err; +} + +#define MLX4_A0_STEERING_TABLE_SIZE 256 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { int err; @@ -261,26 +325,16 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) } dev->caps.num_ports = dev_cap->num_ports; - dev->phys_caps.num_phys_eqs = MLX4_MAX_EQ_NUM; + dev->caps.num_sys_eqs = dev_cap->num_sys_eqs; + dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ? + dev->caps.num_sys_eqs : + MLX4_MAX_EQ_NUM; for (i = 1; i <= dev->caps.num_ports; ++i) { - dev->caps.vl_cap[i] = dev_cap->max_vl[i]; - dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; - dev->phys_caps.gid_phys_table_len[i] = dev_cap->max_gids[i]; - dev->phys_caps.pkey_phys_table_len[i] = dev_cap->max_pkeys[i]; - /* set gid and pkey table operating lengths by default - * to non-sriov values */ - dev->caps.gid_table_len[i] = dev_cap->max_gids[i]; - dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i]; - dev->caps.port_width_cap[i] = dev_cap->max_port_width[i]; - dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i]; - dev->caps.def_mac[i] = dev_cap->def_mac[i]; - dev->caps.supported_type[i] = dev_cap->supported_port_types[i]; - dev->caps.suggested_type[i] = dev_cap->suggested_type[i]; - dev->caps.default_sense[i] = dev_cap->default_sense[i]; - dev->caps.trans_type[i] = dev_cap->trans_type[i]; - dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i]; - dev->caps.wavelength[i] = dev_cap->wavelength[i]; - dev->caps.trans_code[i] = dev_cap->trans_code[i]; + err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i); + if (err) { + mlx4_err(dev, "QUERY_PORT command failed, aborting\n"); + return err; + } } dev->caps.uar_page_size = PAGE_SIZE; @@ -389,13 +443,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.possible_type[i] = dev->caps.port_type[i]; } - if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) { - dev->caps.log_num_macs = dev_cap->log_max_macs[i]; + if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) { + dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs; mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n", i, 1 << dev->caps.log_num_macs); } - if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) { - dev->caps.log_num_vlans = dev_cap->log_max_vlans[i]; + if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) { + dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans; mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n", i, 1 << dev->caps.log_num_vlans); } @@ -411,6 +465,28 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.num_ports; dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; + if (dev_cap->dmfs_high_rate_qpn_base > 0 && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) + dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base; + else + dev->caps.dmfs_high_rate_qpn_base = + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; + + if (dev_cap->dmfs_high_rate_qpn_range > 0 && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) { + dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range; + 
dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT; + dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0; + } else { + dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED; + dev->caps.dmfs_high_rate_qpn_base = + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; + dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE; + } + + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] = + dev->caps.dmfs_high_rate_qpn_range; + dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] + @@ -440,8 +516,14 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) mlx4_is_master(dev)) dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; - if (!mlx4_is_slave(dev)) + if (!mlx4_is_slave(dev)) { mlx4_enable_cqe_eqe_stride(dev); + dev->caps.alloc_res_qp_mask = + (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) | + MLX4_RESERVE_A0_QP; + } else { + dev->caps.alloc_res_qp_mask = 0; + } return 0; } @@ -631,7 +713,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) struct mlx4_dev_cap dev_cap; struct mlx4_func_cap func_cap; struct mlx4_init_hca_param hca_param; - int i; + u8 i; memset(&hca_param, 0, sizeof(hca_param)); err = mlx4_QUERY_HCA(dev, &hca_param); @@ -692,7 +774,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) != PF_CONTEXT_BEHAVIOUR_MASK) { - mlx4_err(dev, "Unknown pf context behaviour\n"); + mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n", + func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK); return -ENOSYS; } @@ -732,7 +815,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) } for (i = 1; i <= dev->caps.num_ports; ++i) { - err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap); + err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap); if (err) { mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n", i, err); @@ -791,6 +874,13 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) slave_adjust_steering_mode(dev, &dev_cap, &hca_param); + if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP && + dev->caps.bf_reg_size) + dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP; + + if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP) + dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP; + return 0; err_mem: @@ -901,9 +991,12 @@ static ssize_t set_port_type(struct device *dev, struct mlx4_priv *priv = mlx4_priv(mdev); enum mlx4_port_type types[MLX4_MAX_PORTS]; enum mlx4_port_type new_types[MLX4_MAX_PORTS]; + static DEFINE_MUTEX(set_port_type_mutex); int i; int err = 0; + mutex_lock(&set_port_type_mutex); + if (!strcmp(buf, "ib\n")) info->tmp_type = MLX4_PORT_TYPE_IB; else if (!strcmp(buf, "eth\n")) @@ -912,7 +1005,8 @@ static ssize_t set_port_type(struct device *dev, info->tmp_type = MLX4_PORT_TYPE_AUTO; else { mlx4_err(mdev, "%s is not supported port type\n", buf); - return -EINVAL; + err = -EINVAL; + goto err_out; } mlx4_stop_sense(mdev); @@ -958,6 +1052,9 @@ static ssize_t set_port_type(struct device *dev, out: mlx4_start_sense(mdev); mutex_unlock(&priv->port_mutex); +err_out: + mutex_unlock(&set_port_type_mutex); + return err ? err : count; } @@ -1123,8 +1220,7 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, if (err) goto err_srq; - num_eqs = (mlx4_is_master(dev)) ? 
dev->phys_caps.num_phys_eqs : - dev->caps.num_eqs; + num_eqs = dev->phys_caps.num_phys_eqs; err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, cmpt_base + ((u64) (MLX4_CMPT_TYPE_EQ * @@ -1186,8 +1282,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, } - num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs : - dev->caps.num_eqs; + num_eqs = dev->phys_caps.num_phys_eqs; err = mlx4_init_icm_table(dev, &priv->eq_table.table, init_hca->eqc_base, dev_cap->eqc_entry_sz, num_eqs, num_eqs, 0, 0); @@ -1466,6 +1561,12 @@ static void mlx4_close_hca(struct mlx4_dev *dev) else { mlx4_CLOSE_HCA(dev, 0); mlx4_free_icms(dev); + } +} + +static void mlx4_close_fw(struct mlx4_dev *dev) +{ + if (!mlx4_is_slave(dev)) { mlx4_UNMAP_FA(dev); mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); } @@ -1561,10 +1662,46 @@ static int choose_log_fs_mgm_entry_size(int qp_per_entry) return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1; } +static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode) +{ + switch (dmfs_high_steer_mode) { + case MLX4_STEERING_DMFS_A0_DEFAULT: + return "default performance"; + + case MLX4_STEERING_DMFS_A0_DYNAMIC: + return "dynamic hybrid mode"; + + case MLX4_STEERING_DMFS_A0_STATIC: + return "performance optimized for limited rule configuration (static)"; + + case MLX4_STEERING_DMFS_A0_DISABLE: + return "disabled performance optimized steering"; + + case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED: + return "performance optimized steering not supported"; + + default: + return "Unrecognized mode"; + } +} + +#define MLX4_DMFS_A0_STEERING (1UL << 2) + static void choose_steering_mode(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { - if (mlx4_log_num_mgm_entry_size == -1 && + if (mlx4_log_num_mgm_entry_size <= 0) { + if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) { + if (dev->caps.dmfs_high_steer_mode == + MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) + mlx4_err(dev, "DMFS high rate mode not supported\n"); + else + dev->caps.dmfs_high_steer_mode = + MLX4_STEERING_DMFS_A0_STATIC; + } + } + + if (mlx4_log_num_mgm_entry_size <= 0 && dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && (!mlx4_is_mfunc(dev) || (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) && @@ -1577,6 +1714,9 @@ static void choose_steering_mode(struct mlx4_dev *dev, dev->caps.fs_log_max_ucast_qp_range_size = dev_cap->fs_log_max_ucast_qp_range_size; } else { + if (dev->caps.dmfs_high_steer_mode != + MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) + dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE; if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER && dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) dev->caps.steering_mode = MLX4_STEERING_MODE_B0; @@ -1603,7 +1743,8 @@ static void choose_tunnel_offload_mode(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && - dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) + dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS && + dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC) dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; else dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; @@ -1612,16 +1753,39 @@ static void choose_tunnel_offload_mode(struct mlx4_dev *dev, == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? 
"vxlan" : "none"); } -static int mlx4_init_hca(struct mlx4_dev *dev) +static int mlx4_validate_optimized_steering(struct mlx4_dev *dev) +{ + int i; + struct mlx4_port_cap port_cap; + + if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) + return -EINVAL; + + for (i = 1; i <= dev->caps.num_ports; i++) { + if (mlx4_dev_port(dev, i, &port_cap)) { + mlx4_err(dev, + "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n"); + } else if ((dev->caps.dmfs_high_steer_mode != + MLX4_STEERING_DMFS_A0_DEFAULT) && + (port_cap.dmfs_optimized_state == + !!(dev->caps.dmfs_high_steer_mode == + MLX4_STEERING_DMFS_A0_DISABLE))) { + mlx4_err(dev, + "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n", + dmfs_high_rate_steering_mode_str( + dev->caps.dmfs_high_steer_mode), + (port_cap.dmfs_optimized_state ? + "enabled" : "disabled")); + } + } + + return 0; +} + +static int mlx4_init_fw(struct mlx4_dev *dev) { - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_adapter adapter; - struct mlx4_dev_cap dev_cap; struct mlx4_mod_stat_cfg mlx4_cfg; - struct mlx4_profile profile; - struct mlx4_init_hca_param init_hca; - u64 icm_size; - int err; + int err = 0; if (!mlx4_is_slave(dev)) { err = mlx4_QUERY_FW(dev); @@ -1644,7 +1808,23 @@ static int mlx4_init_hca(struct mlx4_dev *dev) err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); if (err) mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); + } + + return err; +} + +static int mlx4_init_hca(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_adapter adapter; + struct mlx4_dev_cap dev_cap; + struct mlx4_profile profile; + struct mlx4_init_hca_param init_hca; + u64 icm_size; + struct mlx4_config_dev_params params; + int err; + if (!mlx4_is_slave(dev)) { err = mlx4_dev_cap(dev, &dev_cap); if (err) { mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); @@ -1654,6 +1834,10 @@ static int mlx4_init_hca(struct mlx4_dev *dev) choose_steering_mode(dev, &dev_cap); choose_tunnel_offload_mode(dev, &dev_cap); + if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC && + mlx4_is_master(dev)) + dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC; + err = mlx4_get_phys_port_id(dev); if (err) mlx4_err(dev, "Fail to get physical port id\n"); @@ -1696,6 +1880,19 @@ static int mlx4_init_hca(struct mlx4_dev *dev) mlx4_err(dev, "INIT_HCA command failed, aborting\n"); goto err_free_icm; } + + if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { + err = mlx4_query_func(dev, &dev_cap); + if (err < 0) { + mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); + goto err_stop_fw; + } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { + dev->caps.num_eqs = dev_cap.max_eqs; + dev->caps.reserved_eqs = dev_cap.reserved_eqs; + dev->caps.reserved_uars = dev_cap.reserved_uars; + } + } + /* * If TS is supported by FW * read HCA frequency by QUERY_HCA command @@ -1727,6 +1924,24 @@ static int mlx4_init_hca(struct mlx4_dev *dev) mlx4_err(dev, "Failed to map internal clock. 
Timestamping is not supported\n"); } } + + if (dev->caps.dmfs_high_steer_mode != + MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) { + if (mlx4_validate_optimized_steering(dev)) + mlx4_warn(dev, "Optimized steering validation failed\n"); + + if (dev->caps.dmfs_high_steer_mode == + MLX4_STEERING_DMFS_A0_DISABLE) { + dev->caps.dmfs_high_rate_qpn_base = + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; + dev->caps.dmfs_high_rate_qpn_range = + MLX4_A0_STEERING_TABLE_SIZE; + } + + mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n", + dmfs_high_rate_steering_mode_str( + dev->caps.dmfs_high_steer_mode)); + } } else { err = mlx4_init_slave(dev); if (err) { @@ -1755,6 +1970,14 @@ static int mlx4_init_hca(struct mlx4_dev *dev) goto unmap_bf; } + /* Query CONFIG_DEV parameters */ + err = mlx4_config_dev_retrieval(dev, ¶ms); + if (err && err != -ENOTSUPP) { + mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n"); + } else if (!err) { + dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1; + dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2; + } priv->eq_table.inta_pin = adapter.inta_pin; memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); @@ -2054,12 +2277,11 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct msix_entry *entries; - int nreq = min_t(int, dev->caps.num_ports * - min_t(int, num_online_cpus() + 1, - MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX); int i; if (msi_x) { + int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ; + nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, nreq); @@ -2259,6 +2481,71 @@ static void mlx4_free_ownership(struct mlx4_dev *dev) iounmap(owner); } +#define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\ + !!((flags) & MLX4_FLAG_MASTER)) + +static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, + u8 total_vfs, int existing_vfs) +{ + u64 dev_flags = dev->flags; + + dev->dev_vfs = kzalloc( + total_vfs * sizeof(*dev->dev_vfs), + GFP_KERNEL); + if (NULL == dev->dev_vfs) { + mlx4_err(dev, "Failed to allocate memory for VFs\n"); + goto disable_sriov; + } else if (!(dev->flags & MLX4_FLAG_SRIOV)) { + int err = 0; + + atomic_inc(&pf_loading); + if (existing_vfs) { + if (existing_vfs != total_vfs) + mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", + existing_vfs, total_vfs); + } else { + mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); + err = pci_enable_sriov(pdev, total_vfs); + } + if (err) { + mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", + err); + atomic_dec(&pf_loading); + goto disable_sriov; + } else { + mlx4_warn(dev, "Running in master mode\n"); + dev_flags |= MLX4_FLAG_SRIOV | + MLX4_FLAG_MASTER; + dev_flags &= ~MLX4_FLAG_SLAVE; + dev->num_vfs = total_vfs; + } + } + return dev_flags; + +disable_sriov: + dev->num_vfs = 0; + kfree(dev->dev_vfs); + return dev_flags & ~MLX4_FLAG_MASTER; +} + +enum { + MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1, +}; + +static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, + int *nvfs) +{ + int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2]; + /* Checking for 64 VFs as a limitation of CX2 */ + if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) && + requested_vfs >= 64) { + mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n", + requested_vfs); + return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64; + } + return 0; +} + static int mlx4_load_one(struct pci_dev *pdev, int 
pci_dev_data, int total_vfs, int *nvfs, struct mlx4_priv *priv) { @@ -2267,6 +2554,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, int err; int port; int i; + struct mlx4_dev_cap *dev_cap = NULL; int existing_vfs = 0; dev = &priv->dev; @@ -2303,40 +2591,6 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, } } - if (total_vfs) { - mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", - total_vfs); - dev->dev_vfs = kzalloc( - total_vfs * sizeof(*dev->dev_vfs), - GFP_KERNEL); - if (NULL == dev->dev_vfs) { - mlx4_err(dev, "Failed to allocate memory for VFs\n"); - err = -ENOMEM; - goto err_free_own; - } else { - atomic_inc(&pf_loading); - existing_vfs = pci_num_vf(pdev); - if (existing_vfs) { - err = 0; - if (existing_vfs != total_vfs) - mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", - existing_vfs, total_vfs); - } else { - err = pci_enable_sriov(pdev, total_vfs); - } - if (err) { - mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", - err); - atomic_dec(&pf_loading); - } else { - mlx4_warn(dev, "Running in master mode\n"); - dev->flags |= MLX4_FLAG_SRIOV | - MLX4_FLAG_MASTER; - dev->num_vfs = total_vfs; - } - } - } - atomic_set(&priv->opreq_count, 0); INIT_WORK(&priv->opreq_task, mlx4_opreq_action); @@ -2350,6 +2604,12 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, mlx4_err(dev, "Failed to reset HCA, aborting\n"); goto err_sriov; } + + if (total_vfs) { + existing_vfs = pci_num_vf(pdev); + dev->flags = MLX4_FLAG_MASTER; + dev->num_vfs = total_vfs; + } } slave_start: @@ -2363,9 +2623,10 @@ slave_start: * before posting commands. Also, init num_slaves before calling * mlx4_init_hca */ if (mlx4_is_mfunc(dev)) { - if (mlx4_is_master(dev)) + if (mlx4_is_master(dev)) { dev->num_slaves = MLX4_MAX_NUM_SLAVES; - else { + + } else { dev->num_slaves = 0; err = mlx4_multi_func_init(dev); if (err) { @@ -2375,17 +2636,109 @@ slave_start: } } + err = mlx4_init_fw(dev); + if (err) { + mlx4_err(dev, "Failed to init fw, aborting.\n"); + goto err_mfunc; + } + + if (mlx4_is_master(dev)) { + if (!dev_cap) { + dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); + + if (!dev_cap) { + err = -ENOMEM; + goto err_fw; + } + + err = mlx4_QUERY_DEV_CAP(dev, dev_cap); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + goto err_fw; + } + + if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) + goto err_fw; + + if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { + u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, + existing_vfs); + + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); + dev->flags = dev_flags; + if (!SRIOV_VALID_STATE(dev->flags)) { + mlx4_err(dev, "Invalid SRIOV state\n"); + goto err_sriov; + } + err = mlx4_reset(dev); + if (err) { + mlx4_err(dev, "Failed to reset HCA, aborting.\n"); + goto err_sriov; + } + goto slave_start; + } + } else { + /* Legacy mode FW requires SRIOV to be enabled before + * doing QUERY_DEV_CAP, since max_eq's value is different if + * SRIOV is enabled. 
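+			 * (Firmware that advertises MLX4_DEV_CAP_FLAG2_SYS_EQS has no
+			 * such ordering constraint: its EQ limits are re-read via
+			 * QUERY_FUNC after INIT_HCA and SR-IOV is only enabled
+			 * afterwards; see the SYS_EQS handling around mlx4_init_hca().)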
+ */ + memset(dev_cap, 0, sizeof(*dev_cap)); + err = mlx4_QUERY_DEV_CAP(dev, dev_cap); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + goto err_fw; + } + + if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) + goto err_fw; + } + } + err = mlx4_init_hca(dev); if (err) { if (err == -EACCES) { /* Not primary Physical function * Running in slave mode */ - mlx4_cmd_cleanup(dev); + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); + /* We're not a PF */ + if (dev->flags & MLX4_FLAG_SRIOV) { + if (!existing_vfs) + pci_disable_sriov(pdev); + if (mlx4_is_master(dev)) + atomic_dec(&pf_loading); + dev->flags &= ~MLX4_FLAG_SRIOV; + } + if (!mlx4_is_slave(dev)) + mlx4_free_ownership(dev); dev->flags |= MLX4_FLAG_SLAVE; dev->flags &= ~MLX4_FLAG_MASTER; goto slave_start; } else - goto err_mfunc; + goto err_fw; + } + + if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { + u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, existing_vfs); + + if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) { + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR); + dev->flags = dev_flags; + err = mlx4_cmd_init(dev); + if (err) { + /* Only VHCR is cleaned up, so could still + * send FW commands + */ + mlx4_err(dev, "Failed to init VHCR command interface, aborting\n"); + goto err_close; + } + } else { + dev->flags = dev_flags; + } + + if (!SRIOV_VALID_STATE(dev->flags)) { + mlx4_err(dev, "Invalid SRIOV state\n"); + goto err_close; + } } /* check if the device is functioning at its maximum possible speed. @@ -2540,12 +2893,15 @@ err_master_mfunc: err_close: mlx4_close_hca(dev); +err_fw: + mlx4_close_fw(dev); + err_mfunc: if (mlx4_is_slave(dev)) mlx4_multi_func_cleanup(dev); err_cmd: - mlx4_cmd_cleanup(dev); + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); err_sriov: if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) @@ -2556,10 +2912,10 @@ err_sriov: kfree(priv->dev.dev_vfs); -err_free_own: if (!mlx4_is_slave(dev)) mlx4_free_ownership(dev); + kfree(dev_cap); return err; } @@ -2787,15 +3143,17 @@ static void mlx4_unload_one(struct pci_dev *pdev) if (mlx4_is_master(dev)) mlx4_multi_func_cleanup(dev); mlx4_close_hca(dev); + mlx4_close_fw(dev); if (mlx4_is_slave(dev)) mlx4_multi_func_cleanup(dev); - mlx4_cmd_cleanup(dev); + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); if (dev->flags & MLX4_FLAG_MSI_X) pci_disable_msix(pdev); if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { mlx4_warn(dev, "Disabling SR-IOV\n"); pci_disable_sriov(pdev); + dev->flags &= ~MLX4_FLAG_SRIOV; dev->num_vfs = 0; } @@ -2956,10 +3314,11 @@ static int __init mlx4_verify_params(void) port_type_array[0] = true; } - if (mlx4_log_num_mgm_entry_size != -1 && - (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || - mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) { - pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n", + if (mlx4_log_num_mgm_entry_size < -7 || + (mlx4_log_num_mgm_entry_size > 0 && + (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || + mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) { + pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n", mlx4_log_num_mgm_entry_size, MLX4_MIN_MGM_LOG_ENTRY_SIZE, MLX4_MAX_MGM_LOG_ENTRY_SIZE); diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 872843179f44af3c7442614477bd2af131118347..a3867e7ef8859811c7d8ee132e966cfe26709a6e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ 
b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -999,12 +999,27 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
 	}
 
 	ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
-	if (ret == -ENOMEM)
+	if (ret == -ENOMEM) {
 		mlx4_err_rule(dev,
			      "mcg table is full. Failed to register network rule\n",
			      rule);
-	else if (ret)
-		mlx4_err_rule(dev, "Failed to register network rule\n", rule);
+	} else if (ret) {
+		if (ret == -ENXIO) {
+			if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+				mlx4_err_rule(dev,
+					      "DMFS is not enabled, failed to register network rule.\n",
+					      rule);
+			else
+				mlx4_err_rule(dev,
+					      "Rule exceeds the dmfs_high_rate_mode limitations, failed to register network rule.\n",
+					      rule);
+
+		} else {
+			mlx4_err_rule(dev, "Failed to register network rule.\n", rule);
+		}
+	}
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index de10dbb2e6ed519e958f6f4d77b75bdd91d3076f..bdd4eea2247cc1b09c19551a21d1c08f2c65e9d5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -43,6 +43,8 @@
 #include <linux/timer.h>
 #include <linux/semaphore.h>
 #include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/driver.h>
@@ -243,6 +245,7 @@ struct mlx4_bitmap {
 	u32			reserved_top;
 	u32			mask;
 	u32			avail;
+	u32			effective_len;
 	spinlock_t		lock;
 	unsigned long	       *table;
 };
@@ -373,6 +376,14 @@ struct mlx4_srq_context {
 	__be64			db_rec_addr;
 };
 
+struct mlx4_eq_tasklet {
+	struct list_head list;
+	struct list_head process_list;
+	struct tasklet_struct task;
+	/* lock on completion tasklet list */
+	spinlock_t lock;
+};
+
 struct mlx4_eq {
 	struct mlx4_dev	       *dev;
 	void __iomem	       *doorbell;
@@ -383,6 +394,7 @@ struct mlx4_eq {
 	int			nent;
 	struct mlx4_buf_list   *page_list;
 	struct mlx4_mtt		mtt;
+	struct mlx4_eq_tasklet	tasklet_ctx;
 };
 
 struct mlx4_slave_eqe {
@@ -606,6 +618,7 @@ struct mlx4_cmd {
 	u8			use_events;
 	u8			toggle;
 	u8			comm_toggle;
+	u8			initialized;
 };
 
 enum {
@@ -669,8 +682,17 @@ struct mlx4_srq_table {
 	struct mlx4_icm_table	cmpt_table;
 };
 
+enum mlx4_qp_table_zones {
+	MLX4_QP_TABLE_ZONE_GENERAL,
+	MLX4_QP_TABLE_ZONE_RSS,
+	MLX4_QP_TABLE_ZONE_RAW_ETH,
+	MLX4_QP_TABLE_ZONE_NUM
+};
+
 struct mlx4_qp_table {
-	struct mlx4_bitmap	bitmap;
+	struct mlx4_bitmap	*bitmap_gen;
+	struct mlx4_zone_allocator *zones;
+	u32			zones_uids[MLX4_QP_TABLE_ZONE_NUM];
 	u32			rdmarc_base;
 	int			rdmarc_shift;
 	spinlock_t		lock;
@@ -872,7 +894,8 @@ extern struct workqueue_struct *mlx4_wq;
 
 u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
 void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr);
-u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
+u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt,
+			    int align, u32 skip_mask);
 void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
 			    int use_rr);
 u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
@@ -947,13 +970,18 @@ int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
 			  struct mlx4_cmd_mailbox *inbox,
 			  struct mlx4_cmd_mailbox *outbox,
 			  struct mlx4_cmd_info *cmd);
+int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
+			    struct mlx4_vhcr *vhcr,
+			    struct mlx4_cmd_mailbox *inbox,
+			    struct mlx4_cmd_mailbox *outbox,
+			    struct mlx4_cmd_info *cmd);
 int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
 		     struct mlx4_vhcr *vhcr,
 		     struct mlx4_cmd_mailbox *inbox,
 		     struct mlx4_cmd_mailbox *outbox,
 		     struct mlx4_cmd_info *cmd);
 int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
-			    int *base);
+			    int *base, u8 flags);
 void
__mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
 int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
 void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
@@ -1121,8 +1149,16 @@ int mlx4_QUERY_QP_wrapper(struct mlx4_dev *dev, int slave,
 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);
 
+enum {
+	MLX4_CMD_CLEANUP_STRUCT = 1UL << 0,
+	MLX4_CMD_CLEANUP_POOL = 1UL << 1,
+	MLX4_CMD_CLEANUP_HCR = 1UL << 2,
+	MLX4_CMD_CLEANUP_VHCR = 1UL << 3,
+	MLX4_CMD_CLEANUP_ALL = (MLX4_CMD_CLEANUP_VHCR << 1) - 1
+};
+
 int mlx4_cmd_init(struct mlx4_dev *dev);
-void mlx4_cmd_cleanup(struct mlx4_dev *dev);
+void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask);
 int mlx4_multi_func_init(struct mlx4_dev *dev);
 void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
@@ -1132,6 +1168,7 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev);
 int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
 		  unsigned long timeout);
 
+void mlx4_cq_tasklet_cb(unsigned long data);
 void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
 void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
 
@@ -1273,6 +1310,11 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
 					 struct mlx4_cmd_mailbox *inbox,
 					 struct mlx4_cmd_mailbox *outbox,
 					 struct mlx4_cmd_info *cmd);
+int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
+			    struct mlx4_vhcr *vhcr,
+			    struct mlx4_cmd_mailbox *inbox,
+			    struct mlx4_cmd_mailbox *outbox,
+			    struct mlx4_cmd_info *cmd);
 
 int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
 int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
@@ -1313,4 +1355,72 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
 int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
 int mlx4_config_mad_demux(struct mlx4_dev *dev);
 
+enum mlx4_zone_flags {
+	MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO	= 1UL << 0,
+	MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO	= 1UL << 1,
+	MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO	= 1UL << 2,
+	MLX4_ZONE_USE_RR			= 1UL << 3,
+};
+
+enum mlx4_zone_alloc_flags {
+	/* No two objects could overlap between zones. UID
+	 * could be left unused. If this flag is given and
+	 * two overlapped zones are used, an object will be free'd
+	 * from the smallest possible matching zone.
+	 */
+	MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP	= 1UL << 0,
+};
+
+struct mlx4_zone_allocator;
+
+/* Create a new zone allocator */
+struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags);
+
+/* Attach a mlx4_bitmap <bitmap> of priority <priority> to the zone allocator
+ * <zone_alloc>. Allocating an object from this zone adds an offset <offset>.
+ * Similarly, when searching for an object to free, this offset is taken into
+ * account. The use_rr mlx4_ib parameter for allocating objects from this
+ * <bitmap> is given through the MLX4_ZONE_USE_RR flag in <flags>.
+ * When an allocation fails, <zone_alloc> tries to allocate from other zones
+ * according to the policy set by <flags>. <puid> is the unique identifier
+ * received for this zone.
+ */
+int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
+		      struct mlx4_bitmap *bitmap,
+		      u32 flags,
+		      int priority,
+		      int offset,
+		      u32 *puid);
+
+/* Remove bitmap indicated by <uid> from <zone_alloc> */
+int mlx4_zone_remove_one(struct mlx4_zone_allocator *zone_alloc, u32 uid);
+
+/* Delete the zone allocator <zone_alloc> */
+void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc);
+
+/* Allocate <count> objects with align <align> and skip_mask <skip_mask>
+ * from the mlx4_bitmap whose uid is <uid>. The bitmap which we actually
+ * allocated from is returned in <puid>. If the allocation fails, a negative
+ * number is returned. Otherwise, the offset of the first object is returned.
+ */
+u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
+			    int align, u32 skip_mask, u32 *puid);
+
+/* Free <count> objects, starting from <obj> of the uid <uid> from zone_allocator
+ * <zones>.
+ */
+u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones,
+			   u32 uid, u32 obj, u32 count);
+
+/* If <zones> was allocated with MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP, instead of
+ * specifying the uid when freeing an object, the zone allocator could figure it
+ * out by itself. Other parameters are similar to mlx4_zone_free.
+ */
+u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count);
+
+/* Returns a pointer to the mlx4_bitmap that was attached to <zones> with <uid> */
+struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid);
+
 #endif /* MLX4_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 8fef65840b3b5e4176d512b4a2e36d510a1da311..944a112dff374ef919a216e5ee13415b9f3974bd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -326,6 +326,7 @@ struct mlx4_en_rx_ring {
 #endif
 	unsigned long csum_ok;
 	unsigned long csum_none;
+	unsigned long csum_complete;
 	int hwtstamp_rx_filter;
 	cpumask_var_t affinity_mask;
 };
@@ -375,7 +376,6 @@ struct mlx4_en_port_profile {
 };
 
 struct mlx4_en_profile {
-	int rss_xor;
 	int udp_rss;
 	u8 rss_mask;
 	u32 active_ports;
@@ -421,10 +421,16 @@ struct mlx4_en_rss_map {
 	enum mlx4_qp_state indir_state;
 };
 
+enum mlx4_en_port_flag {
+	MLX4_EN_PORT_ANC	= 1<<0, /* Auto-negotiation complete */
+	MLX4_EN_PORT_ANE	= 1<<1, /* Auto-negotiation enabled */
+};
+
 struct mlx4_en_port_state {
 	int link_state;
 	int link_speed;
-	int transciver;
+	int transceiver;
+	u32 flags;
 };
 
 struct mlx4_en_pkt_stats {
@@ -443,6 +449,7 @@ struct mlx4_en_port_stats {
 	unsigned long rx_alloc_failed;
 	unsigned long rx_chksum_good;
 	unsigned long rx_chksum_none;
+	unsigned long rx_chksum_complete;
 	unsigned long tx_chksum_offload;
 #define NUM_PORT_STATS		9
 };
@@ -475,7 +482,6 @@ struct mlx4_en_frag_info {
 	u16 frag_size;
 	u16 frag_prefix_size;
 	u16 frag_stride;
-	u16 frag_align;
 };
 
 #ifdef CONFIG_MLX4_EN_DCB
@@ -502,7 +508,8 @@ enum {
 	MLX4_EN_FLAG_ENABLE_HW_LOOPBACK	= (1 << 2),
 	/* whether we need to drop packets that hardware loopback-ed */
 	MLX4_EN_FLAG_RX_FILTER_NEEDED	= (1 << 3),
-	MLX4_EN_FLAG_FORCE_PROMISC	= (1 << 4)
+	MLX4_EN_FLAG_FORCE_PROMISC	= (1 << 4),
+	MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP	= (1 << 5),
 };
 
 #define MLX4_EN_MAC_HASH_SIZE	(1 << BITS_PER_BYTE)
@@ -610,6 +617,8 @@ struct mlx4_en_priv {
 	__be16 vxlan_port;
 
 	u32 pflags;
+	u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
+	u8 rss_hash_fn;
 };
 
 enum mlx4_en_wol {
@@ -769,7 +778,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 			   struct mlx4_en_tx_ring **pring,
-			   int qpn, u32 size, u16 stride,
+			   u32 size, u16 stride,
 			   int node, int queue_index);
 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_tx_ring **pring);
@@ -829,6 +838,13 @@ void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
 void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
 void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
 
+#define DEV_FEATURE_CHANGED(dev, new_features, feature) \
+	((dev->features & feature) ^ (new_features & feature))
+
+int mlx4_en_reset_config(struct net_device *dev,
+			 struct hwtstamp_config ts_config,
+			 netdev_features_t new_features);
+
 /*
 * Functions for time
stamping */ @@ -838,9 +854,6 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev, u64 timestamp); void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev); void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev); -int mlx4_en_timestamp_config(struct net_device *dev, - int tx_type, - int rx_filter); /* Globals */ diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 193a6adb5d04d887e325f59793423c91414c1fcb..d6f549685c0fcd8a5cccf0c948536ed924024892 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -130,10 +130,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) err_out_free: for (i = 0; i <= buddy->max_order; ++i) - if (buddy->bits[i] && is_vmalloc_addr(buddy->bits[i])) - vfree(buddy->bits[i]); - else - kfree(buddy->bits[i]); + kvfree(buddy->bits[i]); err_out: kfree(buddy->bits); @@ -147,10 +144,7 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy) int i; for (i = 0; i <= buddy->max_order; ++i) - if (is_vmalloc_addr(buddy->bits[i])) - vfree(buddy->bits[i]); - else - kfree(buddy->bits[i]); + kvfree(buddy->bits[i]); kfree(buddy->bits); kfree(buddy->num_free); diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 94eeb2c7d7e439725be63bf48cd061d361816a9c..30eb1ead0fe6ee8942aeb1aefb57971d35590bcf 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -1311,3 +1311,159 @@ int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id, return 0; } EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave); + +/* Cable Module Info */ +#define MODULE_INFO_MAX_READ 48 + +#define I2C_ADDR_LOW 0x50 +#define I2C_ADDR_HIGH 0x51 +#define I2C_PAGE_SIZE 256 + +/* Module Info Data */ +struct mlx4_cable_info { + u8 i2c_addr; + u8 page_num; + __be16 dev_mem_address; + __be16 reserved1; + __be16 size; + __be32 reserved2[2]; + u8 data[MODULE_INFO_MAX_READ]; +}; + +enum cable_info_err { + CABLE_INF_INV_PORT = 0x1, + CABLE_INF_OP_NOSUP = 0x2, + CABLE_INF_NOT_CONN = 0x3, + CABLE_INF_NO_EEPRM = 0x4, + CABLE_INF_PAGE_ERR = 0x5, + CABLE_INF_INV_ADDR = 0x6, + CABLE_INF_I2C_ADDR = 0x7, + CABLE_INF_QSFP_VIO = 0x8, + CABLE_INF_I2C_BUSY = 0x9, +}; + +#define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF) + +static inline const char *cable_info_mad_err_str(u16 mad_status) +{ + u8 err = MAD_STATUS_2_CABLE_ERR(mad_status); + + switch (err) { + case CABLE_INF_INV_PORT: + return "invalid port selected"; + case CABLE_INF_OP_NOSUP: + return "operation not supported for this port (the port is of type CX4 or internal)"; + case CABLE_INF_NOT_CONN: + return "cable is not connected"; + case CABLE_INF_NO_EEPRM: + return "the connected cable has no EPROM (passive copper cable)"; + case CABLE_INF_PAGE_ERR: + return "page number is greater than 15"; + case CABLE_INF_INV_ADDR: + return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)"; + case CABLE_INF_I2C_ADDR: + return "invalid I2C slave address"; + case CABLE_INF_QSFP_VIO: + return "at least one cable violates the QSFP specification and ignores the modsel signal"; + case CABLE_INF_I2C_BUSY: + return "I2C bus is constantly busy"; + } + return "Unknown Error"; +} + +/** + * mlx4_get_module_info - Read cable module eeprom data + * @dev: mlx4_dev. + * @port: port number. + * @offset: byte offset in eeprom to start reading data from. + * @size: num of bytes to read. 
+ * @data: output buffer to put the requested data into.
+ *
+ * Reads cable module eeprom data and puts the result into
+ * the data pointer parameter.
+ * Returns the number of bytes read on success or a negative error
+ * code.
+ */
+int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
+			 u16 offset, u16 size, u8 *data)
+{
+	struct mlx4_cmd_mailbox *inbox, *outbox;
+	struct mlx4_mad_ifc *inmad, *outmad;
+	struct mlx4_cable_info *cable_info;
+	u16 i2c_addr;
+	int ret;
+
+	if (size > MODULE_INFO_MAX_READ)
+		size = MODULE_INFO_MAX_READ;
+
+	inbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(inbox))
+		return PTR_ERR(inbox);
+
+	outbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(outbox)) {
+		mlx4_free_cmd_mailbox(dev, inbox);
+		return PTR_ERR(outbox);
+	}
+
+	inmad = (struct mlx4_mad_ifc *)(inbox->buf);
+	outmad = (struct mlx4_mad_ifc *)(outbox->buf);
+
+	inmad->method = 0x1; /* Get */
+	inmad->class_version = 0x1;
+	inmad->mgmt_class = 0x1;
+	inmad->base_version = 0x1;
+	inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
+
+	if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE)
+		/* Cross-page reads are not allowed;
+		 * read until offset 256 in the low page.
+		 */
+		size -= offset + size - I2C_PAGE_SIZE;
+
+	i2c_addr = I2C_ADDR_LOW;
+	if (offset >= I2C_PAGE_SIZE) {
+		/* Reset offset to high page */
+		i2c_addr = I2C_ADDR_HIGH;
+		offset -= I2C_PAGE_SIZE;
+	}
+
+	cable_info = (struct mlx4_cable_info *)inmad->data;
+	cable_info->dev_mem_address = cpu_to_be16(offset);
+	cable_info->page_num = 0;
+	cable_info->i2c_addr = i2c_addr;
+	cable_info->size = cpu_to_be16(size);
+
+	ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
+			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+			   MLX4_CMD_NATIVE);
+	if (ret)
+		goto out;
+
+	if (be16_to_cpu(outmad->status)) {
+		/* MAD returned with bad status */
+		ret = be16_to_cpu(outmad->status);
+		mlx4_warn(dev,
+			  "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
+			  0xFF60, port, i2c_addr, offset, size,
+			  ret, cable_info_mad_err_str(ret));
+
+		if (i2c_addr == I2C_ADDR_HIGH &&
+		    MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR)
+			/* Some SFP cables do not support i2c slave
+			 * address 0x51 (high page), abort silently.
+			 */
+			ret = 0;
+		else
+			ret = -ret;
+		goto out;
+	}
+	cable_info = (struct mlx4_cable_info *)outmad->data;
+	memcpy(data, cable_info->data, size);
+	ret = size;
+out:
+	mlx4_free_cmd_mailbox(dev, inbox);
+	mlx4_free_cmd_mailbox(dev, outbox);
+	return ret;
+}
+EXPORT_SYMBOL(mlx4_get_module_info);
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 14089d9e1667fcc4287fe08ca34590e279f66561..2bf437aafc537c8afc979f7aa3ab5f6611dcc4c3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -126,8 +126,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 	profile[MLX4_RES_AUXC].num    = request->num_qp;
 	profile[MLX4_RES_SRQ].num     = request->num_srq;
 	profile[MLX4_RES_CQ].num      = request->num_cq;
-	profile[MLX4_RES_EQ].num      = mlx4_is_mfunc(dev) ?
-					dev->phys_caps.num_phys_eqs :
+	profile[MLX4_RES_EQ].num      = mlx4_is_mfunc(dev) ?
dev->phys_caps.num_phys_eqs : min_t(unsigned, dev_cap->max_eqs, MAX_MSIX); profile[MLX4_RES_DMPT].num = request->num_mpt; profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; @@ -216,10 +215,18 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, init_hca->log_num_cqs = profile[i].log_num; break; case MLX4_RES_EQ: - dev->caps.num_eqs = roundup_pow_of_two(min_t(unsigned, dev_cap->max_eqs, - MAX_MSIX)); - init_hca->eqc_base = profile[i].start; - init_hca->log_num_eqs = ilog2(dev->caps.num_eqs); + if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { + init_hca->log_num_eqs = 0x1f; + init_hca->eqc_base = profile[i].start; + init_hca->num_sys_eqs = dev_cap->num_sys_eqs; + } else { + dev->caps.num_eqs = roundup_pow_of_two( + min_t(unsigned, + dev_cap->max_eqs, + MAX_MSIX)); + init_hca->eqc_base = profile[i].start; + init_hca->log_num_eqs = ilog2(dev->caps.num_eqs); + } break; case MLX4_RES_DMPT: dev->caps.num_mpts = profile[i].num; diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 2301365c79c710b01d07d4a8be7353f2a371e03a..1586ecce13c719b1eaddfb43f0d96d80e27b6389 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -42,6 +42,10 @@ #include "mlx4.h" #include "icm.h" +/* QP to support BF should have bits 6,7 cleared */ +#define MLX4_BF_QP_SKIP_MASK 0xc0 +#define MLX4_MAX_BF_QP_RANGE 0x40 + void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) { struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; @@ -207,26 +211,45 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, EXPORT_SYMBOL_GPL(mlx4_qp_modify); int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, - int *base) + int *base, u8 flags) { + u32 uid; + int bf_qp = !!(flags & (u8)MLX4_RESERVE_ETH_BF_QP); + struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_qp_table *qp_table = &priv->qp_table; - *base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align); + if (cnt > MLX4_MAX_BF_QP_RANGE && bf_qp) + return -ENOMEM; + + uid = MLX4_QP_TABLE_ZONE_GENERAL; + if (flags & (u8)MLX4_RESERVE_A0_QP) { + if (bf_qp) + uid = MLX4_QP_TABLE_ZONE_RAW_ETH; + else + uid = MLX4_QP_TABLE_ZONE_RSS; + } + + *base = mlx4_zone_alloc_entries(qp_table->zones, uid, cnt, align, + bf_qp ? 
MLX4_BF_QP_SKIP_MASK : 0, NULL); if (*base == -1) return -ENOMEM; return 0; } -int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) +int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, + int *base, u8 flags) { u64 in_param = 0; u64 out_param; int err; + /* Turn off all unsupported QP allocation flags */ + flags &= dev->caps.alloc_res_qp_mask; + if (mlx4_is_mfunc(dev)) { - set_param_l(&in_param, cnt); + set_param_l(&in_param, (((u32)flags) << 24) | (u32)cnt); set_param_h(&in_param, align); err = mlx4_cmd_imm(dev, in_param, &out_param, RES_QP, RES_OP_RESERVE, @@ -238,7 +261,7 @@ int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) *base = get_param_l(&out_param); return 0; } - return __mlx4_qp_reserve_range(dev, cnt, align, base); + return __mlx4_qp_reserve_range(dev, cnt, align, base, flags); } EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range); @@ -249,7 +272,7 @@ void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) if (mlx4_is_qp_reserved(dev, (u32) base_qpn)) return; - mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, MLX4_USE_RR); + mlx4_zone_free_entries_unique(qp_table->zones, base_qpn, cnt); } void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) @@ -459,28 +482,261 @@ static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn) MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); } +#define MLX4_QP_TABLE_RSS_ETH_PRIORITY 2 +#define MLX4_QP_TABLE_RAW_ETH_PRIORITY 1 +#define MLX4_QP_TABLE_RAW_ETH_SIZE 256 + +static int mlx4_create_zones(struct mlx4_dev *dev, + u32 reserved_bottom_general, + u32 reserved_top_general, + u32 reserved_bottom_rss, + u32 start_offset_rss, + u32 max_table_offset) +{ + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; + struct mlx4_bitmap (*bitmap)[MLX4_QP_TABLE_ZONE_NUM] = NULL; + int bitmap_initialized = 0; + u32 last_offset; + int k; + int err; + + qp_table->zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP); + + if (NULL == qp_table->zones) + return -ENOMEM; + + bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL); + + if (NULL == bitmap) { + err = -ENOMEM; + goto free_zone; + } + + err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_GENERAL, dev->caps.num_qps, + (1 << 23) - 1, reserved_bottom_general, + reserved_top_general); + + if (err) + goto free_bitmap; + + ++bitmap_initialized; + + err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_GENERAL, + MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO | + MLX4_ZONE_USE_RR, 0, + 0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_GENERAL); + + if (err) + goto free_bitmap; + + err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_RSS, + reserved_bottom_rss, + reserved_bottom_rss - 1, + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], + reserved_bottom_rss - start_offset_rss); + + if (err) + goto free_bitmap; + + ++bitmap_initialized; + + err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_RSS, + MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO | + MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO | + MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RSS_ETH_PRIORITY, + 0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_RSS); + + if (err) + goto free_bitmap; + + last_offset = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; + /* We have a single zone for the A0 steering QPs area of the FW. This area + * needs to be split into subareas. One set of subareas is for RSS QPs + * (in which qp number bits 6 and/or 7 are set); the other set of subareas + * is for RAW_ETH QPs, which require that both bits 6 and 7 are zero. 
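+	 * (Bits 6 and 7 matter because BlueFlame doorbells are only usable on
+	 * QPs whose numbers have both bits clear; MLX4_BF_QP_SKIP_MASK above
+	 * encodes exactly that constraint.)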
+ * Currently, the values returned by the FW (A0 steering area starting qp number + * and A0 steering area size) are such that there are only two subareas -- one + * for RSS and one for RAW_ETH. + */ + for (k = MLX4_QP_TABLE_ZONE_RSS + 1; k < sizeof(*bitmap)/sizeof((*bitmap)[0]); + k++) { + int size; + u32 offset = start_offset_rss; + u32 bf_mask; + u32 requested_size; + + /* Assuming MLX4_BF_QP_SKIP_MASK is consecutive ones, this calculates + * a mask of all LSB bits set until (and not including) the first + * set bit of MLX4_BF_QP_SKIP_MASK. For example, if MLX4_BF_QP_SKIP_MASK + * is 0xc0, bf_mask will be 0x3f. + */ + bf_mask = (MLX4_BF_QP_SKIP_MASK & ~(MLX4_BF_QP_SKIP_MASK - 1)) - 1; + requested_size = min((u32)MLX4_QP_TABLE_RAW_ETH_SIZE, bf_mask + 1); + + if (((last_offset & MLX4_BF_QP_SKIP_MASK) && + ((int)(max_table_offset - last_offset)) >= + roundup_pow_of_two(MLX4_BF_QP_SKIP_MASK)) || + (!(last_offset & MLX4_BF_QP_SKIP_MASK) && + !((last_offset + requested_size - 1) & + MLX4_BF_QP_SKIP_MASK))) + size = requested_size; + else { + u32 candidate_offset = + (last_offset | MLX4_BF_QP_SKIP_MASK | bf_mask) + 1; + + if (last_offset & MLX4_BF_QP_SKIP_MASK) + last_offset = candidate_offset; + + /* From this point, the BF bits are 0 */ + + if (last_offset > max_table_offset) { + /* need to skip */ + size = -1; + } else { + size = min3(max_table_offset - last_offset, + bf_mask - (last_offset & bf_mask), + requested_size); + if (size < requested_size) { + int candidate_size; + + candidate_size = min3( + max_table_offset - candidate_offset, + bf_mask - (last_offset & bf_mask), + requested_size); + + /* We will not take this path if last_offset was + * already set above to candidate_offset + */ + if (candidate_size > size) { + last_offset = candidate_offset; + size = candidate_size; + } + } + } + } + + if (size > 0) { + /* mlx4_bitmap_alloc_range will find a contiguous range of "size" + * QPs in which both bits 6 and 7 are zero, because we pass it the + * MLX4_BF_SKIP_MASK). 
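+			 * (The skip-mask arithmetic above, step by step for 0xc0:
+			 * 0xc0 - 1 = 0xbf, so 0xc0 & ~0xbf isolates the lowest set
+			 * bit, 0x40; subtracting 1 yields bf_mask = 0x3f.)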
+ */ + offset = mlx4_bitmap_alloc_range( + *bitmap + MLX4_QP_TABLE_ZONE_RSS, + size, 1, + MLX4_BF_QP_SKIP_MASK); + + if (offset == (u32)-1) { + err = -ENOMEM; + break; + } + + last_offset = offset + size; + + err = mlx4_bitmap_init(*bitmap + k, roundup_pow_of_two(size), + roundup_pow_of_two(size) - 1, 0, + roundup_pow_of_two(size) - size); + } else { + /* Add an empty bitmap, we'll allocate from different zones (since + * at least one is reserved) + */ + err = mlx4_bitmap_init(*bitmap + k, 1, + MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0, + 0); + mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0); + } + + if (err) + break; + + ++bitmap_initialized; + + err = mlx4_zone_add_one(qp_table->zones, *bitmap + k, + MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO | + MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO | + MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RAW_ETH_PRIORITY, + offset, qp_table->zones_uids + k); + + if (err) + break; + } + + if (err) + goto free_bitmap; + + qp_table->bitmap_gen = *bitmap; + + return err; + +free_bitmap: + for (k = 0; k < bitmap_initialized; k++) + mlx4_bitmap_cleanup(*bitmap + k); + kfree(bitmap); +free_zone: + mlx4_zone_allocator_destroy(qp_table->zones); + return err; +} + +static void mlx4_cleanup_qp_zones(struct mlx4_dev *dev) +{ + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; + + if (qp_table->zones) { + int i; + + for (i = 0; + i < sizeof(qp_table->zones_uids)/sizeof(qp_table->zones_uids[0]); + i++) { + struct mlx4_bitmap *bitmap = + mlx4_zone_get_bitmap(qp_table->zones, + qp_table->zones_uids[i]); + + mlx4_zone_remove_one(qp_table->zones, qp_table->zones_uids[i]); + if (NULL == bitmap) + continue; + + mlx4_bitmap_cleanup(bitmap); + } + mlx4_zone_allocator_destroy(qp_table->zones); + kfree(qp_table->bitmap_gen); + qp_table->bitmap_gen = NULL; + qp_table->zones = NULL; + } +} + int mlx4_init_qp_table(struct mlx4_dev *dev) { struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; int err; int reserved_from_top = 0; + int reserved_from_bot; int k; + int fixed_reserved_from_bot_rv = 0; + int bottom_reserved_for_rss_bitmap; + u32 max_table_offset = dev->caps.dmfs_high_rate_qpn_base + + dev->caps.dmfs_high_rate_qpn_range; spin_lock_init(&qp_table->lock); INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC); if (mlx4_is_slave(dev)) return 0; - /* - * We reserve 2 extra QPs per port for the special QPs. The + /* We reserve 2 extra QPs per port for the special QPs. The * block of special QPs must be aligned to a multiple of 8, so * round up. * * We also reserve the MSB of the 24-bit QP number to indicate * that a QP is an XRC QP. 
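	 * (That MSB reservation is also why mlx4_create_zones() passes a
	 * (1 << 23) - 1 mask when sizing the general QP bitmap: only the
	 * low 23 bits of a QPN are allocatable.)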
*/ - dev->phys_caps.base_sqpn = - ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8); + for (k = 0; k <= MLX4_QP_REGION_BOTTOM; k++) + fixed_reserved_from_bot_rv += dev->caps.reserved_qps_cnt[k]; + + if (fixed_reserved_from_bot_rv < max_table_offset) + fixed_reserved_from_bot_rv = max_table_offset; + + /* We reserve at least 1 extra for bitmaps that we don't have enough space for*/ + bottom_reserved_for_rss_bitmap = + roundup_pow_of_two(fixed_reserved_from_bot_rv + 1); + dev->phys_caps.base_sqpn = ALIGN(bottom_reserved_for_rss_bitmap, 8); { int sort[MLX4_NUM_QP_REGION]; @@ -490,8 +746,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev) for (i = 1; i < MLX4_NUM_QP_REGION; ++i) sort[i] = i; - for (i = MLX4_NUM_QP_REGION; i > 0; --i) { - for (j = 2; j < i; ++j) { + for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) { + for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) { if (dev->caps.reserved_qps_cnt[sort[j]] > dev->caps.reserved_qps_cnt[sort[j - 1]]) { tmp = sort[j]; @@ -501,13 +757,12 @@ int mlx4_init_qp_table(struct mlx4_dev *dev) } } - for (i = 1; i < MLX4_NUM_QP_REGION; ++i) { + for (i = MLX4_QP_REGION_BOTTOM + 1; i < MLX4_NUM_QP_REGION; ++i) { last_base -= dev->caps.reserved_qps_cnt[sort[i]]; dev->caps.reserved_qps_base[sort[i]] = last_base; reserved_from_top += dev->caps.reserved_qps_cnt[sort[i]]; } - } /* Reserve 8 real SQPs in both native and SRIOV modes. @@ -520,10 +775,17 @@ int mlx4_init_qp_table(struct mlx4_dev *dev) * b. All the proxy SQPs (8 per function) * c. All the tunnel QPs (8 per function) */ + reserved_from_bot = mlx4_num_reserved_sqps(dev); + if (reserved_from_bot + reserved_from_top > dev->caps.num_qps) { + mlx4_err(dev, "Number of reserved QPs is higher than number of QPs\n"); + return -EINVAL; + } + + err = mlx4_create_zones(dev, reserved_from_bot, reserved_from_bot, + bottom_reserved_for_rss_bitmap, + fixed_reserved_from_bot_rv, + max_table_offset); - err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps, - (1 << 23) - 1, mlx4_num_reserved_sqps(dev), - reserved_from_top); if (err) return err; @@ -559,7 +821,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev) err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn); if (err) goto err_mem; - return 0; + + return err; err_mem: kfree(dev->caps.qp0_tunnel); @@ -568,6 +831,7 @@ err_mem: kfree(dev->caps.qp1_proxy); dev->caps.qp0_tunnel = dev->caps.qp0_proxy = dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL; + mlx4_cleanup_qp_zones(dev); return err; } @@ -577,7 +841,8 @@ void mlx4_cleanup_qp_table(struct mlx4_dev *dev) return; mlx4_CONF_SPECIAL_QP(dev, 0); - mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap); + + mlx4_cleanup_qp_zones(dev); } int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index cd5cf6d957c7afa98d76ad54ba42beb1b395e3c8..4efbd1eca6116001e21d655c54479c71cb6f6584 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -1543,16 +1543,21 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, int align; int base; int qpn; + u8 flags; switch (op) { case RES_OP_RESERVE: count = get_param_l(&in_param) & 0xffffff; + /* Turn off all unsupported QP allocation flags that the + * slave tries to set. 
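qp_alloc_res() above unpacks a single 64-bit parameter: the low word carries the QP count in bits 0..23 and the allocation flags in bits 24..31, masked by the device's advertised alloc_res_qp_mask so a slave cannot request unsupported modes, while the high word carries the alignment. A standalone decode with hypothetical values (the 0xc0 mask below is assumed, not taken from the hardware):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical request: count 128, flag bit 6 set, alignment 256. */
	uint64_t in_param = ((uint64_t)256 << 32) | (0x40u << 24) | 128;
	uint8_t alloc_res_qp_mask = 0xc0;	/* assumed capability mask */

	uint32_t param_l = (uint32_t)in_param;
	uint32_t count = param_l & 0xffffff;
	uint8_t flags = (param_l >> 24) & alloc_res_qp_mask;
	uint32_t align = (uint32_t)(in_param >> 32);

	printf("count=%u flags=0x%x align=%u\n", count, flags, align);
	return 0;	/* count=128 flags=0x40 align=256 */
}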
+ */ + flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask; align = get_param_h(&in_param); err = mlx4_grant_resource(dev, slave, RES_QP, count, 0); if (err) return err; - err = __mlx4_qp_reserve_range(dev, count, align, &base); + err = __mlx4_qp_reserve_range(dev, count, align, &base, flags); if (err) { mlx4_release_resource(dev, slave, RES_QP, count, 0); return err; @@ -2872,6 +2877,23 @@ out_add: return err; } +int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + u8 get = vhcr->op_modifier; + + if (get != 1) + return -EPERM; + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + + return err; +} + static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start, int len, struct res_mtt **res) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 368c6c5ea014d2a1c8adc8284269452ac915faab..a2853057c779529b0a226e5aa54adfbc10c3a645 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1363,7 +1363,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) goto err_map; } - if (cmd->log_sz + cmd->log_stride > PAGE_SHIFT) { + if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) { dev_err(&dev->pdev->dev, "command queue size overflow\n"); err = -EINVAL; goto err_map; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index ad2c96a02a5352caec2d8649c714c3616be98ea8..ab684463780b4fbd193b79eea5299a743e7a36cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -225,8 +225,8 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; - mlx5_core_dbg(dev, "event %s(%d) arrived\n", - eqe_type_str(eqe->type), eqe->type); + mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n", + eqe_type_str(eqe->type), eqe->type, rsn); mlx5_rsc_event(dev, rsn, eqe->type); break; @@ -390,7 +390,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, */ eq_update_ci(eq, 1); - mlx5_vfree(in); + kvfree(in); return 0; err_irq: @@ -400,7 +400,7 @@ err_eq: mlx5_cmd_destroy_eq(dev, eq->eqn); err_in: - mlx5_vfree(in); + kvfree(in); err_buf: mlx5_buf_free(dev, &eq->buf); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 71b10b210792e316582d0280d87f92cbee6bb2d8..3f4525619a07efb6de4711189565a1f06e624293 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include "mlx5_core.h" @@ -225,7 +226,7 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev) table->msix_arr[i].entry = i; nvec = pci_enable_msix_range(dev->pdev, table->msix_arr, - MLX5_EQ_VEC_COMP_BASE, nvec); + MLX5_EQ_VEC_COMP_BASE + 1, nvec); if (nvec < 0) return nvec; @@ -840,6 +841,8 @@ struct mlx5_core_event_handler { void *data); }; +#define MLX5_IB_MOD "mlx5_ib" + static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -878,6 +881,10 @@ static int init_one(struct pci_dev *pdev, goto out_init; } + err = request_module_nowait(MLX5_IB_MOD); + if (err) + 
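The corrected mlx5 command-queue check above works in logarithms: a queue of 2^log_sz entries, each 2^log_stride bytes, occupies 2^(log_sz + log_stride) bytes, which must fit in one adapter page (MLX5_ADAPTER_PAGE_SHIFT is 12, i.e. 4 KiB) rather than the host's PAGE_SHIFT. A quick standalone check:

#include <stdio.h>

#define MLX5_ADAPTER_PAGE_SHIFT 12	/* 4 KiB adapter pages */

int main(void)
{
	unsigned log_sz = 5, log_stride = 6;	/* 32 entries x 64 bytes */

	if (log_sz + log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		printf("command queue size overflow\n");
		return 1;
	}
	printf("queue uses %u of %u bytes\n",
	       1u << (log_sz + log_stride), 1u << MLX5_ADAPTER_PAGE_SHIFT);
	return 0;	/* queue uses 2048 of 4096 bytes */
}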
pr_info("failed request module on %s\n", MLX5_IB_MOD); + return 0; out_init: @@ -896,8 +903,12 @@ static void remove_one(struct pci_dev *pdev) } static const struct pci_device_id mlx5_core_pci_table[] = { - { PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */ + { PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */ + { PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */ { PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */ + { PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */ + { PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */ + { PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */ { 0, } }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index d476918ef269662eb1d77da8d81b99deb3d7617c..4fdaae9b54d99f56f21a78706776805303ba5ed9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -349,7 +349,7 @@ out_4k: for (i--; i >= 0; i--) free_4k(dev, be64_to_cpu(in->pas[i])); out_free: - mlx5_vfree(in); + kvfree(in); return err; } @@ -400,7 +400,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, } out_free: - mlx5_vfree(out); + kvfree(out); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 313965853e10c14a603e71f6207b267c63aafb45..72c2d002c3b8f65e78b89733c87c44d3ba702089 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -68,9 +68,9 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, memcpy(data_out, out->data, size_out); ex2: - mlx5_vfree(out); + kvfree(out); ex1: - mlx5_vfree(in); + kvfree(in); return err; } EXPORT_SYMBOL_GPL(mlx5_core_access_reg); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 0a6348cefc0137999577dedf341bef369e6b0166..06801d6f595ef99f0d18cc91d37dcac62be445db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -96,6 +96,7 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn) int err; memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_UAR); in.uarn = cpu_to_be32(uarn); err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 9e7e3f1dce3ea84f4305f8b0665fbd0aad5ac877..af099057f0e9c263dc250924785a15cf75ea6edb 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -2913,16 +2913,11 @@ again: flags |= MXGEFW_FLAGS_SMALL; /* pad frames to at least ETH_ZLEN bytes */ - if (unlikely(skb->len < ETH_ZLEN)) { - if (skb_padto(skb, ETH_ZLEN)) { - /* The packet is gone, so we must - * return 0 */ - ss->stats.tx_dropped += 1; - return NETDEV_TX_OK; - } - /* adjust the len to account for the zero pad - * so that the nic can know how long it is */ - skb->len = ETH_ZLEN; + if (eth_skb_pad(skb)) { + /* The packet is gone, so we must + * return 0 */ + ss->stats.tx_dropped += 1; + return NETDEV_TX_OK; } } diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c index 9e4ddbba70363b8fdedf412b6c830a44f1a0f045..66c2d50d5b8d12df5e0c44bc295948f9d34d6fa1 100644 --- a/drivers/net/ethernet/natsemi/macsonic.c +++ b/drivers/net/ethernet/natsemi/macsonic.c @@ -326,13 +326,9 @@ 
static int mac_onboard_sonic_probe(struct net_device *dev) macintosh_config->ident == MAC_MODEL_P588 || macintosh_config->ident == MAC_MODEL_P575 || macintosh_config->ident == MAC_MODEL_C610) { - unsigned long flags; int card_present; - local_irq_save(flags); card_present = hwreg_present((void*)ONBOARD_SONIC_REGISTERS); - local_irq_restore(flags); - if (!card_present) { printk("none.\n"); return -ENODEV; diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index 30d934d66356412bb737342eb134e61359315266..44e8d7d255474d30bf48577723caf9032af43854 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -1837,10 +1837,8 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return err; out: - if (mac->iob_pdev) - pci_dev_put(mac->iob_pdev); - if (mac->dma_pdev) - pci_dev_put(mac->dma_pdev); + pci_dev_put(mac->iob_pdev); + pci_dev_put(mac->dma_pdev); free_netdev(dev); out_disable_device: diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index a913b3ad2f899e791a9abd3c5e518d1f7410ceeb..1aa25b13ace1d2ccce0d5cf10fb07fb3a3158038 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -376,13 +376,14 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p) } static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *netdev, const unsigned char *addr) + struct net_device *netdev, + const unsigned char *addr, u16 vid) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int err = -EOPNOTSUPP; if (!adapter->fdb_mac_learn) - return ndo_dflt_fdb_del(ndm, tb, netdev, addr); + return ndo_dflt_fdb_del(ndm, tb, netdev, addr, vid); if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) || qlcnic_sriov_check(adapter)) { @@ -401,13 +402,13 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *netdev, - const unsigned char *addr, u16 flags) + const unsigned char *addr, u16 vid, u16 flags) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int err = 0; if (!adapter->fdb_mac_learn) - return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags); + return ndo_dflt_fdb_add(ndm, tb, netdev, addr, vid, flags); if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) && !qlcnic_sriov_check(adapter)) { @@ -460,7 +461,7 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter) } static int qlcnic_get_phys_port_id(struct net_device *netdev, - struct netdev_phys_port_id *ppid) + struct netdev_phys_item_id *ppid) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_hardware_context *ahw = adapter->ahw; diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 75b1693ec8bfb7d7168b1e9c49a0e075e7b5a1dc..9c31e46d1eee481d112b6cd9a5fb0ec761322b07 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -507,7 +507,7 @@ rx_status_loop: netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n", rx_tail, status, len); - new_skb = netdev_alloc_skb_ip_align(dev, buflen); + new_skb = napi_alloc_skb(napi, buflen); if (!new_skb) { dev->stats.rx_dropped++; goto rx_next; diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 007b38cce69a0b3bde4472ab5eef75989cf0dd6c..6d0b9dfac313ce8aa4891a665bf5cf3c394f48f0 100644 --- 
a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -112,6 +112,7 @@ #include #include #include +#include #include #define RTL8139_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION @@ -182,10 +183,13 @@ static int debug = -1; /* Number of Tx descriptor registers. */ #define NUM_TX_DESC 4 -/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/ -#define MAX_ETH_FRAME_SIZE 1536 +/* max supported ethernet frame size -- must be at least (dev->mtu+18+4).*/ +#define MAX_ETH_FRAME_SIZE 1792 -/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */ +/* max supported payload size */ +#define MAX_ETH_DATA_SIZE (MAX_ETH_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN) + +/* Size of the Tx bounce buffers -- must be at least (dev->mtu+18+4). */ #define TX_BUF_SIZE MAX_ETH_FRAME_SIZE #define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC) @@ -920,11 +924,19 @@ static int rtl8139_set_features(struct net_device *dev, netdev_features_t featur return 0; } +static int rtl8139_change_mtu(struct net_device *dev, int new_mtu) +{ + if (new_mtu < 68 || new_mtu > MAX_ETH_DATA_SIZE) + return -EINVAL; + dev->mtu = new_mtu; + return 0; +} + static const struct net_device_ops rtl8139_netdev_ops = { .ndo_open = rtl8139_open, .ndo_stop = rtl8139_close, .ndo_get_stats64 = rtl8139_get_stats64, - .ndo_change_mtu = eth_change_mtu, + .ndo_change_mtu = rtl8139_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = rtl8139_set_mac_address, .ndo_start_xmit = rtl8139_start_xmit, @@ -2025,7 +2037,7 @@ keep_pkt: /* Malloc up new buffer, compatible with net-2e. */ /* Omit the four octet CRC from the length. */ - skb = netdev_alloc_skb_ip_align(dev, pkt_size); + skb = napi_alloc_skb(&tp->napi, pkt_size); if (likely(skb)) { #if RX_BUF_IDX == 3 wrap_copy(skb, rx_ring, ring_offset+4, pkt_size); diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h index 040b1373994749264374f0a9c44e217ae1997edf..32497f0e537cdfcfd321be4b6405f85e1ab4beb7 100644 --- a/drivers/net/ethernet/realtek/atp.h +++ b/drivers/net/ethernet/realtek/atp.h @@ -6,10 +6,10 @@ /* The header prepended to received packets. */ struct rx_header { - ushort pad; /* Pad. */ - ushort rx_count; - ushort rx_status; /* Unknown bit assignments :-<. */ - ushort cur_addr; /* Apparently the current buffer address(?) */ + ushort pad; /* Pad. */ + ushort rx_count; + ushort rx_status; /* Unknown bit assignments :-<. */ + ushort cur_addr; /* Apparently the current buffer address(?) */ }; #define PAR_DATA 0 @@ -29,22 +29,25 @@ struct rx_header { #define RdAddr 0xC0 #define HNib 0x10 -enum page0_regs -{ - /* The first six registers hold the ethernet physical station address. */ - PAR0 = 0, PAR1 = 1, PAR2 = 2, PAR3 = 3, PAR4 = 4, PAR5 = 5, - TxCNT0 = 6, TxCNT1 = 7, /* The transmit byte count. */ - TxSTAT = 8, RxSTAT = 9, /* Tx and Rx status. */ - ISR = 10, IMR = 11, /* Interrupt status and mask. */ - CMR1 = 12, /* Command register 1. */ - CMR2 = 13, /* Command register 2. */ - MODSEL = 14, /* Mode select register. */ - MAR = 14, /* Memory address register (?). */ - CMR2_h = 0x1d, }; - -enum eepage_regs -{ PROM_CMD = 6, PROM_DATA = 7 }; /* Note that PROM_CMD is in the "high" bits. */ +enum page0_regs { + /* The first six registers hold + * the ethernet physical station address. + */ + PAR0 = 0, PAR1 = 1, PAR2 = 2, PAR3 = 3, PAR4 = 4, PAR5 = 5, + TxCNT0 = 6, TxCNT1 = 7, /* The transmit byte count. */ + TxSTAT = 8, RxSTAT = 9, /* Tx and Rx status. 
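The new 8139too limit falls out of simple arithmetic: the Tx bounce buffer holds a whole frame, so the largest payload is the buffer size minus the VLAN-tagged Ethernet header (18 bytes) and the FCS (4 bytes), and rtl8139_change_mtu() rejects anything outside 68..MAX_ETH_DATA_SIZE. Worked standalone:

#include <stdio.h>

#define MAX_ETH_FRAME_SIZE	1792
#define VLAN_ETH_HLEN		18	/* dst + src + VLAN tag + type */
#define ETH_FCS_LEN		4
#define MAX_ETH_DATA_SIZE	(MAX_ETH_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN)

int main(void)
{
	int new_mtu = 1500;

	/* the range rtl8139_change_mtu() accepts */
	if (new_mtu < 68 || new_mtu > MAX_ETH_DATA_SIZE)
		printf("rejected\n");
	else
		printf("accepted (max is %d)\n", MAX_ETH_DATA_SIZE);
	return 0;	/* accepted (max is 1770) */
}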
*/ + ISR = 10, IMR = 11, /* Interrupt status and mask. */ + CMR1 = 12, /* Command register 1. */ + CMR2 = 13, /* Command register 2. */ + MODSEL = 14, /* Mode select register. */ + MAR = 14, /* Memory address register (?). */ + CMR2_h = 0x1d, +}; +enum eepage_regs { + PROM_CMD = 6, + PROM_DATA = 7 /* Note that PROM_CMD is in the "high" bits. */ +}; #define ISR_TxOK 0x01 #define ISR_RxOK 0x04 @@ -72,141 +75,146 @@ enum eepage_regs #define CMR2h_Normal 2 /* Accept physical and broadcast address. */ #define CMR2h_PROMISC 3 /* Promiscuous mode. */ -/* An inline function used below: it differs from inb() by explicitly return an unsigned - char, saving a truncation. */ +/* An inline function used below: it differs from inb() by explicitly + * return an unsigned char, saving a truncation. + */ static inline unsigned char inbyte(unsigned short port) { - unsigned char _v; - __asm__ __volatile__ ("inb %w1,%b0" :"=a" (_v):"d" (port)); - return _v; + unsigned char _v; + + __asm__ __volatile__ ("inb %w1,%b0" : "=a" (_v) : "d" (port)); + return _v; } /* Read register OFFSET. - This command should always be terminated with read_end(). */ + * This command should always be terminated with read_end(). + */ static inline unsigned char read_nibble(short port, unsigned char offset) { - unsigned char retval; - outb(EOC+offset, port + PAR_DATA); - outb(RdAddr+offset, port + PAR_DATA); - inbyte(port + PAR_STATUS); /* Settling time delay */ - retval = inbyte(port + PAR_STATUS); - outb(EOC+offset, port + PAR_DATA); - - return retval; + unsigned char retval; + + outb(EOC+offset, port + PAR_DATA); + outb(RdAddr+offset, port + PAR_DATA); + inbyte(port + PAR_STATUS); /* Settling time delay */ + retval = inbyte(port + PAR_STATUS); + outb(EOC+offset, port + PAR_DATA); + + return retval; } /* Functions for bulk data read. The interrupt line is always disabled. */ /* Get a byte using read mode 0, reading data from the control lines. */ static inline unsigned char read_byte_mode0(short ioaddr) { - unsigned char low_nib; - - outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL); - inbyte(ioaddr + PAR_STATUS); - low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; - outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL); - inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */ - inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */ - return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); + unsigned char low_nib; + + outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL); + inbyte(ioaddr + PAR_STATUS); + low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; + outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL); + inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */ + inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */ + return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); } /* The same as read_byte_mode0(), but does multiple inb()s for stability. */ static inline unsigned char read_byte_mode2(short ioaddr) { - unsigned char low_nib; - - outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL); - inbyte(ioaddr + PAR_STATUS); - low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; - outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL); - inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */ - return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); + unsigned char low_nib; + + outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL); + inbyte(ioaddr + PAR_STATUS); + low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; + outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL); + inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! 
*/ + return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); } /* Read a byte through the data register. */ static inline unsigned char read_byte_mode4(short ioaddr) { - unsigned char low_nib; + unsigned char low_nib; - outb(RdAddr | MAR, ioaddr + PAR_DATA); - low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; - outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA); - return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); + outb(RdAddr | MAR, ioaddr + PAR_DATA); + low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; + outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA); + return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); } /* Read a byte through the data register, double reading to allow settling. */ static inline unsigned char read_byte_mode6(short ioaddr) { - unsigned char low_nib; - - outb(RdAddr | MAR, ioaddr + PAR_DATA); - inbyte(ioaddr + PAR_STATUS); - low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; - outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA); - inbyte(ioaddr + PAR_STATUS); - return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); + unsigned char low_nib; + + outb(RdAddr | MAR, ioaddr + PAR_DATA); + inbyte(ioaddr + PAR_STATUS); + low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; + outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA); + inbyte(ioaddr + PAR_STATUS); + return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); } static inline void write_reg(short port, unsigned char reg, unsigned char value) { - unsigned char outval; - outb(EOC | reg, port + PAR_DATA); - outval = WrAddr | reg; - outb(outval, port + PAR_DATA); - outb(outval, port + PAR_DATA); /* Double write for PS/2. */ - - outval &= 0xf0; - outval |= value; - outb(outval, port + PAR_DATA); - outval &= 0x1f; - outb(outval, port + PAR_DATA); - outb(outval, port + PAR_DATA); - - outb(EOC | outval, port + PAR_DATA); + unsigned char outval; + + outb(EOC | reg, port + PAR_DATA); + outval = WrAddr | reg; + outb(outval, port + PAR_DATA); + outb(outval, port + PAR_DATA); /* Double write for PS/2. */ + + outval &= 0xf0; + outval |= value; + outb(outval, port + PAR_DATA); + outval &= 0x1f; + outb(outval, port + PAR_DATA); + outb(outval, port + PAR_DATA); + + outb(EOC | outval, port + PAR_DATA); } static inline void write_reg_high(short port, unsigned char reg, unsigned char value) { - unsigned char outval = EOC | HNib | reg; + unsigned char outval = EOC | HNib | reg; - outb(outval, port + PAR_DATA); - outval &= WrAddr | HNib | 0x0f; - outb(outval, port + PAR_DATA); - outb(outval, port + PAR_DATA); /* Double write for PS/2. */ + outb(outval, port + PAR_DATA); + outval &= WrAddr | HNib | 0x0f; + outb(outval, port + PAR_DATA); + outb(outval, port + PAR_DATA); /* Double write for PS/2. */ - outval = WrAddr | HNib | value; - outb(outval, port + PAR_DATA); - outval &= HNib | 0x0f; /* HNib | value */ - outb(outval, port + PAR_DATA); - outb(outval, port + PAR_DATA); + outval = WrAddr | HNib | value; + outb(outval, port + PAR_DATA); + outval &= HNib | 0x0f; /* HNib | value */ + outb(outval, port + PAR_DATA); + outb(outval, port + PAR_DATA); - outb(EOC | HNib | outval, port + PAR_DATA); + outb(EOC | HNib | outval, port + PAR_DATA); } /* Write a byte out using nibble mode. The low nibble is written first. */ static inline void write_reg_byte(short port, unsigned char reg, unsigned char value) { - unsigned char outval; - outb(EOC | reg, port + PAR_DATA); /* Reset the address register. */ - outval = WrAddr | reg; - outb(outval, port + PAR_DATA); - outb(outval, port + PAR_DATA); /* Double write for PS/2. 
*/ - - outb((outval & 0xf0) | (value & 0x0f), port + PAR_DATA); - outb(value & 0x0f, port + PAR_DATA); - value >>= 4; - outb(value, port + PAR_DATA); - outb(0x10 | value, port + PAR_DATA); - outb(0x10 | value, port + PAR_DATA); - - outb(EOC | value, port + PAR_DATA); /* Reset the address register. */ + unsigned char outval; + + outb(EOC | reg, port + PAR_DATA); /* Reset the address register. */ + outval = WrAddr | reg; + outb(outval, port + PAR_DATA); + outb(outval, port + PAR_DATA); /* Double write for PS/2. */ + + outb((outval & 0xf0) | (value & 0x0f), port + PAR_DATA); + outb(value & 0x0f, port + PAR_DATA); + value >>= 4; + outb(value, port + PAR_DATA); + outb(0x10 | value, port + PAR_DATA); + outb(0x10 | value, port + PAR_DATA); + + outb(EOC | value, port + PAR_DATA); /* Reset the address register. */ } -/* - * Bulk data writes to the packet buffer. The interrupt line remains enabled. +/* Bulk data writes to the packet buffer. The interrupt line remains enabled. * The first, faster method uses only the dataport (data modes 0, 2 & 4). * The second (backup) method uses data and control regs (modes 1, 3 & 5). * It should only be needed when there is skew between the individual data @@ -214,28 +222,28 @@ write_reg_byte(short port, unsigned char reg, unsigned char value) */ static inline void write_byte_mode0(short ioaddr, unsigned char value) { - outb(value & 0x0f, ioaddr + PAR_DATA); - outb((value>>4) | 0x10, ioaddr + PAR_DATA); + outb(value & 0x0f, ioaddr + PAR_DATA); + outb((value>>4) | 0x10, ioaddr + PAR_DATA); } static inline void write_byte_mode1(short ioaddr, unsigned char value) { - outb(value & 0x0f, ioaddr + PAR_DATA); - outb(Ctrl_IRQEN | Ctrl_LNibWrite, ioaddr + PAR_CONTROL); - outb((value>>4) | 0x10, ioaddr + PAR_DATA); - outb(Ctrl_IRQEN | Ctrl_HNibWrite, ioaddr + PAR_CONTROL); + outb(value & 0x0f, ioaddr + PAR_DATA); + outb(Ctrl_IRQEN | Ctrl_LNibWrite, ioaddr + PAR_CONTROL); + outb((value>>4) | 0x10, ioaddr + PAR_DATA); + outb(Ctrl_IRQEN | Ctrl_HNibWrite, ioaddr + PAR_CONTROL); } /* Write 16bit VALUE to the packet buffer: the same as above just doubled. */ static inline void write_word_mode0(short ioaddr, unsigned short value) { - outb(value & 0x0f, ioaddr + PAR_DATA); - value >>= 4; - outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA); - value >>= 4; - outb(value & 0x0f, ioaddr + PAR_DATA); - value >>= 4; - outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA); + outb(value & 0x0f, ioaddr + PAR_DATA); + value >>= 4; + outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA); + value >>= 4; + outb(value & 0x0f, ioaddr + PAR_DATA); + value >>= 4; + outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA); } /* EEPROM_Ctrl bits. */ @@ -248,10 +256,10 @@ static inline void write_word_mode0(short ioaddr, unsigned short value) /* Delay between EEPROM clock transitions. */ #define eeprom_delay(ticks) \ -do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0) +do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; } } while (0) /* The EEPROM commands include the alway-set leading bit. */ #define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17) -#define EE_READ(offset) (((6 << 6) + (offset)) << 17) +#define EE_READ(offset) (((6 << 6) + (offset)) << 17) #define EE_ERASE(offset) (((7 << 6) + (offset)) << 17) #define EE_CMD_SIZE 27 /* The command+address+data size. 
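To see how write_word_mode0() above puts 16 bits on the parallel port, here is the same nibble sequence traced in userspace: nibbles leave low-first, and bit 0x10 strobes every second write (the high-nibble latch):

#include <stdio.h>

int main(void)
{
	unsigned short value = 0xabcd;
	int i;

	for (i = 0; i < 4; i++) {
		unsigned out = value & 0x0f;

		if (i & 1)	/* high-nibble strobe on writes 1 and 3 */
			out |= 0x10;
		printf("outb(0x%02x)\n", out);
		value >>= 4;
	}
	return 0;	/* emits 0x0d, 0x1c, 0x0b, 0x1a for 0xabcd */
}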
*/ diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index cf154f74cba1e18e38d6e9e6d3fb31b01377576b..3dad7e88495210710a800951c8c7849ccbfa3c5f 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1377,6 +1377,16 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond) return RTL_R8(IBISR0) & 0x02; } +static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01); + rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000); + RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20); + RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01); +} + static void rtl8168dp_driver_start(struct rtl8169_private *tp) { rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START); @@ -1417,12 +1427,7 @@ static void rtl8168dp_driver_stop(struct rtl8169_private *tp) static void rtl8168ep_driver_stop(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01); - rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000); - RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20); - RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01); + rtl8168ep_stop_cmac(tp); ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP); ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01); rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10); @@ -5934,7 +5939,6 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC); - RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); RTL_W8(MaxTxPacketSize, EarlySize); @@ -6027,7 +6031,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC); - RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); RTL_W8(MaxTxPacketSize, EarlySize); @@ -6091,6 +6094,8 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp) void __iomem *ioaddr = tp->mmio_addr; struct pci_dev *pdev = tp->pci_dev; + rtl8168ep_stop_cmac(tp); + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); @@ -6109,7 +6114,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp) rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC); - RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); RTL_W8(MaxTxPacketSize, EarlySize); @@ -6832,14 +6836,6 @@ err_out: return -EIO; } -static bool rtl_skb_pad(struct sk_buff *skb) -{ - if (skb_padto(skb, ETH_ZLEN)) - return false; - skb_put(skb, ETH_ZLEN - skb->len); - return true; -} - static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb) { return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34; @@ -6980,7 +6976,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, u8 ip_protocol; if (unlikely(rtl_test_hw_pad_bug(tp, skb))) - return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb); + return !(skb_checksum_help(skb) || eth_skb_pad(skb)); if (transport_offset > TCPHO_MAX) { netif_warn(tp, tx_err, tp->dev, @@ -7015,7 +7011,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, opts[1] |= transport_offset << TCPHO_SHIFT; } else { if (unlikely(rtl_test_hw_pad_bug(tp, skb))) - return rtl_skb_pad(skb); + return !eth_skb_pad(skb); } return true; @@ -7264,7 +7260,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data, data = rtl8169_align(data); 
dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); prefetch(data); - skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size); + skb = napi_alloc_skb(&tp->napi, pkt_size); if (skb) memcpy(skb->data, data, pkt_size); dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); @@ -8005,6 +8001,12 @@ static void rtl_hw_init_8168g(struct rtl8169_private *tp) return; } +static void rtl_hw_init_8168ep(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + rtl_hw_init_8168g(tp); +} + static void rtl_hw_initialize(struct rtl8169_private *tp) { switch (tp->mac_version) { @@ -8017,12 +8019,13 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_46: case RTL_GIGA_MAC_VER_47: case RTL_GIGA_MAC_VER_48: + rtl_hw_init_8168g(tp); + break; case RTL_GIGA_MAC_VER_49: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: - rtl_hw_init_8168g(tp); + rtl_hw_init_8168ep(tp); break; - default: break; } diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index b5db6b3f939fc00884f313829cb9c1ffff118981..c29ba80ae02bfde60f41f4118c02bab642303b89 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1,5 +1,6 @@ /* SuperH Ethernet device driver * + * Copyright (C) 2014 Renesas Electronics Corporation * Copyright (C) 2006-2012 Nobuhiro Iwamatsu * Copyright (C) 2008-2014 Renesas Solutions Corp. * Copyright (C) 2013-2014 Cogent Embedded, Inc. @@ -1136,7 +1137,7 @@ static void sh_eth_ring_format(struct net_device *ndev) rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length, DMA_FROM_DEVICE); - rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); + rxdesc->addr = virt_to_phys(skb->data); rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); /* Rx descriptor address set */ @@ -1387,11 +1388,14 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) int entry = mdp->cur_rx % mdp->num_rx_ring; int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; + int limit; struct sk_buff *skb; u16 pkt_len = 0; u32 desc_status; int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; + boguscnt = min(boguscnt, *quota); + limit = boguscnt; rxdesc = &mdp->rx_ring[entry]; while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { desc_status = edmac_to_cpu(mdp, rxdesc->status); @@ -1400,11 +1404,6 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) if (--boguscnt < 0) break; - if (*quota <= 0) - break; - - (*quota)--; - if (!(desc_status & RDFEND)) ndev->stats.rx_length_errors++; @@ -1471,7 +1470,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) rxdesc->buffer_length, DMA_FROM_DEVICE); skb_checksum_none_assert(skb); - rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); + rxdesc->addr = virt_to_phys(skb->data); } if (entry >= mdp->num_rx_ring - 1) rxdesc->status |= @@ -1495,6 +1494,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) sh_eth_write(ndev, EDRRR_R, EDRRR); } + *quota -= limit - boguscnt - 1; + return *quota <= 0; } @@ -2746,6 +2747,7 @@ static const struct of_device_id sh_eth_match_table[] = { { .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data }, { .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data }, { .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data }, + { .compatible = "renesas,ether-r8a7793", .data = &r8a779x_data }, { .compatible = "renesas,ether-r8a7794", .data = &r8a779x_data }, { .compatible = 
"renesas,ether-r7s72100", .data = &r7s72100_data }, { } @@ -2769,10 +2771,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev) /* get base addr */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (unlikely(res == NULL)) { - dev_err(&pdev->dev, "invalid resource\n"); - return -EINVAL; - } ndev = alloc_etherdev(sizeof(struct sh_eth_private)); if (!ndev) @@ -2781,8 +2779,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); - /* The sh Ether-specific entries in the device structure. */ - ndev->base_addr = res->start; devno = pdev->id; if (devno < 0) devno = 0; @@ -2806,6 +2802,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev) goto out_release; } + ndev->base_addr = res->start; + spin_lock_init(&mdp->lock); mdp->pdev = pdev; @@ -2887,6 +2885,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev) } } + if (mdp->cd->rmiimode) + sh_eth_write(ndev, 0x1, RMIIMODE); + /* MDIO bus init */ ret = sh_mdio_init(mdp, pd); if (ret) { @@ -2973,6 +2974,7 @@ static struct platform_device_id sh_eth_id_table[] = { { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data }, { "r8a7790-ether", (kernel_ulong_t)&r8a779x_data }, { "r8a7791-ether", (kernel_ulong_t)&r8a779x_data }, + { "r8a7793-ether", (kernel_ulong_t)&r8a779x_data }, { "r8a7794-ether", (kernel_ulong_t)&r8a779x_data }, { } }; diff --git a/drivers/net/ethernet/rocker/Kconfig b/drivers/net/ethernet/rocker/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..b9952ef040e4478d9c07bba8046affe0f9fab7b0 --- /dev/null +++ b/drivers/net/ethernet/rocker/Kconfig @@ -0,0 +1,27 @@ +# +# Rocker device configuration +# + +config NET_VENDOR_ROCKER + bool "Rocker devices" + default y + ---help--- + If you have a network device belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Rocker devices. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_ROCKER + +config ROCKER + tristate "Rocker switch driver (EXPERIMENTAL)" + depends on PCI && NET_SWITCHDEV && BRIDGE + ---help--- + This driver supports Rocker switch device. + + To compile this driver as a module, choose M here: the + module will be called rocker. + +endif # NET_VENDOR_ROCKER diff --git a/drivers/net/ethernet/rocker/Makefile b/drivers/net/ethernet/rocker/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..f85fb12f36f1fc87d0ef776131b9096fc75a8e8f --- /dev/null +++ b/drivers/net/ethernet/rocker/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Rocker network device drivers. +# + +obj-$(CONFIG_ROCKER) += rocker.o diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c new file mode 100644 index 0000000000000000000000000000000000000000..2f398fa4b9e607546e8f1255ec7a53bfa018114c --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker.c @@ -0,0 +1,4375 @@ +/* + * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver + * Copyright (c) 2014 Jiri Pirko + * Copyright (c) 2014 Scott Feldman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rocker.h" + +static const char rocker_driver_name[] = "rocker"; + +static const struct pci_device_id rocker_pci_id_table[] = { + {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0}, + {0, } +}; + +struct rocker_flow_tbl_key { + u32 priority; + enum rocker_of_dpa_table_id tbl_id; + union { + struct { + u32 in_lport; + u32 in_lport_mask; + enum rocker_of_dpa_table_id goto_tbl; + } ig_port; + struct { + u32 in_lport; + __be16 vlan_id; + __be16 vlan_id_mask; + enum rocker_of_dpa_table_id goto_tbl; + bool untagged; + __be16 new_vlan_id; + } vlan; + struct { + u32 in_lport; + u32 in_lport_mask; + __be16 eth_type; + u8 eth_dst[ETH_ALEN]; + u8 eth_dst_mask[ETH_ALEN]; + __be16 vlan_id; + __be16 vlan_id_mask; + enum rocker_of_dpa_table_id goto_tbl; + bool copy_to_cpu; + } term_mac; + struct { + __be16 eth_type; + __be32 dst4; + __be32 dst4_mask; + enum rocker_of_dpa_table_id goto_tbl; + u32 group_id; + } ucast_routing; + struct { + u8 eth_dst[ETH_ALEN]; + u8 eth_dst_mask[ETH_ALEN]; + int has_eth_dst; + int has_eth_dst_mask; + __be16 vlan_id; + u32 tunnel_id; + enum rocker_of_dpa_table_id goto_tbl; + u32 group_id; + bool copy_to_cpu; + } bridge; + struct { + u32 in_lport; + u32 in_lport_mask; + u8 eth_src[ETH_ALEN]; + u8 eth_src_mask[ETH_ALEN]; + u8 eth_dst[ETH_ALEN]; + u8 eth_dst_mask[ETH_ALEN]; + __be16 eth_type; + __be16 vlan_id; + __be16 vlan_id_mask; + u8 ip_proto; + u8 ip_proto_mask; + u8 ip_tos; + u8 ip_tos_mask; + u32 group_id; + } acl; + }; +}; + +struct rocker_flow_tbl_entry { + struct hlist_node entry; + u32 ref_count; + u64 cookie; + struct rocker_flow_tbl_key key; + u32 key_crc32; /* key */ +}; + +struct rocker_group_tbl_entry { + struct hlist_node entry; + u32 cmd; + u32 group_id; /* key */ + u16 group_count; + u32 *group_ids; + union { + struct { + u8 pop_vlan; + } l2_interface; + struct { + u8 eth_src[ETH_ALEN]; + u8 eth_dst[ETH_ALEN]; + __be16 vlan_id; + u32 group_id; + } l2_rewrite; + struct { + u8 eth_src[ETH_ALEN]; + u8 eth_dst[ETH_ALEN]; + __be16 vlan_id; + bool ttl_check; + u32 group_id; + } l3_unicast; + }; +}; + +struct rocker_fdb_tbl_entry { + struct hlist_node entry; + u32 key_crc32; /* key */ + bool learned; + struct rocker_fdb_tbl_key { + u32 lport; + u8 addr[ETH_ALEN]; + __be16 vlan_id; + } key; +}; + +struct rocker_internal_vlan_tbl_entry { + struct hlist_node entry; + int ifindex; /* key */ + u32 ref_count; + __be16 vlan_id; +}; + +struct rocker_desc_info { + char *data; /* mapped */ + size_t data_size; + size_t tlv_size; + struct rocker_desc *desc; + DEFINE_DMA_UNMAP_ADDR(mapaddr); +}; + +struct rocker_dma_ring_info { + size_t size; + u32 head; + u32 tail; + struct rocker_desc *desc; /* mapped */ + dma_addr_t mapaddr; + struct rocker_desc_info *desc_info; + unsigned int type; +}; + +struct rocker; + +enum { + ROCKER_CTRL_LINK_LOCAL_MCAST, + ROCKER_CTRL_LOCAL_ARP, + ROCKER_CTRL_IPV4_MCAST, + ROCKER_CTRL_IPV6_MCAST, + ROCKER_CTRL_DFLT_BRIDGING, + ROCKER_CTRL_MAX, +}; + +#define ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00 +#define ROCKER_N_INTERNAL_VLANS 255 +#define ROCKER_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID) +#define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS) + +struct rocker_port { + struct net_device *dev; + struct net_device *bridge_dev; + struct rocker *rocker; + unsigned 
int port_number; + u32 lport; + __be16 internal_vlan_id; + int stp_state; + u32 brport_flags; + bool ctrls[ROCKER_CTRL_MAX]; + unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN]; + struct napi_struct napi_tx; + struct napi_struct napi_rx; + struct rocker_dma_ring_info tx_ring; + struct rocker_dma_ring_info rx_ring; +}; + +struct rocker { + struct pci_dev *pdev; + u8 __iomem *hw_addr; + struct msix_entry *msix_entries; + unsigned int port_count; + struct rocker_port **ports; + struct { + u64 id; + } hw; + spinlock_t cmd_ring_lock; + struct rocker_dma_ring_info cmd_ring; + struct rocker_dma_ring_info event_ring; + DECLARE_HASHTABLE(flow_tbl, 16); + spinlock_t flow_tbl_lock; + u64 flow_tbl_next_cookie; + DECLARE_HASHTABLE(group_tbl, 16); + spinlock_t group_tbl_lock; + DECLARE_HASHTABLE(fdb_tbl, 16); + spinlock_t fdb_tbl_lock; + unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN]; + DECLARE_HASHTABLE(internal_vlan_tbl, 8); + spinlock_t internal_vlan_tbl_lock; +}; + +static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; +static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; +static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; +static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 }; +static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; +static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 }; +static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 }; +static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 }; +static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }; + +/* Rocker priority levels for flow table entries. Higher + * priority match takes precedence over lower priority match. 
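The address/mask pairs above implement prefix-style matching of control traffic: a destination matches an entry when it agrees with the pattern on every bit the mask selects. For example, the LLDP address 01:80:c2:00:00:0e falls under the ll_mac/ll_mask pair. A standalone sketch of that compare:

#include <stdbool.h>
#include <stdio.h>

#define ETH_ALEN 6

static bool mac_match(const unsigned char *addr, const unsigned char *pat,
		      const unsigned char *mask)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		if ((addr[i] & mask[i]) != (pat[i] & mask[i]))
			return false;
	return true;
}

int main(void)
{
	const unsigned char ll_mac[]  = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
	const unsigned char ll_mask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
	const unsigned char lldp[]    = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e };

	printf("%s\n", mac_match(lldp, ll_mac, ll_mask) ? "match" : "no match");
	return 0;	/* match */
}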
+ */ + +enum { + ROCKER_PRIORITY_UNKNOWN = 0, + ROCKER_PRIORITY_IG_PORT = 1, + ROCKER_PRIORITY_VLAN = 1, + ROCKER_PRIORITY_TERM_MAC_UCAST = 0, + ROCKER_PRIORITY_TERM_MAC_MCAST = 1, + ROCKER_PRIORITY_UNICAST_ROUTING = 1, + ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1, + ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2, + ROCKER_PRIORITY_BRIDGING_VLAN = 3, + ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1, + ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2, + ROCKER_PRIORITY_BRIDGING_TENANT = 3, + ROCKER_PRIORITY_ACL_CTRL = 3, + ROCKER_PRIORITY_ACL_NORMAL = 2, + ROCKER_PRIORITY_ACL_DFLT = 1, +}; + +static bool rocker_vlan_id_is_internal(__be16 vlan_id) +{ + u16 start = ROCKER_INTERNAL_VLAN_ID_BASE; + u16 end = 0xffe; + u16 _vlan_id = ntohs(vlan_id); + + return (_vlan_id >= start && _vlan_id <= end); +} + +static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port, + u16 vid, bool *pop_vlan) +{ + __be16 vlan_id; + + if (pop_vlan) + *pop_vlan = false; + vlan_id = htons(vid); + if (!vlan_id) { + vlan_id = rocker_port->internal_vlan_id; + if (pop_vlan) + *pop_vlan = true; + } + + return vlan_id; +} + +static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port, + __be16 vlan_id) +{ + if (rocker_vlan_id_is_internal(vlan_id)) + return 0; + + return ntohs(vlan_id); +} + +static bool rocker_port_is_bridged(struct rocker_port *rocker_port) +{ + return !!rocker_port->bridge_dev; +} + +struct rocker_wait { + wait_queue_head_t wait; + bool done; + bool nowait; +}; + +static void rocker_wait_reset(struct rocker_wait *wait) +{ + wait->done = false; + wait->nowait = false; +} + +static void rocker_wait_init(struct rocker_wait *wait) +{ + init_waitqueue_head(&wait->wait); + rocker_wait_reset(wait); +} + +static struct rocker_wait *rocker_wait_create(gfp_t gfp) +{ + struct rocker_wait *wait; + + wait = kmalloc(sizeof(*wait), gfp); + if (!wait) + return NULL; + rocker_wait_init(wait); + return wait; +} + +static void rocker_wait_destroy(struct rocker_wait *work) +{ + kfree(work); +} + +static bool rocker_wait_event_timeout(struct rocker_wait *wait, + unsigned long timeout) +{ + wait_event_timeout(wait->wait, wait->done, HZ / 10); + if (!wait->done) + return false; + return true; +} + +static void rocker_wait_wake_up(struct rocker_wait *wait) +{ + wait->done = true; + wake_up(&wait->wait); +} + +static u32 rocker_msix_vector(struct rocker *rocker, unsigned int vector) +{ + return rocker->msix_entries[vector].vector; +} + +static u32 rocker_msix_tx_vector(struct rocker_port *rocker_port) +{ + return rocker_msix_vector(rocker_port->rocker, + ROCKER_MSIX_VEC_TX(rocker_port->port_number)); +} + +static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port) +{ + return rocker_msix_vector(rocker_port->rocker, + ROCKER_MSIX_VEC_RX(rocker_port->port_number)); +} + +#define rocker_write32(rocker, reg, val) \ + writel((val), (rocker)->hw_addr + (ROCKER_ ## reg)) +#define rocker_read32(rocker, reg) \ + readl((rocker)->hw_addr + (ROCKER_ ## reg)) +#define rocker_write64(rocker, reg, val) \ + writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg)) +#define rocker_read64(rocker, reg) \ + readq((rocker)->hw_addr + (ROCKER_ ## reg)) + +/***************************** + * HW basic testing functions + *****************************/ + +static int rocker_reg_test(struct rocker *rocker) +{ + struct pci_dev *pdev = rocker->pdev; + u64 test_reg; + u64 rnd; + + rnd = prandom_u32(); + rnd >>= 1; + rocker_write32(rocker, TEST_REG, rnd); + test_reg = rocker_read32(rocker, TEST_REG); + if (test_reg != rnd * 2) { + 
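rocker_write32() and friends above paste the register name onto the ROCKER_ prefix with the ## operator, so callers write rocker_write32(rocker, TEST_REG, val) and the offset macro ROCKER_TEST_REG is resolved at compile time. A self-contained sketch of the same pattern, with hypothetical DEMO_ offsets and plain memory standing in for MMIO:

#include <stdio.h>

#define DEMO_TEST_REG	0x10	/* hypothetical register offset */

static unsigned regs[64];	/* fake register file */

#define demo_write32(reg, val)	(regs[(DEMO_ ## reg) / 4] = (val))
#define demo_read32(reg)	(regs[(DEMO_ ## reg) / 4])

int main(void)
{
	demo_write32(TEST_REG, 0x1234);		/* token-pastes DEMO_TEST_REG */
	printf("0x%x\n", demo_read32(TEST_REG));	/* 0x1234 */
	return 0;
}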
dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n", + test_reg, rnd * 2); + return -EIO; + } + + rnd = prandom_u32(); + rnd <<= 31; + rnd |= prandom_u32(); + rocker_write64(rocker, TEST_REG64, rnd); + test_reg = rocker_read64(rocker, TEST_REG64); + if (test_reg != rnd * 2) { + dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n", + test_reg, rnd * 2); + return -EIO; + } + + return 0; +} + +static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait, + u32 test_type, dma_addr_t dma_handle, + unsigned char *buf, unsigned char *expect, + size_t size) +{ + struct pci_dev *pdev = rocker->pdev; + int i; + + rocker_wait_reset(wait); + rocker_write32(rocker, TEST_DMA_CTRL, test_type); + + if (!rocker_wait_event_timeout(wait, HZ / 10)) { + dev_err(&pdev->dev, "no interrupt received within a timeout\n"); + return -EIO; + } + + for (i = 0; i < size; i++) { + if (buf[i] != expect[i]) { + dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected", + buf[i], i, expect[i]); + return -EIO; + } + } + return 0; +} + +#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4) +#define ROCKER_TEST_DMA_FILL_PATTERN 0x96 + +static int rocker_dma_test_offset(struct rocker *rocker, + struct rocker_wait *wait, int offset) +{ + struct pci_dev *pdev = rocker->pdev; + unsigned char *alloc; + unsigned char *buf; + unsigned char *expect; + dma_addr_t dma_handle; + int i; + int err; + + alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset, + GFP_KERNEL | GFP_DMA); + if (!alloc) + return -ENOMEM; + buf = alloc + offset; + expect = buf + ROCKER_TEST_DMA_BUF_SIZE; + + dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(pdev, dma_handle)) { + err = -EIO; + goto free_alloc; + } + + rocker_write64(rocker, TEST_DMA_ADDR, dma_handle); + rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE); + + memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE); + err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL, + dma_handle, buf, expect, + ROCKER_TEST_DMA_BUF_SIZE); + if (err) + goto unmap; + + memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE); + err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR, + dma_handle, buf, expect, + ROCKER_TEST_DMA_BUF_SIZE); + if (err) + goto unmap; + + prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE); + for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++) + expect[i] = ~buf[i]; + err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT, + dma_handle, buf, expect, + ROCKER_TEST_DMA_BUF_SIZE); + if (err) + goto unmap; + +unmap: + pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE, + PCI_DMA_BIDIRECTIONAL); +free_alloc: + kfree(alloc); + + return err; +} + +static int rocker_dma_test(struct rocker *rocker, struct rocker_wait *wait) +{ + int i; + int err; + + for (i = 0; i < 8; i++) { + err = rocker_dma_test_offset(rocker, wait, i); + if (err) + return err; + } + return 0; +} + +static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id) +{ + struct rocker_wait *wait = dev_id; + + rocker_wait_wake_up(wait); + + return IRQ_HANDLED; +} + +static int rocker_basic_hw_test(struct rocker *rocker) +{ + struct pci_dev *pdev = rocker->pdev; + struct rocker_wait wait; + int err; + + err = rocker_reg_test(rocker); + if (err) { + dev_err(&pdev->dev, "reg test failed\n"); + return err; + } + + err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), + rocker_test_irq_handler, 0, + 
rocker_driver_name, &wait); + if (err) { + dev_err(&pdev->dev, "cannot assign test irq\n"); + return err; + } + + rocker_wait_init(&wait); + rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST); + + if (!rocker_wait_event_timeout(&wait, HZ / 10)) { + dev_err(&pdev->dev, "no interrupt received within a timeout\n"); + err = -EIO; + goto free_irq; + } + + err = rocker_dma_test(rocker, &wait); + if (err) + dev_err(&pdev->dev, "dma test failed\n"); + +free_irq: + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait); + return err; +} + +/****** + * TLV + ******/ + +#define ROCKER_TLV_ALIGNTO 8U +#define ROCKER_TLV_ALIGN(len) \ + (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1)) +#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv)) + +/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) ---> + * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ + * | Header | Pad | Payload | Pad | + * | (struct rocker_tlv) | ing | | ing | + * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ + * <--------------------------- tlv->len --------------------------> + */ + +static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv, + int *remaining) +{ + int totlen = ROCKER_TLV_ALIGN(tlv->len); + + *remaining -= totlen; + return (struct rocker_tlv *) ((char *) tlv + totlen); +} + +static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining) +{ + return remaining >= (int) ROCKER_TLV_HDRLEN && + tlv->len >= ROCKER_TLV_HDRLEN && + tlv->len <= remaining; +} + +#define rocker_tlv_for_each(pos, head, len, rem) \ + for (pos = head, rem = len; \ + rocker_tlv_ok(pos, rem); \ + pos = rocker_tlv_next(pos, &(rem))) + +#define rocker_tlv_for_each_nested(pos, tlv, rem) \ + rocker_tlv_for_each(pos, rocker_tlv_data(tlv), \ + rocker_tlv_len(tlv), rem) + +static int rocker_tlv_attr_size(int payload) +{ + return ROCKER_TLV_HDRLEN + payload; +} + +static int rocker_tlv_total_size(int payload) +{ + return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload)); +} + +static int rocker_tlv_padlen(int payload) +{ + return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload); +} + +static int rocker_tlv_type(const struct rocker_tlv *tlv) +{ + return tlv->type; +} + +static void *rocker_tlv_data(const struct rocker_tlv *tlv) +{ + return (char *) tlv + ROCKER_TLV_HDRLEN; +} + +static int rocker_tlv_len(const struct rocker_tlv *tlv) +{ + return tlv->len - ROCKER_TLV_HDRLEN; +} + +static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv) +{ + return *(u8 *) rocker_tlv_data(tlv); +} + +static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv) +{ + return *(u16 *) rocker_tlv_data(tlv); +} + +static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv) +{ + return *(__be16 *) rocker_tlv_data(tlv); +} + +static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv) +{ + return *(u32 *) rocker_tlv_data(tlv); +} + +static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv) +{ + return *(u64 *) rocker_tlv_data(tlv); +} + +static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype, + const char *buf, int buf_len) +{ + const struct rocker_tlv *tlv; + const struct rocker_tlv *head = (const struct rocker_tlv *) buf; + int rem; + + memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1)); + + rocker_tlv_for_each(tlv, head, buf_len, rem) { + u32 type = rocker_tlv_type(tlv); + + if (type > 0 && type <= maxtype) + tb[type] = (struct rocker_tlv *) tlv; + } +} + +static void rocker_tlv_parse_nested(struct rocker_tlv 
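The TLV geometry from the macros and diagram above, in numbers: with ROCKER_TLV_ALIGNTO of 8, tlv->len records the header plus the unpadded payload, while the stride to the next TLV is that length aligned up again. Whatever sizeof(struct rocker_tlv) is, any header of eight bytes or fewer aligns to a ROCKER_TLV_HDRLEN of 8; assuming that, a 6-byte payload works out as:

#include <stdio.h>

#define TLV_ALIGNTO	8U
#define TLV_ALIGN(len)	(((len) + TLV_ALIGNTO - 1) & ~(TLV_ALIGNTO - 1))
#define TLV_HDRLEN	TLV_ALIGN(6)	/* assumed header size; rounds to 8 */

int main(void)
{
	int payload = 6;
	int attr_size = TLV_HDRLEN + payload;	/* stored in tlv->len */
	int total_size = TLV_ALIGN(attr_size);	/* stride to the next TLV */

	printf("len=%d total=%d pad=%d\n",
	       attr_size, total_size, total_size - attr_size);
	return 0;	/* len=14 total=16 pad=2 */
}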
**tb, int maxtype, + const struct rocker_tlv *tlv) +{ + rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv), + rocker_tlv_len(tlv)); +} + +static void rocker_tlv_parse_desc(struct rocker_tlv **tb, int maxtype, + struct rocker_desc_info *desc_info) +{ + rocker_tlv_parse(tb, maxtype, desc_info->data, + desc_info->desc->tlv_size); +} + +static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info) +{ + return (struct rocker_tlv *) ((char *) desc_info->data + + desc_info->tlv_size); +} + +static int rocker_tlv_put(struct rocker_desc_info *desc_info, + int attrtype, int attrlen, const void *data) +{ + int tail_room = desc_info->data_size - desc_info->tlv_size; + int total_size = rocker_tlv_total_size(attrlen); + struct rocker_tlv *tlv; + + if (unlikely(tail_room < total_size)) + return -EMSGSIZE; + + tlv = rocker_tlv_start(desc_info); + desc_info->tlv_size += total_size; + tlv->type = attrtype; + tlv->len = rocker_tlv_attr_size(attrlen); + memcpy(rocker_tlv_data(tlv), data, attrlen); + memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen)); + return 0; +} + +static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info, + int attrtype, u8 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value); +} + +static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info, + int attrtype, u16 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value); +} + +static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info, + int attrtype, __be16 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value); +} + +static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info, + int attrtype, u32 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value); +} + +static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info, + int attrtype, __be32 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value); +} + +static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info, + int attrtype, u64 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value); +} + +static struct rocker_tlv * +rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype) +{ + struct rocker_tlv *start = rocker_tlv_start(desc_info); + + if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0) + return NULL; + + return start; +} + +static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info, + struct rocker_tlv *start) +{ + start->len = (char *) rocker_tlv_start(desc_info) - (char *) start; +} + +static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info, + struct rocker_tlv *start) +{ + desc_info->tlv_size = (char *) start - desc_info->data; +} + +/****************************************** + * DMA rings and descriptors manipulations + ******************************************/ + +static u32 __pos_inc(u32 pos, size_t limit) +{ + return ++pos == limit ? 0 : pos; +} + +static int rocker_desc_err(struct rocker_desc_info *desc_info) +{ + return -(desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN); +} + +static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info) +{ + desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN; +} + +static bool rocker_desc_gen(struct rocker_desc_info *desc_info) +{ + u32 comp_err = desc_info->desc->comp_err; + + return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? 
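rocker_tlv_nest_start()/rocker_tlv_nest_end() above write the nest header first and back-patch its length once the members are in place. A minimal flat-buffer sketch of the same idea, with a hypothetical 4-byte header and the 8-byte alignment padding omitted for brevity:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct tlv { uint16_t type; uint16_t len; };	/* hypothetical header */

static char buf[64];
static size_t used;

static struct tlv *put_hdr(uint16_t type, uint16_t len)
{
	struct tlv *t = (struct tlv *)(buf + used);

	t->type = type;
	t->len = len;
	used += sizeof(*t);
	return t;
}

static void put_u32(uint16_t type, uint32_t v)
{
	struct tlv *t = put_hdr(type, sizeof(struct tlv) + sizeof(v));

	memcpy(t + 1, &v, sizeof(v));	/* payload follows the header */
	used += sizeof(v);
}

int main(void)
{
	struct tlv *nest = put_hdr(1, 0);	/* length patched below */

	put_u32(2, 0xdead);
	put_u32(3, 0xbeef);
	nest->len = (char *)(buf + used) - (char *)nest;	/* nest_end */
	printf("nest len=%u\n", (unsigned)nest->len);
	return 0;	/* nest len = 4 + 2 * (4 + 4) = 20 */
}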
true : false;
+}
+
+static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
+{
+	return (void *) desc_info->desc->cookie;
+}
+
+static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
+				       void *ptr)
+{
+	desc_info->desc->cookie = (long) ptr;
+}
+
+static struct rocker_desc_info *
+rocker_desc_head_get(struct rocker_dma_ring_info *info)
+{
+	struct rocker_desc_info *desc_info;
+	u32 head = __pos_inc(info->head, info->size);
+
+	desc_info = &info->desc_info[info->head];
+	if (head == info->tail)
+		return NULL; /* ring full */
+	desc_info->tlv_size = 0;
+	return desc_info;
+}
+
+static void rocker_desc_commit(struct rocker_desc_info *desc_info)
+{
+	desc_info->desc->buf_size = desc_info->data_size;
+	desc_info->desc->tlv_size = desc_info->tlv_size;
+}
+
+static void rocker_desc_head_set(struct rocker *rocker,
+				 struct rocker_dma_ring_info *info,
+				 struct rocker_desc_info *desc_info)
+{
+	u32 head = __pos_inc(info->head, info->size);
+
+	BUG_ON(head == info->tail);
+	rocker_desc_commit(desc_info);
+	info->head = head;
+	rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
+}
+
+static struct rocker_desc_info *
+rocker_desc_tail_get(struct rocker_dma_ring_info *info)
+{
+	struct rocker_desc_info *desc_info;
+
+	if (info->tail == info->head)
+		return NULL; /* nothing to be done between head and tail */
+	desc_info = &info->desc_info[info->tail];
+	if (!rocker_desc_gen(desc_info))
+		return NULL; /* gen bit not set, desc is not ready yet */
+	info->tail = __pos_inc(info->tail, info->size);
+	desc_info->tlv_size = desc_info->desc->tlv_size;
+	return desc_info;
+}
+
+static void rocker_dma_ring_credits_set(struct rocker *rocker,
+					struct rocker_dma_ring_info *info,
+					u32 credits)
+{
+	if (credits)
+		rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
+}
+
+static unsigned long rocker_dma_ring_size_fix(size_t size)
+{
+	return max(ROCKER_DMA_SIZE_MIN,
+		   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
+}
+
+static int rocker_dma_ring_create(struct rocker *rocker,
+				  unsigned int type,
+				  size_t size,
+				  struct rocker_dma_ring_info *info)
+{
+	int i;
+
+	BUG_ON(size != rocker_dma_ring_size_fix(size));
+	info->size = size;
+	info->type = type;
+	info->head = 0;
+	info->tail = 0;
+	info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
+				  GFP_KERNEL);
+	if (!info->desc_info)
+		return -ENOMEM;
+
+	info->desc = pci_alloc_consistent(rocker->pdev,
+					  info->size * sizeof(*info->desc),
+					  &info->mapaddr);
+	if (!info->desc) {
+		kfree(info->desc_info);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < info->size; i++)
+		info->desc_info[i].desc = &info->desc[i];
+
+	rocker_write32(rocker, DMA_DESC_CTRL(info->type),
+		       ROCKER_DMA_DESC_CTRL_RESET);
+	rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
+	rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);
+
+	return 0;
+}
+
+static void rocker_dma_ring_destroy(struct rocker *rocker,
+				    struct rocker_dma_ring_info *info)
+{
+	rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
+
+	pci_free_consistent(rocker->pdev,
+			    info->size * sizeof(struct rocker_desc),
+			    info->desc, info->mapaddr);
+	kfree(info->desc_info);
+}
+
+static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
+					     struct rocker_dma_ring_info *info)
+{
+	int i;
+
+	BUG_ON(info->head || info->tail);
+
+	/* When ring is consumer, we need to advance head for each desc.
+	 * That tells hw that the desc is ready to be used by it.
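+	 * (Editorial walk-through, assuming a 4-entry ring for
+	 * illustration: head and tail both start at 0, the loop below
+	 * posts descriptors 0..2 and leaves one slot empty -- the same
+	 * one-empty-slot rule rocker_desc_head_get() relies on to
+	 * detect a full ring.  Completion is signalled by the hw
+	 * flipping the gen bit in comp_err, which is what
+	 * rocker_desc_tail_get() checks before advancing tail.)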
+	 */
+	for (i = 0; i < info->size - 1; i++)
+		rocker_desc_head_set(rocker, info, &info->desc_info[i]);
+	rocker_desc_commit(&info->desc_info[i]);
+}
+
+static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
+				      struct rocker_dma_ring_info *info,
+				      int direction, size_t buf_size)
+{
+	struct pci_dev *pdev = rocker->pdev;
+	int i;
+	int err;
+
+	for (i = 0; i < info->size; i++) {
+		struct rocker_desc_info *desc_info = &info->desc_info[i];
+		struct rocker_desc *desc = &info->desc[i];
+		dma_addr_t dma_handle;
+		char *buf;
+
+		buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
+		if (!buf) {
+			err = -ENOMEM;
+			goto rollback;
+		}
+
+		dma_handle = pci_map_single(pdev, buf, buf_size, direction);
+		if (pci_dma_mapping_error(pdev, dma_handle)) {
+			kfree(buf);
+			err = -EIO;
+			goto rollback;
+		}
+
+		desc_info->data = buf;
+		desc_info->data_size = buf_size;
+		dma_unmap_addr_set(desc_info, mapaddr, dma_handle);
+
+		desc->buf_addr = dma_handle;
+		desc->buf_size = buf_size;
+	}
+	return 0;
+
+rollback:
+	for (i--; i >= 0; i--) {
+		struct rocker_desc_info *desc_info = &info->desc_info[i];
+
+		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
+				 desc_info->data_size, direction);
+		kfree(desc_info->data);
+	}
+	return err;
+}
+
+static void rocker_dma_ring_bufs_free(struct rocker *rocker,
+				      struct rocker_dma_ring_info *info,
+				      int direction)
+{
+	struct pci_dev *pdev = rocker->pdev;
+	int i;
+
+	for (i = 0; i < info->size; i++) {
+		struct rocker_desc_info *desc_info = &info->desc_info[i];
+		struct rocker_desc *desc = &info->desc[i];
+
+		desc->buf_addr = 0;
+		desc->buf_size = 0;
+		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
+				 desc_info->data_size, direction);
+		kfree(desc_info->data);
+	}
+}
+
+static int rocker_dma_rings_init(struct rocker *rocker)
+{
+	struct pci_dev *pdev = rocker->pdev;
+	int err;
+
+	err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
+				     ROCKER_DMA_CMD_DEFAULT_SIZE,
+				     &rocker->cmd_ring);
+	if (err) {
+		dev_err(&pdev->dev, "failed to create command dma ring\n");
+		return err;
+	}
+
+	spin_lock_init(&rocker->cmd_ring_lock);
+
+	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
+					 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
+	if (err) {
+		dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
+		goto err_dma_cmd_ring_bufs_alloc;
+	}
+
+	err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
+				     ROCKER_DMA_EVENT_DEFAULT_SIZE,
+				     &rocker->event_ring);
+	if (err) {
+		dev_err(&pdev->dev, "failed to create event dma ring\n");
+		goto err_dma_event_ring_create;
+	}
+
+	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
+					 PCI_DMA_FROMDEVICE, PAGE_SIZE);
+	if (err) {
+		dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
+		goto err_dma_event_ring_bufs_alloc;
+	}
+	rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
+	return 0;
+
+err_dma_event_ring_bufs_alloc:
+	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
+err_dma_event_ring_create:
+	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
+				  PCI_DMA_BIDIRECTIONAL);
+err_dma_cmd_ring_bufs_alloc:
+	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
+	return err;
+}
+
+static void rocker_dma_rings_fini(struct rocker *rocker)
+{
+	rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
+				  PCI_DMA_FROMDEVICE);
+	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
+	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
+				  PCI_DMA_BIDIRECTIONAL);
+	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
+}
+
+static int rocker_dma_rx_ring_skb_map(struct rocker *rocker,
+				      struct
rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + struct sk_buff *skb, size_t buf_len) +{ + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + + dma_handle = pci_map_single(pdev, skb->data, buf_len, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(pdev, dma_handle)) + return -EIO; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle)) + goto tlv_put_failure; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len)) + goto tlv_put_failure; + return 0; + +tlv_put_failure: + pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE); + desc_info->tlv_size = 0; + return -EMSGSIZE; +} + +static size_t rocker_port_rx_buf_len(struct rocker_port *rocker_port) +{ + return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; +} + +static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info) +{ + struct net_device *dev = rocker_port->dev; + struct sk_buff *skb; + size_t buf_len = rocker_port_rx_buf_len(rocker_port); + int err; + + /* Ensure that hw will see tlv_size zero in case of an error. + * That tells hw to use another descriptor. + */ + rocker_desc_cookie_ptr_set(desc_info, NULL); + desc_info->tlv_size = 0; + + skb = netdev_alloc_skb_ip_align(dev, buf_len); + if (!skb) + return -ENOMEM; + err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info, + skb, buf_len); + if (err) { + dev_kfree_skb_any(skb); + return err; + } + rocker_desc_cookie_ptr_set(desc_info, skb); + return 0; +} + +static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker, + struct rocker_tlv **attrs) +{ + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + size_t len; + + if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] || + !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]) + return; + dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]); + len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]); + pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE); +} + +static void rocker_dma_rx_ring_skb_free(struct rocker *rocker, + struct rocker_desc_info *desc_info) +{ + struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; + struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); + + if (!skb) + return; + rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); + rocker_dma_rx_ring_skb_unmap(rocker, attrs); + dev_kfree_skb_any(skb); +} + +static int rocker_dma_rx_ring_skbs_alloc(struct rocker *rocker, + struct rocker_port *rocker_port) +{ + struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; + int i; + int err; + + for (i = 0; i < rx_ring->size; i++) { + err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, + &rx_ring->desc_info[i]); + if (err) + goto rollback; + } + return 0; + +rollback: + for (i--; i >= 0; i--) + rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); + return err; +} + +static void rocker_dma_rx_ring_skbs_free(struct rocker *rocker, + struct rocker_port *rocker_port) +{ + struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; + int i; + + for (i = 0; i < rx_ring->size; i++) + rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); +} + +static int rocker_port_dma_rings_init(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + int err; + + err = rocker_dma_ring_create(rocker, + ROCKER_DMA_TX(rocker_port->port_number), + ROCKER_DMA_TX_DEFAULT_SIZE, + &rocker_port->tx_ring); + if (err) { + netdev_err(rocker_port->dev, "failed to create tx dma ring\n"); + return err; + } + + err = 
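/* (Illustrative arithmetic, not part of the patch: with the default
 * 1500-byte MTU, rocker_port_rx_buf_len() above comes to
 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 1522 bytes,
 * i.e. one full VLAN-tagged frame per rx descriptor.)
 */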
rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
+					 PCI_DMA_TODEVICE,
+					 ROCKER_DMA_TX_DESC_SIZE);
+	if (err) {
+		netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
+		goto err_dma_tx_ring_bufs_alloc;
+	}
+
+	err = rocker_dma_ring_create(rocker,
+				     ROCKER_DMA_RX(rocker_port->port_number),
+				     ROCKER_DMA_RX_DEFAULT_SIZE,
+				     &rocker_port->rx_ring);
+	if (err) {
+		netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
+		goto err_dma_rx_ring_create;
+	}
+
+	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
+					 PCI_DMA_BIDIRECTIONAL,
+					 ROCKER_DMA_RX_DESC_SIZE);
+	if (err) {
+		netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
+		goto err_dma_rx_ring_bufs_alloc;
+	}
+
+	err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port);
+	if (err) {
+		netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
+		goto err_dma_rx_ring_skbs_alloc;
+	}
+	rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);
+
+	return 0;
+
+err_dma_rx_ring_skbs_alloc:
+	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
+				  PCI_DMA_BIDIRECTIONAL);
+err_dma_rx_ring_bufs_alloc:
+	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
+err_dma_rx_ring_create:
+	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
+				  PCI_DMA_TODEVICE);
+err_dma_tx_ring_bufs_alloc:
+	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
+	return err;
+}
+
+static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
+{
+	struct rocker *rocker = rocker_port->rocker;
+
+	rocker_dma_rx_ring_skbs_free(rocker, rocker_port);
+	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
+				  PCI_DMA_BIDIRECTIONAL);
+	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
+	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
+				  PCI_DMA_TODEVICE);
+	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
+}
+
+static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
+{
+	u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
+
+	if (enable)
+		val |= 1ULL << rocker_port->lport;
+	else
+		val &= ~(1ULL << rocker_port->lport);
+	rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
+}
+
+/********************************
+ * Interrupt handler and helpers
+ ********************************/
+
+static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
+{
+	struct rocker *rocker = dev_id;
+	struct rocker_desc_info *desc_info;
+	struct rocker_wait *wait;
+	u32 credits = 0;
+
+	spin_lock(&rocker->cmd_ring_lock);
+	while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
+		wait = rocker_desc_cookie_ptr_get(desc_info);
+		if (wait->nowait) {
+			rocker_desc_gen_clear(desc_info);
+			rocker_wait_destroy(wait);
+		} else {
+			rocker_wait_wake_up(wait);
+		}
+		credits++;
+	}
+	spin_unlock(&rocker->cmd_ring_lock);
+	rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
+
+	return IRQ_HANDLED;
+}
+
+static void rocker_port_link_up(struct rocker_port *rocker_port)
+{
+	netif_carrier_on(rocker_port->dev);
+	netdev_info(rocker_port->dev, "Link is up\n");
+}
+
+static void rocker_port_link_down(struct rocker_port *rocker_port)
+{
+	netif_carrier_off(rocker_port->dev);
+	netdev_info(rocker_port->dev, "Link is down\n");
+}
+
+static int rocker_event_link_change(struct rocker *rocker,
+				    const struct rocker_tlv *info)
+{
+	struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
+	unsigned int port_number;
+	bool link_up;
+	struct rocker_port *rocker_port;
+
+	rocker_tlv_parse_nested(attrs,
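/* (Editorial note on the completion path above: the cmd ring irq
 * handler returns one credit per reaped descriptor through
 * rocker_dma_ring_credits_set(), so reaping e.g. three completions in
 * one interrupt writes 3 to DMA_DESC_CREDITS(ROCKER_DMA_CMD) and lets
 * the device reuse that many slots.)
 */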
ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info); + if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT] || + !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]) + return -EIO; + port_number = + rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT]) - 1; + link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]); + + if (port_number >= rocker->port_count) + return -EINVAL; + + rocker_port = rocker->ports[port_number]; + if (netif_carrier_ok(rocker_port->dev) != link_up) { + if (link_up) + rocker_port_link_up(rocker_port); + else + rocker_port_link_down(rocker_port); + } + + return 0; +} + +#define ROCKER_OP_FLAG_REMOVE BIT(0) +#define ROCKER_OP_FLAG_NOWAIT BIT(1) +#define ROCKER_OP_FLAG_LEARNED BIT(2) +#define ROCKER_OP_FLAG_REFRESH BIT(3) + +static int rocker_port_fdb(struct rocker_port *rocker_port, + const unsigned char *addr, + __be16 vlan_id, int flags); + +static int rocker_event_mac_vlan_seen(struct rocker *rocker, + const struct rocker_tlv *info) +{ + struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1]; + unsigned int port_number; + struct rocker_port *rocker_port; + unsigned char *addr; + int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED; + __be16 vlan_id; + + rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info); + if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT] || + !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] || + !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]) + return -EIO; + port_number = + rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT]) - 1; + addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]); + vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]); + + if (port_number >= rocker->port_count) + return -EINVAL; + + rocker_port = rocker->ports[port_number]; + + if (rocker_port->stp_state != BR_STATE_LEARNING && + rocker_port->stp_state != BR_STATE_FORWARDING) + return 0; + + return rocker_port_fdb(rocker_port, addr, vlan_id, flags); +} + +static int rocker_event_process(struct rocker *rocker, + struct rocker_desc_info *desc_info) +{ + struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1]; + struct rocker_tlv *info; + u16 type; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info); + if (!attrs[ROCKER_TLV_EVENT_TYPE] || + !attrs[ROCKER_TLV_EVENT_INFO]) + return -EIO; + + type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]); + info = attrs[ROCKER_TLV_EVENT_INFO]; + + switch (type) { + case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED: + return rocker_event_link_change(rocker, info); + case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN: + return rocker_event_mac_vlan_seen(rocker, info); + } + + return -EOPNOTSUPP; +} + +static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id) +{ + struct rocker *rocker = dev_id; + struct pci_dev *pdev = rocker->pdev; + struct rocker_desc_info *desc_info; + u32 credits = 0; + int err; + + while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) { + err = rocker_desc_err(desc_info); + if (err) { + dev_err(&pdev->dev, "event desc received with err %d\n", + err); + } else { + err = rocker_event_process(rocker, desc_info); + if (err) + dev_err(&pdev->dev, "event processing failed with err %d\n", + err); + } + rocker_desc_gen_clear(desc_info); + rocker_desc_head_set(rocker, &rocker->event_ring, desc_info); + credits++; + } + rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits); + + return IRQ_HANDLED; +} + +static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id) +{ + struct rocker_port *rocker_port = dev_id; + + napi_schedule(&rocker_port->napi_tx); + return 
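/* (Illustrative: the ROCKER_OP_FLAG_* bits defined above combine
 * freely -- rocker_event_mac_vlan_seen() passes
 * ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED so the fdb update is
 * issued from irq context without sleeping, while adding
 * ROCKER_OP_FLAG_REMOVE would turn the same rocker_port_fdb() call
 * into a delete.)
 */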
IRQ_HANDLED; +} + +static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id) +{ + struct rocker_port *rocker_port = dev_id; + + napi_schedule(&rocker_port->napi_rx); + return IRQ_HANDLED; +} + +/******************** + * Command interface + ********************/ + +typedef int (*rocker_cmd_cb_t)(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv); + +static int rocker_cmd_exec(struct rocker *rocker, + struct rocker_port *rocker_port, + rocker_cmd_cb_t prepare, void *prepare_priv, + rocker_cmd_cb_t process, void *process_priv, + bool nowait) +{ + struct rocker_desc_info *desc_info; + struct rocker_wait *wait; + unsigned long flags; + int err; + + wait = rocker_wait_create(nowait ? GFP_ATOMIC : GFP_KERNEL); + if (!wait) + return -ENOMEM; + wait->nowait = nowait; + + spin_lock_irqsave(&rocker->cmd_ring_lock, flags); + desc_info = rocker_desc_head_get(&rocker->cmd_ring); + if (!desc_info) { + spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags); + err = -EAGAIN; + goto out; + } + err = prepare(rocker, rocker_port, desc_info, prepare_priv); + if (err) { + spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags); + goto out; + } + rocker_desc_cookie_ptr_set(desc_info, wait); + rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info); + spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags); + + if (nowait) + return 0; + + if (!rocker_wait_event_timeout(wait, HZ / 10)) + return -EIO; + + err = rocker_desc_err(desc_info); + if (err) + return err; + + if (process) + err = process(rocker, rocker_port, desc_info, process_priv); + + rocker_desc_gen_clear(desc_info); +out: + rocker_wait_destroy(wait); + return err; +} + +static int +rocker_cmd_get_port_settings_prep(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, + rocker_port->lport)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct ethtool_cmd *ecmd = priv; + struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + u32 speed; + u8 duplex; + u8 autoneg; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] || + !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] || + !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]) + return -EIO; + + speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]); + duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]); + autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]); + + ecmd->transceiver = XCVR_INTERNAL; + ecmd->supported = SUPPORTED_TP; + ecmd->phy_address = 0xff; + ecmd->port = PORT_TP; + ethtool_cmd_speed_set(ecmd, speed); + ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF; + ecmd->autoneg = autoneg ? 
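/* (A minimal usage sketch for rocker_cmd_exec() above; my_prep_cb and
 * my_priv are hypothetical stand-ins, not symbols from this patch.
 * With nowait == false the caller sleeps up to HZ / 10 for the
 * completion irq; with nowait == true the wait object is instead freed
 * by rocker_cmd_irq_handler():
 *
 *	err = rocker_cmd_exec(rocker, rocker_port,
 *			      my_prep_cb, my_priv,
 *			      NULL, NULL, false);
 * )
 */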
AUTONEG_ENABLE : AUTONEG_DISABLE; + + return 0; +} + +static int +rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + unsigned char *macaddr = priv; + struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + struct rocker_tlv *attr; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]; + if (!attr) + return -EIO; + + if (rocker_tlv_len(attr) != ETH_ALEN) + return -EINVAL; + + ether_addr_copy(macaddr, rocker_tlv_data(attr)); + return 0; +} + +static int +rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct ethtool_cmd *ecmd = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, + rocker_port->lport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, + ethtool_cmd_speed(ecmd))) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, + ecmd->duplex)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, + ecmd->autoneg)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + unsigned char *macaddr = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, + rocker_port->lport)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, + ETH_ALEN, macaddr)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_set_port_learning_prep(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, + rocker_port->lport)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, + !!(rocker_port->brport_flags & BR_LEARNING))) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, + struct ethtool_cmd *ecmd) +{ + return rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_get_port_settings_prep, NULL, + 
rocker_cmd_get_port_settings_ethtool_proc, + ecmd, false); +} + +static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port, + unsigned char *macaddr) +{ + return rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_get_port_settings_prep, NULL, + rocker_cmd_get_port_settings_macaddr_proc, + macaddr, false); +} + +static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, + struct ethtool_cmd *ecmd) +{ + return rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_set_port_settings_ethtool_prep, + ecmd, NULL, NULL, false); +} + +static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, + unsigned char *macaddr) +{ + return rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_set_port_settings_macaddr_prep, + macaddr, NULL, NULL, false); +} + +static int rocker_port_set_learning(struct rocker_port *rocker_port) +{ + return rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_set_port_learning_prep, + NULL, NULL, NULL, false); +} + +static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, + struct rocker_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, + entry->key.ig_port.in_lport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK, + entry->key.ig_port.in_lport_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.ig_port.goto_tbl)) + return -EMSGSIZE; + + return 0; +} + +static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, + struct rocker_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, + entry->key.vlan.in_lport)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.vlan.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, + entry->key.vlan.vlan_id_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.vlan.goto_tbl)) + return -EMSGSIZE; + if (entry->key.vlan.untagged && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID, + entry->key.vlan.new_vlan_id)) + return -EMSGSIZE; + + return 0; +} + +static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info, + struct rocker_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, + entry->key.term_mac.in_lport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK, + entry->key.term_mac.in_lport_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, + entry->key.term_mac.eth_type)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->key.term_mac.eth_dst)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, + ETH_ALEN, entry->key.term_mac.eth_dst_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.term_mac.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, + entry->key.term_mac.vlan_id_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.term_mac.goto_tbl)) + return -EMSGSIZE; + if (entry->key.term_mac.copy_to_cpu && + rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, + entry->key.term_mac.copy_to_cpu)) + 
return -EMSGSIZE; + + return 0; +} + +static int +rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info, + struct rocker_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, + entry->key.ucast_routing.eth_type)) + return -EMSGSIZE; + if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP, + entry->key.ucast_routing.dst4)) + return -EMSGSIZE; + if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK, + entry->key.ucast_routing.dst4_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.ucast_routing.goto_tbl)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->key.ucast_routing.group_id)) + return -EMSGSIZE; + + return 0; +} + +static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info, + struct rocker_flow_tbl_entry *entry) +{ + if (entry->key.bridge.has_eth_dst && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->key.bridge.eth_dst)) + return -EMSGSIZE; + if (entry->key.bridge.has_eth_dst_mask && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, + ETH_ALEN, entry->key.bridge.eth_dst_mask)) + return -EMSGSIZE; + if (entry->key.bridge.vlan_id && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.bridge.vlan_id)) + return -EMSGSIZE; + if (entry->key.bridge.tunnel_id && + rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID, + entry->key.bridge.tunnel_id)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.bridge.goto_tbl)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->key.bridge.group_id)) + return -EMSGSIZE; + if (entry->key.bridge.copy_to_cpu && + rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, + entry->key.bridge.copy_to_cpu)) + return -EMSGSIZE; + + return 0; +} + +static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info, + struct rocker_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, + entry->key.acl.in_lport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK, + entry->key.acl.in_lport_mask)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, + ETH_ALEN, entry->key.acl.eth_src)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK, + ETH_ALEN, entry->key.acl.eth_src_mask)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->key.acl.eth_dst)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, + ETH_ALEN, entry->key.acl.eth_dst_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, + entry->key.acl.eth_type)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.acl.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, + entry->key.acl.vlan_id_mask)) + return -EMSGSIZE; + + switch (ntohs(entry->key.acl.eth_type)) { + case ETH_P_IP: + case ETH_P_IPV6: + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO, + entry->key.acl.ip_proto)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, + ROCKER_TLV_OF_DPA_IP_PROTO_MASK, + entry->key.acl.ip_proto_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP, + entry->key.acl.ip_tos & 0x3f)) + 
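/* (Editorial arithmetic note: the split above takes the DSCP field
 * from the low six bits of ip_tos and ECN from the top two, so
 * ip_tos == 0x41 yields DSCP 0x01 and ECN 0x01.  RFC 2474 places DSCP
 * in the upper six bits of the TOS byte, so callers must supply ip_tos
 * in the layout this encoder expects.)
 */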
return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, + ROCKER_TLV_OF_DPA_IP_DSCP_MASK, + entry->key.acl.ip_tos_mask & 0x3f)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN, + (entry->key.acl.ip_tos & 0xc0) >> 6)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, + ROCKER_TLV_OF_DPA_IP_ECN_MASK, + (entry->key.acl.ip_tos_mask & 0xc0) >> 6)) + return -EMSGSIZE; + break; + } + + if (entry->key.acl.group_id != ROCKER_GROUP_NONE && + rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->key.acl.group_id)) + return -EMSGSIZE; + + return 0; +} + +static int rocker_cmd_flow_tbl_add(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_flow_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + int err = 0; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID, + entry->key.tbl_id)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY, + entry->key.priority)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0)) + return -EMSGSIZE; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, + entry->cookie)) + return -EMSGSIZE; + + switch (entry->key.tbl_id) { + case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT: + err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_VLAN: + err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC: + err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING: + err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_BRIDGING: + err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY: + err = rocker_cmd_flow_tbl_add_acl(desc_info, entry); + break; + default: + err = -ENOTSUPP; + break; + } + + if (err) + return err; + + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +static int rocker_cmd_flow_tbl_del(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + const struct rocker_flow_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, + entry->cookie)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +static int +rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info, + struct rocker_group_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_LPORT, + ROCKER_GROUP_PORT_GET(entry->group_id))) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN, + entry->l2_interface.pop_vlan)) + return -EMSGSIZE; + + return 0; +} + +static int +rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info, + struct rocker_group_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, + entry->l2_rewrite.group_id)) + return -EMSGSIZE; 
+ if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, + ETH_ALEN, entry->l2_rewrite.eth_src)) + return -EMSGSIZE; + if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->l2_rewrite.eth_dst)) + return -EMSGSIZE; + if (entry->l2_rewrite.vlan_id && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->l2_rewrite.vlan_id)) + return -EMSGSIZE; + + return 0; +} + +static int +rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info, + struct rocker_group_tbl_entry *entry) +{ + int i; + struct rocker_tlv *group_ids; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT, + entry->group_count)) + return -EMSGSIZE; + + group_ids = rocker_tlv_nest_start(desc_info, + ROCKER_TLV_OF_DPA_GROUP_IDS); + if (!group_ids) + return -EMSGSIZE; + + for (i = 0; i < entry->group_count; i++) + /* Note TLV array is 1-based */ + if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i])) + return -EMSGSIZE; + + rocker_tlv_nest_end(desc_info, group_ids); + + return 0; +} + +static int +rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info, + struct rocker_group_tbl_entry *entry) +{ + if (!is_zero_ether_addr(entry->l3_unicast.eth_src) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, + ETH_ALEN, entry->l3_unicast.eth_src)) + return -EMSGSIZE; + if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->l3_unicast.eth_dst)) + return -EMSGSIZE; + if (entry->l3_unicast.vlan_id && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->l3_unicast.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK, + entry->l3_unicast.ttl_check)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, + entry->l3_unicast.group_id)) + return -EMSGSIZE; + + return 0; +} + +static int rocker_cmd_group_tbl_add(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_group_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + int err = 0; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->group_id)) + return -EMSGSIZE; + + switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { + case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE: + err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE: + err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: + case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: + err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST: + err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry); + break; + default: + err = -ENOTSUPP; + break; + } + + if (err) + return err; + + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +static int rocker_cmd_group_tbl_del(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + const struct rocker_group_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + 
return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->group_id)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +/***************************************** + * Flow, group, FDB, internal VLAN tables + *****************************************/ + +static int rocker_init_tbls(struct rocker *rocker) +{ + hash_init(rocker->flow_tbl); + spin_lock_init(&rocker->flow_tbl_lock); + + hash_init(rocker->group_tbl); + spin_lock_init(&rocker->group_tbl_lock); + + hash_init(rocker->fdb_tbl); + spin_lock_init(&rocker->fdb_tbl_lock); + + hash_init(rocker->internal_vlan_tbl); + spin_lock_init(&rocker->internal_vlan_tbl_lock); + + return 0; +} + +static void rocker_free_tbls(struct rocker *rocker) +{ + unsigned long flags; + struct rocker_flow_tbl_entry *flow_entry; + struct rocker_group_tbl_entry *group_entry; + struct rocker_fdb_tbl_entry *fdb_entry; + struct rocker_internal_vlan_tbl_entry *internal_vlan_entry; + struct hlist_node *tmp; + int bkt; + + spin_lock_irqsave(&rocker->flow_tbl_lock, flags); + hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry) + hash_del(&flow_entry->entry); + spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); + + spin_lock_irqsave(&rocker->group_tbl_lock, flags); + hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry) + hash_del(&group_entry->entry); + spin_unlock_irqrestore(&rocker->group_tbl_lock, flags); + + spin_lock_irqsave(&rocker->fdb_tbl_lock, flags); + hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry) + hash_del(&fdb_entry->entry); + spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags); + + spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags); + hash_for_each_safe(rocker->internal_vlan_tbl, bkt, + tmp, internal_vlan_entry, entry) + hash_del(&internal_vlan_entry->entry); + spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags); +} + +static struct rocker_flow_tbl_entry * +rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match) +{ + struct rocker_flow_tbl_entry *found; + + hash_for_each_possible(rocker->flow_tbl, found, + entry, match->key_crc32) { + if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0) + return found; + } + + return NULL; +} + +static int rocker_flow_tbl_add(struct rocker_port *rocker_port, + struct rocker_flow_tbl_entry *match, + bool nowait) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_flow_tbl_entry *found; + unsigned long flags; + bool add_to_hw = false; + int err = 0; + + match->key_crc32 = crc32(~0, &match->key, sizeof(match->key)); + + spin_lock_irqsave(&rocker->flow_tbl_lock, flags); + + found = rocker_flow_tbl_find(rocker, match); + + if (found) { + kfree(match); + } else { + found = match; + found->cookie = rocker->flow_tbl_next_cookie++; + hash_add(rocker->flow_tbl, &found->entry, found->key_crc32); + add_to_hw = true; + } + + found->ref_count++; + + spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); + + if (add_to_hw) { + err = rocker_cmd_exec(rocker, rocker_port, + rocker_cmd_flow_tbl_add, + found, NULL, NULL, nowait); + if (err) { + spin_lock_irqsave(&rocker->flow_tbl_lock, flags); + hash_del(&found->entry); + spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); + kfree(found); + } + } + + return err; +} + +static int rocker_flow_tbl_del(struct rocker_port *rocker_port, + struct rocker_flow_tbl_entry *match, + bool nowait) +{ + struct rocker 
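/* (Editorial note on the scheme above: flow entries are keyed by a
 * crc32 of the flow key and reference-counted, so two callers
 * installing an identical flow share one hw entry -- the second
 * rocker_flow_tbl_add() merely bumps ref_count, and
 * rocker_flow_tbl_del() here only issues the hw delete once the count
 * drops back to zero.)
 */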
*rocker = rocker_port->rocker; + struct rocker_flow_tbl_entry *found; + unsigned long flags; + bool del_from_hw = false; + int err = 0; + + match->key_crc32 = crc32(~0, &match->key, sizeof(match->key)); + + spin_lock_irqsave(&rocker->flow_tbl_lock, flags); + + found = rocker_flow_tbl_find(rocker, match); + + if (found) { + found->ref_count--; + if (found->ref_count == 0) { + hash_del(&found->entry); + del_from_hw = true; + } + } + + spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); + + kfree(match); + + if (del_from_hw) { + err = rocker_cmd_exec(rocker, rocker_port, + rocker_cmd_flow_tbl_del, + found, NULL, NULL, nowait); + kfree(found); + } + + return err; +} + +static gfp_t rocker_op_flags_gfp(int flags) +{ + return flags & ROCKER_OP_FLAG_NOWAIT ? GFP_ATOMIC : GFP_KERNEL; +} + +static int rocker_flow_tbl_do(struct rocker_port *rocker_port, + int flags, struct rocker_flow_tbl_entry *entry) +{ + bool nowait = flags & ROCKER_OP_FLAG_NOWAIT; + + if (flags & ROCKER_OP_FLAG_REMOVE) + return rocker_flow_tbl_del(rocker_port, entry, nowait); + else + return rocker_flow_tbl_add(rocker_port, entry, nowait); +} + +static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port, + int flags, u32 in_lport, u32 in_lport_mask, + enum rocker_of_dpa_table_id goto_tbl) +{ + struct rocker_flow_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + entry->key.priority = ROCKER_PRIORITY_IG_PORT; + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT; + entry->key.ig_port.in_lport = in_lport; + entry->key.ig_port.in_lport_mask = in_lport_mask; + entry->key.ig_port.goto_tbl = goto_tbl; + + return rocker_flow_tbl_do(rocker_port, flags, entry); +} + +static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, + int flags, u32 in_lport, + __be16 vlan_id, __be16 vlan_id_mask, + enum rocker_of_dpa_table_id goto_tbl, + bool untagged, __be16 new_vlan_id) +{ + struct rocker_flow_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + entry->key.priority = ROCKER_PRIORITY_VLAN; + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN; + entry->key.vlan.in_lport = in_lport; + entry->key.vlan.vlan_id = vlan_id; + entry->key.vlan.vlan_id_mask = vlan_id_mask; + entry->key.vlan.goto_tbl = goto_tbl; + + entry->key.vlan.untagged = untagged; + entry->key.vlan.new_vlan_id = new_vlan_id; + + return rocker_flow_tbl_do(rocker_port, flags, entry); +} + +static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port, + u32 in_lport, u32 in_lport_mask, + __be16 eth_type, const u8 *eth_dst, + const u8 *eth_dst_mask, __be16 vlan_id, + __be16 vlan_id_mask, bool copy_to_cpu, + int flags) +{ + struct rocker_flow_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + if (is_multicast_ether_addr(eth_dst)) { + entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST; + entry->key.term_mac.goto_tbl = + ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING; + } else { + entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST; + entry->key.term_mac.goto_tbl = + ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; + } + + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; + entry->key.term_mac.in_lport = in_lport; + entry->key.term_mac.in_lport_mask = in_lport_mask; + entry->key.term_mac.eth_type = eth_type; + ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst); + ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask); + entry->key.term_mac.vlan_id = 
vlan_id; + entry->key.term_mac.vlan_id_mask = vlan_id_mask; + entry->key.term_mac.copy_to_cpu = copy_to_cpu; + + return rocker_flow_tbl_do(rocker_port, flags, entry); +} + +static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port, + int flags, + const u8 *eth_dst, const u8 *eth_dst_mask, + __be16 vlan_id, u32 tunnel_id, + enum rocker_of_dpa_table_id goto_tbl, + u32 group_id, bool copy_to_cpu) +{ + struct rocker_flow_tbl_entry *entry; + u32 priority; + bool vlan_bridging = !!vlan_id; + bool dflt = !eth_dst || (eth_dst && eth_dst_mask); + bool wild = false; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING; + + if (eth_dst) { + entry->key.bridge.has_eth_dst = 1; + ether_addr_copy(entry->key.bridge.eth_dst, eth_dst); + } + if (eth_dst_mask) { + entry->key.bridge.has_eth_dst_mask = 1; + ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask); + if (memcmp(eth_dst_mask, ff_mac, ETH_ALEN)) + wild = true; + } + + priority = ROCKER_PRIORITY_UNKNOWN; + if (vlan_bridging && dflt && wild) + priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD; + else if (vlan_bridging && dflt && !wild) + priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT; + else if (vlan_bridging && !dflt) + priority = ROCKER_PRIORITY_BRIDGING_VLAN; + else if (!vlan_bridging && dflt && wild) + priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD; + else if (!vlan_bridging && dflt && !wild) + priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT; + else if (!vlan_bridging && !dflt) + priority = ROCKER_PRIORITY_BRIDGING_TENANT; + + entry->key.priority = priority; + entry->key.bridge.vlan_id = vlan_id; + entry->key.bridge.tunnel_id = tunnel_id; + entry->key.bridge.goto_tbl = goto_tbl; + entry->key.bridge.group_id = group_id; + entry->key.bridge.copy_to_cpu = copy_to_cpu; + + return rocker_flow_tbl_do(rocker_port, flags, entry); +} + +static int rocker_flow_tbl_acl(struct rocker_port *rocker_port, + int flags, u32 in_lport, + u32 in_lport_mask, + const u8 *eth_src, const u8 *eth_src_mask, + const u8 *eth_dst, const u8 *eth_dst_mask, + __be16 eth_type, + __be16 vlan_id, __be16 vlan_id_mask, + u8 ip_proto, u8 ip_proto_mask, + u8 ip_tos, u8 ip_tos_mask, + u32 group_id) +{ + u32 priority; + struct rocker_flow_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + priority = ROCKER_PRIORITY_ACL_NORMAL; + if (eth_dst && eth_dst_mask) { + if (memcmp(eth_dst_mask, mcast_mac, ETH_ALEN) == 0) + priority = ROCKER_PRIORITY_ACL_DFLT; + else if (is_link_local_ether_addr(eth_dst)) + priority = ROCKER_PRIORITY_ACL_CTRL; + } + + entry->key.priority = priority; + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + entry->key.acl.in_lport = in_lport; + entry->key.acl.in_lport_mask = in_lport_mask; + + if (eth_src) + ether_addr_copy(entry->key.acl.eth_src, eth_src); + if (eth_src_mask) + ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask); + if (eth_dst) + ether_addr_copy(entry->key.acl.eth_dst, eth_dst); + if (eth_dst_mask) + ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask); + + entry->key.acl.eth_type = eth_type; + entry->key.acl.vlan_id = vlan_id; + entry->key.acl.vlan_id_mask = vlan_id_mask; + entry->key.acl.ip_proto = ip_proto; + entry->key.acl.ip_proto_mask = ip_proto_mask; + entry->key.acl.ip_tos = ip_tos; + entry->key.acl.ip_tos_mask = ip_tos_mask; + entry->key.acl.group_id = group_id; + + return rocker_flow_tbl_do(rocker_port, flags, entry); +} + 
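/* A caller's-eye sketch of the flow-table helpers above (illustrative;
 * the values mirror rocker_port_ig_tbl() later in this patch).
 * Install and removal differ only in the flags word, and
 * ROCKER_OP_FLAG_NOWAIT additionally selects GFP_ATOMIC allocation
 * plus fire-and-forget command submission:
 *
 *	err = rocker_flow_tbl_ig_port(rocker_port, 0,
 *				      0, 0xffff0000,
 *				      ROCKER_OF_DPA_TABLE_ID_VLAN);
 *	...
 *	err = rocker_flow_tbl_ig_port(rocker_port,
 *				      ROCKER_OP_FLAG_REMOVE |
 *				      ROCKER_OP_FLAG_NOWAIT,
 *				      0, 0xffff0000,
 *				      ROCKER_OF_DPA_TABLE_ID_VLAN);
 */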
+static struct rocker_group_tbl_entry * +rocker_group_tbl_find(struct rocker *rocker, + struct rocker_group_tbl_entry *match) +{ + struct rocker_group_tbl_entry *found; + + hash_for_each_possible(rocker->group_tbl, found, + entry, match->group_id) { + if (found->group_id == match->group_id) + return found; + } + + return NULL; +} + +static void rocker_group_tbl_entry_free(struct rocker_group_tbl_entry *entry) +{ + switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { + case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: + case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: + kfree(entry->group_ids); + break; + default: + break; + } + kfree(entry); +} + +static int rocker_group_tbl_add(struct rocker_port *rocker_port, + struct rocker_group_tbl_entry *match, + bool nowait) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_group_tbl_entry *found; + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&rocker->group_tbl_lock, flags); + + found = rocker_group_tbl_find(rocker, match); + + if (found) { + hash_del(&found->entry); + rocker_group_tbl_entry_free(found); + found = match; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD; + } else { + found = match; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD; + } + + hash_add(rocker->group_tbl, &found->entry, found->group_id); + + spin_unlock_irqrestore(&rocker->group_tbl_lock, flags); + + if (found->cmd) + err = rocker_cmd_exec(rocker, rocker_port, + rocker_cmd_group_tbl_add, + found, NULL, NULL, nowait); + + return err; +} + +static int rocker_group_tbl_del(struct rocker_port *rocker_port, + struct rocker_group_tbl_entry *match, + bool nowait) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_group_tbl_entry *found; + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&rocker->group_tbl_lock, flags); + + found = rocker_group_tbl_find(rocker, match); + + if (found) { + hash_del(&found->entry); + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL; + } + + spin_unlock_irqrestore(&rocker->group_tbl_lock, flags); + + rocker_group_tbl_entry_free(match); + + if (found) { + err = rocker_cmd_exec(rocker, rocker_port, + rocker_cmd_group_tbl_del, + found, NULL, NULL, nowait); + rocker_group_tbl_entry_free(found); + } + + return err; +} + +static int rocker_group_tbl_do(struct rocker_port *rocker_port, + int flags, struct rocker_group_tbl_entry *entry) +{ + bool nowait = flags & ROCKER_OP_FLAG_NOWAIT; + + if (flags & ROCKER_OP_FLAG_REMOVE) + return rocker_group_tbl_del(rocker_port, entry, nowait); + else + return rocker_group_tbl_add(rocker_port, entry, nowait); +} + +static int rocker_group_l2_interface(struct rocker_port *rocker_port, + int flags, __be16 vlan_id, + u32 out_lport, int pop_vlan) +{ + struct rocker_group_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport); + entry->l2_interface.pop_vlan = pop_vlan; + + return rocker_group_tbl_do(rocker_port, flags, entry); +} + +static int rocker_group_l2_fan_out(struct rocker_port *rocker_port, + int flags, u8 group_count, + u32 *group_ids, u32 group_id) +{ + struct rocker_group_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + entry->group_id = group_id; + entry->group_count = group_count; + + entry->group_ids = kcalloc(group_count, sizeof(u32), + rocker_op_flags_gfp(flags)); + if (!entry->group_ids) { + kfree(entry); + return -ENOMEM; + } + memcpy(entry->group_ids, group_ids, group_count 
* sizeof(u32)); + + return rocker_group_tbl_do(rocker_port, flags, entry); +} + +static int rocker_group_l2_flood(struct rocker_port *rocker_port, + int flags, __be16 vlan_id, + u8 group_count, u32 *group_ids, + u32 group_id) +{ + return rocker_group_l2_fan_out(rocker_port, flags, + group_count, group_ids, + group_id); +} + +static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port, + int flags, __be16 vlan_id) +{ + struct rocker_port *p; + struct rocker *rocker = rocker_port->rocker; + u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); + u32 group_ids[rocker->port_count]; + u8 group_count = 0; + int err; + int i; + + /* Adjust the flood group for this VLAN. The flood group + * references an L2 interface group for each port in this + * VLAN. + */ + + for (i = 0; i < rocker->port_count; i++) { + p = rocker->ports[i]; + if (!rocker_port_is_bridged(p)) + continue; + if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) { + group_ids[group_count++] = + ROCKER_GROUP_L2_INTERFACE(vlan_id, + p->lport); + } + } + + /* If there are no bridged ports in this VLAN, we're done */ + if (group_count == 0) + return 0; + + err = rocker_group_l2_flood(rocker_port, flags, vlan_id, + group_count, group_ids, + group_id); + if (err) + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 flood group\n", err); + + return err; +} + +static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, + int flags, __be16 vlan_id, + bool pop_vlan) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_port *p; + bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); + u32 out_lport; + int ref = 0; + int err; + int i; + + /* An L2 interface group for this port in this VLAN, but + * only when port STP state is LEARNING|FORWARDING. + */ + + if (rocker_port->stp_state == BR_STATE_LEARNING || + rocker_port->stp_state == BR_STATE_FORWARDING) { + out_lport = rocker_port->lport; + err = rocker_group_l2_interface(rocker_port, flags, + vlan_id, out_lport, + pop_vlan); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 group for lport %d\n", + err, out_lport); + return err; + } + } + + /* An L2 interface group for this VLAN to CPU port. + * Add when first port joins this VLAN and destroy when + * last port leaves this VLAN. 
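 * (Editorial restatement: the gate below means "act only on the first
 * join or the last leave" -- with adding == true it requires ref == 1,
 * this port's bit being the only one set, and with adding == false it
 * requires ref == 0, no bits left; the test
 * (!adding || ref != 1) && (adding || ref != 0) is the exact negation
 * of those two cases.)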
+ */ + + for (i = 0; i < rocker->port_count; i++) { + p = rocker->ports[i]; + if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) + ref++; + } + + if ((!adding || ref != 1) && (adding || ref != 0)) + return 0; + + out_lport = 0; + err = rocker_group_l2_interface(rocker_port, flags, + vlan_id, out_lport, + pop_vlan); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 group for CPU port\n", err); + return err; + } + + return 0; +} + +static struct rocker_ctrl { + const u8 *eth_dst; + const u8 *eth_dst_mask; + __be16 eth_type; + bool acl; + bool bridge; + bool term; + bool copy_to_cpu; +} rocker_ctrls[] = { + [ROCKER_CTRL_LINK_LOCAL_MCAST] = { + /* pass link local multicast pkts up to CPU for filtering */ + .eth_dst = ll_mac, + .eth_dst_mask = ll_mask, + .acl = true, + }, + [ROCKER_CTRL_LOCAL_ARP] = { + /* pass local ARP pkts up to CPU */ + .eth_dst = zero_mac, + .eth_dst_mask = zero_mac, + .eth_type = htons(ETH_P_ARP), + .acl = true, + }, + [ROCKER_CTRL_IPV4_MCAST] = { + /* pass IPv4 mcast pkts up to CPU, RFC 1112 */ + .eth_dst = ipv4_mcast, + .eth_dst_mask = ipv4_mask, + .eth_type = htons(ETH_P_IP), + .term = true, + .copy_to_cpu = true, + }, + [ROCKER_CTRL_IPV6_MCAST] = { + /* pass IPv6 mcast pkts up to CPU, RFC 2464 */ + .eth_dst = ipv6_mcast, + .eth_dst_mask = ipv6_mask, + .eth_type = htons(ETH_P_IPV6), + .term = true, + .copy_to_cpu = true, + }, + [ROCKER_CTRL_DFLT_BRIDGING] = { + /* flood any pkts on vlan */ + .bridge = true, + .copy_to_cpu = true, + }, +}; + +static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port, + int flags, struct rocker_ctrl *ctrl, + __be16 vlan_id) +{ + u32 in_lport = rocker_port->lport; + u32 in_lport_mask = 0xffffffff; + u32 out_lport = 0; + u8 *eth_src = NULL; + u8 *eth_src_mask = NULL; + __be16 vlan_id_mask = htons(0xffff); + u8 ip_proto = 0; + u8 ip_proto_mask = 0; + u8 ip_tos = 0; + u8 ip_tos_mask = 0; + u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport); + int err; + + err = rocker_flow_tbl_acl(rocker_port, flags, + in_lport, in_lport_mask, + eth_src, eth_src_mask, + ctrl->eth_dst, ctrl->eth_dst_mask, + ctrl->eth_type, + vlan_id, vlan_id_mask, + ip_proto, ip_proto_mask, + ip_tos, ip_tos_mask, + group_id); + + if (err) + netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err); + + return err; +} + +static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port, + int flags, struct rocker_ctrl *ctrl, + __be16 vlan_id) +{ + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); + u32 tunnel_id = 0; + int err; + + if (!rocker_port_is_bridged(rocker_port)) + return 0; + + err = rocker_flow_tbl_bridge(rocker_port, flags, + ctrl->eth_dst, ctrl->eth_dst_mask, + vlan_id, tunnel_id, + goto_tbl, group_id, ctrl->copy_to_cpu); + + if (err) + netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err); + + return err; +} + +static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port, + int flags, struct rocker_ctrl *ctrl, + __be16 vlan_id) +{ + u32 in_lport_mask = 0xffffffff; + __be16 vlan_id_mask = htons(0xffff); + int err; + + if (ntohs(vlan_id) == 0) + vlan_id = rocker_port->internal_vlan_id; + + err = rocker_flow_tbl_term_mac(rocker_port, + rocker_port->lport, in_lport_mask, + ctrl->eth_type, ctrl->eth_dst, + ctrl->eth_dst_mask, vlan_id, + vlan_id_mask, ctrl->copy_to_cpu, + flags); + + if (err) + netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err); + + return err; +} + +static int rocker_port_ctrl_vlan(struct rocker_port 
*rocker_port, int flags, + struct rocker_ctrl *ctrl, __be16 vlan_id) +{ + if (ctrl->acl) + return rocker_port_ctrl_vlan_acl(rocker_port, flags, + ctrl, vlan_id); + if (ctrl->bridge) + return rocker_port_ctrl_vlan_bridge(rocker_port, flags, + ctrl, vlan_id); + + if (ctrl->term) + return rocker_port_ctrl_vlan_term(rocker_port, flags, + ctrl, vlan_id); + + return -EOPNOTSUPP; +} + +static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port, + int flags, __be16 vlan_id) +{ + int err = 0; + int i; + + for (i = 0; i < ROCKER_CTRL_MAX; i++) { + if (rocker_port->ctrls[i]) { + err = rocker_port_ctrl_vlan(rocker_port, flags, + &rocker_ctrls[i], vlan_id); + if (err) + return err; + } + } + + return err; +} + +static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags, + struct rocker_ctrl *ctrl) +{ + u16 vid; + int err = 0; + + for (vid = 1; vid < VLAN_N_VID; vid++) { + if (!test_bit(vid, rocker_port->vlan_bitmap)) + continue; + err = rocker_port_ctrl_vlan(rocker_port, flags, + ctrl, htons(vid)); + if (err) + break; + } + + return err; +} + +static int rocker_port_vlan(struct rocker_port *rocker_port, int flags, + u16 vid) +{ + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; + u32 in_lport = rocker_port->lport; + __be16 vlan_id = htons(vid); + __be16 vlan_id_mask = htons(0xffff); + __be16 internal_vlan_id; + bool untagged; + bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); + int err; + + internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged); + + if (adding && test_and_set_bit(ntohs(internal_vlan_id), + rocker_port->vlan_bitmap)) + return 0; /* already added */ + else if (!adding && !test_and_clear_bit(ntohs(internal_vlan_id), + rocker_port->vlan_bitmap)) + return 0; /* already removed */ + + if (adding) { + err = rocker_port_ctrl_vlan_add(rocker_port, flags, + internal_vlan_id); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port ctrl vlan add\n", err); + return err; + } + } + + err = rocker_port_vlan_l2_groups(rocker_port, flags, + internal_vlan_id, untagged); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 groups\n", err); + return err; + } + + err = rocker_port_vlan_flood_group(rocker_port, flags, + internal_vlan_id); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 flood group\n", err); + return err; + } + + err = rocker_flow_tbl_vlan(rocker_port, flags, + in_lport, vlan_id, vlan_id_mask, + goto_tbl, untagged, internal_vlan_id); + if (err) + netdev_err(rocker_port->dev, + "Error (%d) port VLAN table\n", err); + + return err; +} + +static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags) +{ + enum rocker_of_dpa_table_id goto_tbl; + u32 in_lport; + u32 in_lport_mask; + int err; + + /* Normal Ethernet Frames. Matches pkts from any local physical + * ports. Goto VLAN tbl. 
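+ * (in_lport 0 with mask 0xffff0000 below matches any lport whose upper
+ * 16 bits are clear, i.e. every physical port.)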
+ */ + + in_lport = 0; + in_lport_mask = 0xffff0000; + goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN; + + err = rocker_flow_tbl_ig_port(rocker_port, flags, + in_lport, in_lport_mask, + goto_tbl); + if (err) + netdev_err(rocker_port->dev, + "Error (%d) ingress port table entry\n", err); + + return err; +} + +struct rocker_fdb_learn_work { + struct work_struct work; + struct net_device *dev; + int flags; + u8 addr[ETH_ALEN]; + u16 vid; +}; + +static void rocker_port_fdb_learn_work(struct work_struct *work) +{ + struct rocker_fdb_learn_work *lw = + container_of(work, struct rocker_fdb_learn_work, work); + bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE); + bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED); + + if (learned && removing) + br_fdb_external_learn_del(lw->dev, lw->addr, lw->vid); + else if (learned && !removing) + br_fdb_external_learn_add(lw->dev, lw->addr, lw->vid); + + kfree(work); +} + +static int rocker_port_fdb_learn(struct rocker_port *rocker_port, + int flags, const u8 *addr, __be16 vlan_id) +{ + struct rocker_fdb_learn_work *lw; + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 out_lport = rocker_port->lport; + u32 tunnel_id = 0; + u32 group_id = ROCKER_GROUP_NONE; + bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC); + bool copy_to_cpu = false; + int err; + + if (rocker_port_is_bridged(rocker_port)) + group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport); + + if (!(flags & ROCKER_OP_FLAG_REFRESH)) { + err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL, + vlan_id, tunnel_id, goto_tbl, + group_id, copy_to_cpu); + if (err) + return err; + } + + if (!syncing) + return 0; + + if (!rocker_port_is_bridged(rocker_port)) + return 0; + + lw = kmalloc(sizeof(*lw), rocker_op_flags_gfp(flags)); + if (!lw) + return -ENOMEM; + + INIT_WORK(&lw->work, rocker_port_fdb_learn_work); + + lw->dev = rocker_port->dev; + lw->flags = flags; + ether_addr_copy(lw->addr, addr); + lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id); + + schedule_work(&lw->work); + + return 0; +} + +static struct rocker_fdb_tbl_entry * +rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match) +{ + struct rocker_fdb_tbl_entry *found; + + hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32) + if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0) + return found; + + return NULL; +} + +static int rocker_port_fdb(struct rocker_port *rocker_port, + const unsigned char *addr, + __be16 vlan_id, int flags) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_fdb_tbl_entry *fdb; + struct rocker_fdb_tbl_entry *found; + bool removing = (flags & ROCKER_OP_FLAG_REMOVE); + unsigned long lock_flags; + + fdb = kzalloc(sizeof(*fdb), rocker_op_flags_gfp(flags)); + if (!fdb) + return -ENOMEM; + + fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED); + fdb->key.lport = rocker_port->lport; + ether_addr_copy(fdb->key.addr, addr); + fdb->key.vlan_id = vlan_id; + fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key)); + + spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); + + found = rocker_fdb_tbl_find(rocker, fdb); + + if (removing && found) { + kfree(fdb); + hash_del(&found->entry); + } else if (!removing && !found) { + hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32); + } + + spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); + + /* Check if adding and already exists, or removing and can't find */ + if (!found != !removing) { + kfree(fdb); + if (!found && removing) + return 0; + /* Refreshing existing to 
update aging timers */ + flags |= ROCKER_OP_FLAG_REFRESH; + } + + return rocker_port_fdb_learn(rocker_port, flags, addr, vlan_id); +} + +static int rocker_port_fdb_flush(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_fdb_tbl_entry *found; + unsigned long lock_flags; + int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE; + struct hlist_node *tmp; + int bkt; + int err = 0; + + if (rocker_port->stp_state == BR_STATE_LEARNING || + rocker_port->stp_state == BR_STATE_FORWARDING) + return 0; + + spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); + + hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { + if (found->key.lport != rocker_port->lport) + continue; + if (!found->learned) + continue; + err = rocker_port_fdb_learn(rocker_port, flags, + found->key.addr, + found->key.vlan_id); + if (err) + goto err_out; + hash_del(&found->entry); + } + +err_out: + spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); + + return err; +} + +static int rocker_port_router_mac(struct rocker_port *rocker_port, + int flags, __be16 vlan_id) +{ + u32 in_lport_mask = 0xffffffff; + __be16 eth_type; + const u8 *dst_mac_mask = ff_mac; + __be16 vlan_id_mask = htons(0xffff); + bool copy_to_cpu = false; + int err; + + if (ntohs(vlan_id) == 0) + vlan_id = rocker_port->internal_vlan_id; + + eth_type = htons(ETH_P_IP); + err = rocker_flow_tbl_term_mac(rocker_port, + rocker_port->lport, in_lport_mask, + eth_type, rocker_port->dev->dev_addr, + dst_mac_mask, vlan_id, vlan_id_mask, + copy_to_cpu, flags); + if (err) + return err; + + eth_type = htons(ETH_P_IPV6); + err = rocker_flow_tbl_term_mac(rocker_port, + rocker_port->lport, in_lport_mask, + eth_type, rocker_port->dev->dev_addr, + dst_mac_mask, vlan_id, vlan_id_mask, + copy_to_cpu, flags); + + return err; +} + +static int rocker_port_fwding(struct rocker_port *rocker_port) +{ + bool pop_vlan; + u32 out_lport; + __be16 vlan_id; + u16 vid; + int flags = ROCKER_OP_FLAG_NOWAIT; + int err; + + /* Port will be forwarding-enabled if its STP state is LEARNING + * or FORWARDING. Traffic from CPU can still egress, regardless of + * port STP state. Use L2 interface group on port VLANs as a way + * to toggle port forwarding: if forwarding is disabled, L2 + * interface group will not exist. 
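+ * (pop_vlan is set below for internal VLAN IDs, so traffic that
+ * arrived untagged egresses untagged.)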
+ */ + + if (rocker_port->stp_state != BR_STATE_LEARNING && + rocker_port->stp_state != BR_STATE_FORWARDING) + flags |= ROCKER_OP_FLAG_REMOVE; + + out_lport = rocker_port->lport; + for (vid = 1; vid < VLAN_N_VID; vid++) { + if (!test_bit(vid, rocker_port->vlan_bitmap)) + continue; + vlan_id = htons(vid); + pop_vlan = rocker_vlan_id_is_internal(vlan_id); + err = rocker_group_l2_interface(rocker_port, flags, + vlan_id, out_lport, + pop_vlan); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 group for lport %d\n", + err, out_lport); + return err; + } + } + + return 0; +} + +static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state) +{ + bool want[ROCKER_CTRL_MAX] = { 0, }; + int flags; + int err; + int i; + + if (rocker_port->stp_state == state) + return 0; + + rocker_port->stp_state = state; + + switch (state) { + case BR_STATE_DISABLED: + /* port is completely disabled */ + break; + case BR_STATE_LISTENING: + case BR_STATE_BLOCKING: + want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; + break; + case BR_STATE_LEARNING: + case BR_STATE_FORWARDING: + want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; + want[ROCKER_CTRL_IPV4_MCAST] = true; + want[ROCKER_CTRL_IPV6_MCAST] = true; + if (rocker_port_is_bridged(rocker_port)) + want[ROCKER_CTRL_DFLT_BRIDGING] = true; + else + want[ROCKER_CTRL_LOCAL_ARP] = true; + break; + } + + for (i = 0; i < ROCKER_CTRL_MAX; i++) { + if (want[i] != rocker_port->ctrls[i]) { + flags = ROCKER_OP_FLAG_NOWAIT | + (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE); + err = rocker_port_ctrl(rocker_port, flags, + &rocker_ctrls[i]); + if (err) + return err; + rocker_port->ctrls[i] = want[i]; + } + } + + err = rocker_port_fdb_flush(rocker_port); + if (err) + return err; + + return rocker_port_fwding(rocker_port); +} + +static struct rocker_internal_vlan_tbl_entry * +rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex) +{ + struct rocker_internal_vlan_tbl_entry *found; + + hash_for_each_possible(rocker->internal_vlan_tbl, found, + entry, ifindex) { + if (found->ifindex == ifindex) + return found; + } + + return NULL; +} + +static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port, + int ifindex) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_internal_vlan_tbl_entry *entry; + struct rocker_internal_vlan_tbl_entry *found; + unsigned long lock_flags; + int i; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return 0; + + entry->ifindex = ifindex; + + spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags); + + found = rocker_internal_vlan_tbl_find(rocker, ifindex); + if (found) { + kfree(entry); + goto found; + } + + found = entry; + hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex); + + for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) { + if (test_and_set_bit(i, rocker->internal_vlan_bitmap)) + continue; + found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i); + goto found; + } + + netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n"); + +found: + found->ref_count++; + spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags); + + return found->vlan_id; +} + +static void rocker_port_internal_vlan_id_put(struct rocker_port *rocker_port, + int ifindex) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_internal_vlan_tbl_entry *found; + unsigned long lock_flags; + unsigned long bit; + + spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags); + + found = rocker_internal_vlan_tbl_find(rocker, ifindex); + if (!found) { + 
netdev_err(rocker_port->dev, + "ifindex (%d) not found in internal VLAN tbl\n", + ifindex); + goto not_found; + } + + if (--found->ref_count <= 0) { + bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE; + clear_bit(bit, rocker->internal_vlan_bitmap); + hash_del(&found->entry); + kfree(found); + } + +not_found: + spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags); +} + +/***************** + * Net device ops + *****************/ + +static int rocker_port_open(struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + u8 stp_state = rocker_port_is_bridged(rocker_port) ? + BR_STATE_BLOCKING : BR_STATE_FORWARDING; + int err; + + err = rocker_port_dma_rings_init(rocker_port); + if (err) + return err; + + err = request_irq(rocker_msix_tx_vector(rocker_port), + rocker_tx_irq_handler, 0, + rocker_driver_name, rocker_port); + if (err) { + netdev_err(rocker_port->dev, "cannot assign tx irq\n"); + goto err_request_tx_irq; + } + + err = request_irq(rocker_msix_rx_vector(rocker_port), + rocker_rx_irq_handler, 0, + rocker_driver_name, rocker_port); + if (err) { + netdev_err(rocker_port->dev, "cannot assign rx irq\n"); + goto err_request_rx_irq; + } + + err = rocker_port_stp_update(rocker_port, stp_state); + if (err) + goto err_stp_update; + + napi_enable(&rocker_port->napi_tx); + napi_enable(&rocker_port->napi_rx); + rocker_port_set_enable(rocker_port, true); + netif_start_queue(dev); + return 0; + +err_stp_update: + free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); +err_request_rx_irq: + free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); +err_request_tx_irq: + rocker_port_dma_rings_fini(rocker_port); + return err; +} + +static int rocker_port_stop(struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + netif_stop_queue(dev); + rocker_port_set_enable(rocker_port, false); + napi_disable(&rocker_port->napi_rx); + napi_disable(&rocker_port->napi_tx); + rocker_port_stp_update(rocker_port, BR_STATE_DISABLED); + free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); + free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); + rocker_port_dma_rings_fini(rocker_port); + + return 0; +} + +static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info) +{ + struct rocker *rocker = rocker_port->rocker; + struct pci_dev *pdev = rocker->pdev; + struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1]; + struct rocker_tlv *attr; + int rem; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info); + if (!attrs[ROCKER_TLV_TX_FRAGS]) + return; + rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) { + struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1]; + dma_addr_t dma_handle; + size_t len; + + if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG) + continue; + rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX, + attr); + if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] || + !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]) + continue; + dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]); + len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]); + pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE); + } +} + +static int rocker_tx_desc_frag_map_put(struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + char *buf, size_t buf_len) +{ + struct rocker *rocker = rocker_port->rocker; + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + struct rocker_tlv *frag; + + dma_handle = pci_map_single(pdev, 
buf, buf_len, DMA_TO_DEVICE); + if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) { + if (net_ratelimit()) + netdev_err(rocker_port->dev, "failed to dma map tx frag\n"); + return -EIO; + } + frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG); + if (!frag) + goto unmap_frag; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR, + dma_handle)) + goto nest_cancel; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN, + buf_len)) + goto nest_cancel; + rocker_tlv_nest_end(desc_info, frag); + return 0; + +nest_cancel: + rocker_tlv_nest_cancel(desc_info, frag); +unmap_frag: + pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE); + return -EMSGSIZE; +} + +static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + struct rocker_tlv *frags; + int i; + int err; + + desc_info = rocker_desc_head_get(&rocker_port->tx_ring); + if (unlikely(!desc_info)) { + if (net_ratelimit()) + netdev_err(dev, "tx ring full when queue awake\n"); + return NETDEV_TX_BUSY; + } + + rocker_desc_cookie_ptr_set(desc_info, skb); + + frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS); + if (!frags) + goto out; + err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, + skb->data, skb_headlen(skb)); + if (err) + goto nest_cancel; + if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) + goto nest_cancel; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, + skb_frag_address(frag), + skb_frag_size(frag)); + if (err) + goto unmap_frags; + } + rocker_tlv_nest_end(desc_info, frags); + + rocker_desc_gen_clear(desc_info); + rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info); + + desc_info = rocker_desc_head_get(&rocker_port->tx_ring); + if (!desc_info) + netif_stop_queue(dev); + + return NETDEV_TX_OK; + +unmap_frags: + rocker_tx_desc_frags_unmap(rocker_port, desc_info); +nest_cancel: + rocker_tlv_nest_cancel(desc_info, frags); +out: + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +static int rocker_port_set_mac_address(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct rocker_port *rocker_port = netdev_priv(dev); + int err; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data); + if (err) + return err; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + return 0; +} + +static int rocker_port_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int err; + + err = rocker_port_vlan(rocker_port, 0, vid); + if (err) + return err; + + return rocker_port_router_mac(rocker_port, 0, htons(vid)); +} + +static int rocker_port_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int err; + + err = rocker_port_router_mac(rocker_port, ROCKER_OP_FLAG_REMOVE, + htons(vid)); + if (err) + return err; + + return rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, vid); +} + +static int rocker_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid, + u16 nlm_flags) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL); + 
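/* flags 0: a waitable, user-initiated add, not a learned entry */ +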
int flags = 0; + + if (!rocker_port_is_bridged(rocker_port)) + return -EINVAL; + + return rocker_port_fdb(rocker_port, addr, vlan_id, flags); +} + +static int rocker_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL); + int flags = ROCKER_OP_FLAG_REMOVE; + + if (!rocker_port_is_bridged(rocker_port)) + return -EINVAL; + + return rocker_port_fdb(rocker_port, addr, vlan_id, flags); +} + +static int rocker_fdb_fill_info(struct sk_buff *skb, + struct rocker_port *rocker_port, + const unsigned char *addr, u16 vid, + u32 portid, u32 seq, int type, + unsigned int flags) +{ + struct nlmsghdr *nlh; + struct ndmsg *ndm; + + nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags); + if (!nlh) + return -EMSGSIZE; + + ndm = nlmsg_data(nlh); + ndm->ndm_family = AF_BRIDGE; + ndm->ndm_pad1 = 0; + ndm->ndm_pad2 = 0; + ndm->ndm_flags = NTF_SELF; + ndm->ndm_type = 0; + ndm->ndm_ifindex = rocker_port->dev->ifindex; + ndm->ndm_state = NUD_REACHABLE; + + if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr)) + goto nla_put_failure; + + if (vid && nla_put_u16(skb, NDA_VLAN, vid)) + goto nla_put_failure; + + return nlmsg_end(skb, nlh); + +nla_put_failure: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; +} + +static int rocker_port_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + struct net_device *filter_dev, + int idx) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct rocker *rocker = rocker_port->rocker; + struct rocker_fdb_tbl_entry *found; + struct hlist_node *tmp; + int bkt; + unsigned long lock_flags; + const unsigned char *addr; + u16 vid; + int err; + + spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); + hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { + if (found->key.lport != rocker_port->lport) + continue; + if (idx < cb->args[0]) + goto skip; + addr = found->key.addr; + vid = rocker_port_vlan_to_vid(rocker_port, found->key.vlan_id); + err = rocker_fdb_fill_info(skb, rocker_port, addr, vid, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + RTM_NEWNEIGH, NLM_F_MULTI); + if (err < 0) + break; +skip: + ++idx; + } + spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); + return idx; +} + +static int rocker_port_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct nlattr *protinfo; + struct nlattr *attr; + int err; + + protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), + IFLA_PROTINFO); + if (protinfo) { + attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING); + if (attr) { + if (nla_len(attr) < sizeof(u8)) + return -EINVAL; + + if (nla_get_u8(attr)) + rocker_port->brport_flags |= BR_LEARNING; + else + rocker_port->brport_flags &= ~BR_LEARNING; + err = rocker_port_set_learning(rocker_port); + if (err) + return err; + } + attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING_SYNC); + if (attr) { + if (nla_len(attr) < sizeof(u8)) + return -EINVAL; + + if (nla_get_u8(attr)) + rocker_port->brport_flags |= BR_LEARNING_SYNC; + else + rocker_port->brport_flags &= ~BR_LEARNING_SYNC; + } + } + + return 0; +} + +static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 filter_mask) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + u16 mode = BRIDGE_MODE_UNDEF; + u32 mask = BR_LEARNING | BR_LEARNING_SYNC; + + return 
ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, + rocker_port->brport_flags, mask); +} + +static int rocker_port_switch_parent_id_get(struct net_device *dev, + struct netdev_phys_item_id *psid) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct rocker *rocker = rocker_port->rocker; + + psid->id_len = sizeof(rocker->hw.id); + memcpy(&psid->id, &rocker->hw.id, psid->id_len); + return 0; +} + +static int rocker_port_switch_port_stp_update(struct net_device *dev, u8 state) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + return rocker_port_stp_update(rocker_port, state); +} + +static const struct net_device_ops rocker_port_netdev_ops = { + .ndo_open = rocker_port_open, + .ndo_stop = rocker_port_stop, + .ndo_start_xmit = rocker_port_xmit, + .ndo_set_mac_address = rocker_port_set_mac_address, + .ndo_vlan_rx_add_vid = rocker_port_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = rocker_port_vlan_rx_kill_vid, + .ndo_fdb_add = rocker_port_fdb_add, + .ndo_fdb_del = rocker_port_fdb_del, + .ndo_fdb_dump = rocker_port_fdb_dump, + .ndo_bridge_setlink = rocker_port_bridge_setlink, + .ndo_bridge_getlink = rocker_port_bridge_getlink, + .ndo_switch_parent_id_get = rocker_port_switch_parent_id_get, + .ndo_switch_port_stp_update = rocker_port_switch_port_stp_update, +}; + +/******************** + * ethtool interface + ********************/ + +static int rocker_port_get_settings(struct net_device *dev, + struct ethtool_cmd *ecmd) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd); +} + +static int rocker_port_set_settings(struct net_device *dev, + struct ethtool_cmd *ecmd) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd); +} + +static void rocker_port_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); +} + +static const struct ethtool_ops rocker_port_ethtool_ops = { + .get_settings = rocker_port_get_settings, + .set_settings = rocker_port_set_settings, + .get_drvinfo = rocker_port_get_drvinfo, + .get_link = ethtool_op_get_link, +}; + +/***************** + * NAPI interface + *****************/ + +static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi) +{ + return container_of(napi, struct rocker_port, napi_tx); +} + +static int rocker_port_poll_tx(struct napi_struct *napi, int budget) +{ + struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi); + struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + u32 credits = 0; + int err; + + /* Cleanup tx descriptors */ + while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) { + err = rocker_desc_err(desc_info); + if (err && net_ratelimit()) + netdev_err(rocker_port->dev, "tx desc received with err %d\n", + err); + rocker_tx_desc_frags_unmap(rocker_port, desc_info); + dev_kfree_skb_any(rocker_desc_cookie_ptr_get(desc_info)); + credits++; + } + + if (credits && netif_queue_stopped(rocker_port->dev)) + netif_wake_queue(rocker_port->dev); + + napi_complete(napi); + rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits); + + return 0; +} + +static int rocker_port_rx_proc(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info) +{ + struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; + struct sk_buff *skb = 
rocker_desc_cookie_ptr_get(desc_info); + size_t rx_len; + + if (!skb) + return -ENOENT; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); + if (!attrs[ROCKER_TLV_RX_FRAG_LEN]) + return -EINVAL; + + rocker_dma_rx_ring_skb_unmap(rocker, attrs); + + rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]); + skb_put(skb, rx_len); + skb->protocol = eth_type_trans(skb, rocker_port->dev); + netif_receive_skb(skb); + + return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info); +} + +static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi) +{ + return container_of(napi, struct rocker_port, napi_rx); +} + +static int rocker_port_poll_rx(struct napi_struct *napi, int budget) +{ + struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi); + struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + u32 credits = 0; + int err; + + /* Process rx descriptors */ + while (credits < budget && + (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) { + err = rocker_desc_err(desc_info); + if (err) { + if (net_ratelimit()) + netdev_err(rocker_port->dev, "rx desc received with err %d\n", + err); + } else { + err = rocker_port_rx_proc(rocker, rocker_port, + desc_info); + if (err && net_ratelimit()) + netdev_err(rocker_port->dev, "rx processing failed with err %d\n", + err); + } + rocker_desc_gen_clear(desc_info); + rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info); + credits++; + } + + if (credits < budget) + napi_complete(napi); + + rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits); + + return credits; +} + +/***************** + * PCI driver ops + *****************/ + +static void rocker_carrier_init(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS); + bool link_up; + + link_up = link_status & (1 << rocker_port->lport); + if (link_up) + netif_carrier_on(rocker_port->dev); + else + netif_carrier_off(rocker_port->dev); +} + +static void rocker_remove_ports(struct rocker *rocker) +{ + struct rocker_port *rocker_port; + int i; + + for (i = 0; i < rocker->port_count; i++) { + rocker_port = rocker->ports[i]; + rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE); + unregister_netdev(rocker_port->dev); + } + kfree(rocker->ports); +} + +static void rocker_port_dev_addr_init(struct rocker *rocker, + struct rocker_port *rocker_port) +{ + struct pci_dev *pdev = rocker->pdev; + int err; + + err = rocker_cmd_get_port_settings_macaddr(rocker_port, + rocker_port->dev->dev_addr); + if (err) { + dev_warn(&pdev->dev, "failed to get mac address, using random\n"); + eth_hw_addr_random(rocker_port->dev); + } +} + +static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) +{ + struct pci_dev *pdev = rocker->pdev; + struct rocker_port *rocker_port; + struct net_device *dev; + int err; + + dev = alloc_etherdev(sizeof(struct rocker_port)); + if (!dev) + return -ENOMEM; + rocker_port = netdev_priv(dev); + rocker_port->dev = dev; + rocker_port->rocker = rocker; + rocker_port->port_number = port_number; + rocker_port->lport = port_number + 1; + rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC; + + rocker_port_dev_addr_init(rocker, rocker_port); + dev->netdev_ops = &rocker_port_netdev_ops; + dev->ethtool_ops = &rocker_port_ethtool_ops; + netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx, + NAPI_POLL_WEIGHT); + netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx, + 
NAPI_POLL_WEIGHT); + rocker_carrier_init(rocker_port); + + dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "register_netdev failed\n"); + goto err_register_netdev; + } + rocker->ports[port_number] = rocker_port; + + rocker_port_set_learning(rocker_port); + + rocker_port->internal_vlan_id = + rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex); + err = rocker_port_ig_tbl(rocker_port, 0); + if (err) { + dev_err(&pdev->dev, "install ig port table failed\n"); + goto err_port_ig_tbl; + } + + return 0; + +err_port_ig_tbl: + unregister_netdev(dev); +err_register_netdev: + free_netdev(dev); + return err; +} + +static int rocker_probe_ports(struct rocker *rocker) +{ + int i; + size_t alloc_size; + int err; + + alloc_size = sizeof(struct rocker_port *) * rocker->port_count; + rocker->ports = kmalloc(alloc_size, GFP_KERNEL); + if (!rocker->ports) + return -ENOMEM; + for (i = 0; i < rocker->port_count; i++) { + err = rocker_probe_port(rocker, i); + if (err) + goto remove_ports; + } + return 0; + +remove_ports: + rocker_remove_ports(rocker); + return err; +} + +static int rocker_msix_init(struct rocker *rocker) +{ + struct pci_dev *pdev = rocker->pdev; + int msix_entries; + int i; + int err; + + msix_entries = pci_msix_vec_count(pdev); + if (msix_entries < 0) + return msix_entries; + + if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count)) + return -EINVAL; + + rocker->msix_entries = kmalloc_array(msix_entries, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!rocker->msix_entries) + return -ENOMEM; + + for (i = 0; i < msix_entries; i++) + rocker->msix_entries[i].entry = i; + + err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries); + if (err < 0) + goto err_enable_msix; + + return 0; + +err_enable_msix: + kfree(rocker->msix_entries); + return err; +} + +static void rocker_msix_fini(struct rocker *rocker) +{ + pci_disable_msix(rocker->pdev); + kfree(rocker->msix_entries); +} + +static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct rocker *rocker; + int err; + + rocker = kzalloc(sizeof(*rocker), GFP_KERNEL); + if (!rocker) + return -ENOMEM; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "pci_enable_device failed\n"); + goto err_pci_enable_device; + } + + err = pci_request_regions(pdev, rocker_driver_name); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed\n"); + goto err_pci_request_regions; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (!err) { + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n"); + goto err_pci_set_dma_mask; + } + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "pci_set_dma_mask failed\n"); + goto err_pci_set_dma_mask; + } + } + + if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) { + dev_err(&pdev->dev, "invalid PCI region size\n"); + err = -EINVAL; + goto err_pci_resource_len_check; + } + + rocker->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!rocker->hw_addr) { + dev_err(&pdev->dev, "ioremap failed\n"); + err = -EIO; + goto err_ioremap; + } + pci_set_master(pdev); + + rocker->pdev = pdev; + pci_set_drvdata(pdev, rocker); + + rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT); + + err = rocker_msix_init(rocker); + if (err) { + dev_err(&pdev->dev, "MSI-X init failed\n"); + goto err_msix_init; + } + + err = rocker_basic_hw_test(rocker); + if (err) { + dev_err(&pdev->dev, "basic hw 
test failed\n"); + goto err_basic_hw_test; + } + + rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); + + err = rocker_dma_rings_init(rocker); + if (err) + goto err_dma_rings_init; + + err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), + rocker_cmd_irq_handler, 0, + rocker_driver_name, rocker); + if (err) { + dev_err(&pdev->dev, "cannot assign cmd irq\n"); + goto err_request_cmd_irq; + } + + err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), + rocker_event_irq_handler, 0, + rocker_driver_name, rocker); + if (err) { + dev_err(&pdev->dev, "cannot assign event irq\n"); + goto err_request_event_irq; + } + + rocker->hw.id = rocker_read64(rocker, SWITCH_ID); + + err = rocker_init_tbls(rocker); + if (err) { + dev_err(&pdev->dev, "cannot init rocker tables\n"); + goto err_init_tbls; + } + + err = rocker_probe_ports(rocker); + if (err) { + dev_err(&pdev->dev, "failed to probe ports\n"); + goto err_probe_ports; + } + + dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id); + + return 0; + +err_probe_ports: + rocker_free_tbls(rocker); +err_init_tbls: + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); +err_request_event_irq: + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); +err_request_cmd_irq: + rocker_dma_rings_fini(rocker); +err_dma_rings_init: +err_basic_hw_test: + rocker_msix_fini(rocker); +err_msix_init: + iounmap(rocker->hw_addr); +err_ioremap: +err_pci_resource_len_check: +err_pci_set_dma_mask: + pci_release_regions(pdev); +err_pci_request_regions: + pci_disable_device(pdev); +err_pci_enable_device: + kfree(rocker); + return err; +} + +static void rocker_remove(struct pci_dev *pdev) +{ + struct rocker *rocker = pci_get_drvdata(pdev); + + rocker_free_tbls(rocker); + rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); + rocker_remove_ports(rocker); + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); + rocker_dma_rings_fini(rocker); + rocker_msix_fini(rocker); + iounmap(rocker->hw_addr); + pci_release_regions(rocker->pdev); + pci_disable_device(rocker->pdev); + kfree(rocker); +} + +static struct pci_driver rocker_pci_driver = { + .name = rocker_driver_name, + .id_table = rocker_pci_id_table, + .probe = rocker_probe, + .remove = rocker_remove, +}; + +/************************************ + * Net device notifier event handler + ************************************/ + +static bool rocker_port_dev_check(struct net_device *dev) +{ + return dev->netdev_ops == &rocker_port_netdev_ops; +} + +static int rocker_port_bridge_join(struct rocker_port *rocker_port, + struct net_device *bridge) +{ + int err; + + rocker_port_internal_vlan_id_put(rocker_port, + rocker_port->dev->ifindex); + + rocker_port->bridge_dev = bridge; + + /* Use bridge internal VLAN ID for untagged pkts */ + err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0); + if (err) + return err; + rocker_port->internal_vlan_id = + rocker_port_internal_vlan_id_get(rocker_port, + bridge->ifindex); + err = rocker_port_vlan(rocker_port, 0, 0); + + return err; +} + +static int rocker_port_bridge_leave(struct rocker_port *rocker_port) +{ + int err; + + rocker_port_internal_vlan_id_put(rocker_port, + rocker_port->bridge_dev->ifindex); + + rocker_port->bridge_dev = NULL; + + /* Use port internal VLAN ID for untagged pkts */ + err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0); + if (err) + return err; + rocker_port->internal_vlan_id = + 
rocker_port_internal_vlan_id_get(rocker_port, + rocker_port->dev->ifindex); + err = rocker_port_vlan(rocker_port, 0, 0); + + return err; +} + +static int rocker_port_master_changed(struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct net_device *master = netdev_master_upper_dev_get(dev); + int err = 0; + + if (master && master->rtnl_link_ops && + !strcmp(master->rtnl_link_ops->kind, "bridge")) + err = rocker_port_bridge_join(rocker_port, master); + else + err = rocker_port_bridge_leave(rocker_port); + + return err; +} + +static int rocker_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev; + int err; + + switch (event) { + case NETDEV_CHANGEUPPER: + dev = netdev_notifier_info_to_dev(ptr); + if (!rocker_port_dev_check(dev)) + return NOTIFY_DONE; + err = rocker_port_master_changed(dev); + if (err) + netdev_warn(dev, + "failed to reflect master change (err %d)\n", + err); + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block rocker_netdevice_nb __read_mostly = { + .notifier_call = rocker_netdevice_event, +}; + +/*********************** + * Module init and exit + ***********************/ + +static int __init rocker_module_init(void) +{ + int err; + + register_netdevice_notifier(&rocker_netdevice_nb); + err = pci_register_driver(&rocker_pci_driver); + if (err) + goto err_pci_register_driver; + return 0; + +err_pci_register_driver: + unregister_netdevice_notifier(&rocker_netdevice_nb); + return err; +} + +static void __exit rocker_module_exit(void) +{ + unregister_netdevice_notifier(&rocker_netdevice_nb); + pci_unregister_driver(&rocker_pci_driver); +} + +module_init(rocker_module_init); +module_exit(rocker_module_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>"); +MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>"); +MODULE_DESCRIPTION("Rocker switch device driver"); +MODULE_DEVICE_TABLE(pci, rocker_pci_id_table); diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h new file mode 100644 index 0000000000000000000000000000000000000000..8d2865ba634c7e8babe6ca8e2f3ac05acaeaff41 --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker.h @@ -0,0 +1,428 @@ +/* + * drivers/net/ethernet/rocker/rocker.h - Rocker switch device driver + * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us> + * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef _ROCKER_H +#define _ROCKER_H + +#include <linux/types.h> + +#define PCI_VENDOR_ID_REDHAT 0x1b36 +#define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006 + +#define ROCKER_PCI_BAR0_SIZE 0x2000 + +/* MSI-X vectors */ +enum { + ROCKER_MSIX_VEC_CMD, + ROCKER_MSIX_VEC_EVENT, + ROCKER_MSIX_VEC_TEST, + ROCKER_MSIX_VEC_RESERVED0, + __ROCKER_MSIX_VEC_TX, + __ROCKER_MSIX_VEC_RX, +#define ROCKER_MSIX_VEC_TX(port) \ + (__ROCKER_MSIX_VEC_TX + ((port) * 2)) +#define ROCKER_MSIX_VEC_RX(port) \ + (__ROCKER_MSIX_VEC_RX + ((port) * 2)) +#define ROCKER_MSIX_VEC_COUNT(portcnt) \ + (ROCKER_MSIX_VEC_RX((portcnt - 1)) + 1) +}; + +/* Rocker bogus registers */ +#define ROCKER_BOGUS_REG0 0x0000 +#define ROCKER_BOGUS_REG1 0x0004 +#define ROCKER_BOGUS_REG2 0x0008 +#define ROCKER_BOGUS_REG3 0x000c + +/* Rocker test registers */ +#define ROCKER_TEST_REG 0x0010 +#define ROCKER_TEST_REG64 0x0018 /* 8-byte */ +#define ROCKER_TEST_IRQ 0x0020 +#define ROCKER_TEST_DMA_ADDR 0x0028 /* 8-byte */ +#define ROCKER_TEST_DMA_SIZE 0x0030 +#define ROCKER_TEST_DMA_CTRL 0x0034 + +/* Rocker test register ctrl */ +#define ROCKER_TEST_DMA_CTRL_CLEAR (1 << 0) +#define ROCKER_TEST_DMA_CTRL_FILL (1 << 1) +#define ROCKER_TEST_DMA_CTRL_INVERT (1 << 2) + +/* Rocker DMA ring register offsets */ +#define ROCKER_DMA_DESC_ADDR(x) (0x1000 + (x) * 32) /* 8-byte */ +#define ROCKER_DMA_DESC_SIZE(x) (0x1008 + (x) * 32) +#define ROCKER_DMA_DESC_HEAD(x) (0x100c + (x) * 32) +#define ROCKER_DMA_DESC_TAIL(x) (0x1010 + (x) * 32) +#define ROCKER_DMA_DESC_CTRL(x) (0x1014 + (x) * 32) +#define ROCKER_DMA_DESC_CREDITS(x) (0x1018 + (x) * 32) +#define ROCKER_DMA_DESC_RES1(x) (0x101c + (x) * 32) + +/* Rocker dma ctrl register bits */ +#define ROCKER_DMA_DESC_CTRL_RESET (1 << 0) + +/* Rocker DMA ring types */ +enum rocker_dma_type { + ROCKER_DMA_CMD, + ROCKER_DMA_EVENT, + __ROCKER_DMA_TX, + __ROCKER_DMA_RX, +#define ROCKER_DMA_TX(port) (__ROCKER_DMA_TX + (port) * 2) +#define ROCKER_DMA_RX(port) (__ROCKER_DMA_RX + (port) * 2) +}; + +/* Rocker DMA ring size limits and default sizes */ +#define ROCKER_DMA_SIZE_MIN 2ul +#define ROCKER_DMA_SIZE_MAX 65536ul +#define ROCKER_DMA_CMD_DEFAULT_SIZE 32ul +#define ROCKER_DMA_EVENT_DEFAULT_SIZE 32ul +#define ROCKER_DMA_TX_DEFAULT_SIZE 64ul +#define ROCKER_DMA_TX_DESC_SIZE 256 +#define ROCKER_DMA_RX_DEFAULT_SIZE 64ul +#define ROCKER_DMA_RX_DESC_SIZE 256 + +/* Rocker DMA descriptor struct */ +struct rocker_desc { + u64 buf_addr; + u64 cookie; + u16 buf_size; + u16 tlv_size; + u16 resv[5]; + u16 comp_err; +}; + +#define ROCKER_DMA_DESC_COMP_ERR_GEN (1 << 15) + +/* Rocker DMA TLV struct */ +struct rocker_tlv { + u32 type; + u16 len; +}; + +/* TLVs */ +enum { + ROCKER_TLV_CMD_UNSPEC, + ROCKER_TLV_CMD_TYPE, /* u16 */ + ROCKER_TLV_CMD_INFO, /* nest */ + + __ROCKER_TLV_CMD_MAX, + ROCKER_TLV_CMD_MAX = __ROCKER_TLV_CMD_MAX - 1, +}; + +enum { + ROCKER_TLV_CMD_TYPE_UNSPEC, + ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS, + + __ROCKER_TLV_CMD_TYPE_MAX, + ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1, +}; + +enum { + ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC, + ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, /* u32 */ + ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, /* u32 */ + ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, /* u8 */ 
+ ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, /* binary */ + ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */ + + __ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + ROCKER_TLV_CMD_PORT_SETTINGS_MAX = + __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1, +}; + +enum rocker_port_mode { + ROCKER_PORT_MODE_OF_DPA, +}; + +enum { + ROCKER_TLV_EVENT_UNSPEC, + ROCKER_TLV_EVENT_TYPE, /* u16 */ + ROCKER_TLV_EVENT_INFO, /* nest */ + + __ROCKER_TLV_EVENT_MAX, + ROCKER_TLV_EVENT_MAX = __ROCKER_TLV_EVENT_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_TYPE_UNSPEC, + ROCKER_TLV_EVENT_TYPE_LINK_CHANGED, + ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN, + + __ROCKER_TLV_EVENT_TYPE_MAX, + ROCKER_TLV_EVENT_TYPE_MAX = __ROCKER_TLV_EVENT_TYPE_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC, + ROCKER_TLV_EVENT_LINK_CHANGED_LPORT, /* u32 */ + ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, /* u8 */ + + __ROCKER_TLV_EVENT_LINK_CHANGED_MAX, + ROCKER_TLV_EVENT_LINK_CHANGED_MAX = + __ROCKER_TLV_EVENT_LINK_CHANGED_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC, + ROCKER_TLV_EVENT_MAC_VLAN_LPORT, /* u32 */ + ROCKER_TLV_EVENT_MAC_VLAN_MAC, /* binary */ + ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, /* __be16 */ + + __ROCKER_TLV_EVENT_MAC_VLAN_MAX, + ROCKER_TLV_EVENT_MAC_VLAN_MAX = __ROCKER_TLV_EVENT_MAC_VLAN_MAX - 1, +}; + +enum { + ROCKER_TLV_RX_UNSPEC, + ROCKER_TLV_RX_FLAGS, /* u16, see ROCKER_RX_FLAGS_ */ + ROCKER_TLV_RX_CSUM, /* u16 */ + ROCKER_TLV_RX_FRAG_ADDR, /* u64 */ + ROCKER_TLV_RX_FRAG_MAX_LEN, /* u16 */ + ROCKER_TLV_RX_FRAG_LEN, /* u16 */ + + __ROCKER_TLV_RX_MAX, + ROCKER_TLV_RX_MAX = __ROCKER_TLV_RX_MAX - 1, +}; + +#define ROCKER_RX_FLAGS_IPV4 (1 << 0) +#define ROCKER_RX_FLAGS_IPV6 (1 << 1) +#define ROCKER_RX_FLAGS_CSUM_CALC (1 << 2) +#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD (1 << 3) +#define ROCKER_RX_FLAGS_IP_FRAG (1 << 4) +#define ROCKER_RX_FLAGS_TCP (1 << 5) +#define ROCKER_RX_FLAGS_UDP (1 << 6) +#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD (1 << 7) + +enum { + ROCKER_TLV_TX_UNSPEC, + ROCKER_TLV_TX_OFFLOAD, /* u8, see ROCKER_TX_OFFLOAD_ */ + ROCKER_TLV_TX_L3_CSUM_OFF, /* u16 */ + ROCKER_TLV_TX_TSO_MSS, /* u16 */ + ROCKER_TLV_TX_TSO_HDR_LEN, /* u16 */ + ROCKER_TLV_TX_FRAGS, /* array */ + + __ROCKER_TLV_TX_MAX, + ROCKER_TLV_TX_MAX = __ROCKER_TLV_TX_MAX - 1, +}; + +#define ROCKER_TX_OFFLOAD_NONE 0 +#define ROCKER_TX_OFFLOAD_IP_CSUM 1 +#define ROCKER_TX_OFFLOAD_TCP_UDP_CSUM 2 +#define ROCKER_TX_OFFLOAD_L3_CSUM 3 +#define ROCKER_TX_OFFLOAD_TSO 4 + +#define ROCKER_TX_FRAGS_MAX 16 + +enum { + ROCKER_TLV_TX_FRAG_UNSPEC, + ROCKER_TLV_TX_FRAG, /* nest */ + + __ROCKER_TLV_TX_FRAG_MAX, + ROCKER_TLV_TX_FRAG_MAX = __ROCKER_TLV_TX_FRAG_MAX - 1, +}; + +enum { + ROCKER_TLV_TX_FRAG_ATTR_UNSPEC, + ROCKER_TLV_TX_FRAG_ATTR_ADDR, /* u64 */ + ROCKER_TLV_TX_FRAG_ATTR_LEN, /* u16 */ + + __ROCKER_TLV_TX_FRAG_ATTR_MAX, + ROCKER_TLV_TX_FRAG_ATTR_MAX = __ROCKER_TLV_TX_FRAG_ATTR_MAX - 1, +}; + +/* cmd info nested for OF-DPA msgs */ +enum { + ROCKER_TLV_OF_DPA_UNSPEC, + ROCKER_TLV_OF_DPA_TABLE_ID, /* u16 */ + ROCKER_TLV_OF_DPA_PRIORITY, /* u32 */ + ROCKER_TLV_OF_DPA_HARDTIME, /* u32 */ + ROCKER_TLV_OF_DPA_IDLETIME, /* u32 */ + ROCKER_TLV_OF_DPA_COOKIE, /* u64 */ + ROCKER_TLV_OF_DPA_IN_LPORT, /* u32 */ + ROCKER_TLV_OF_DPA_IN_LPORT_MASK, /* u32 */ + ROCKER_TLV_OF_DPA_OUT_LPORT, /* u32 */ + ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, /* u16 */ + ROCKER_TLV_OF_DPA_GROUP_ID, /* u32 */ + ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, /* u32 */ + ROCKER_TLV_OF_DPA_GROUP_COUNT, /* u16 */ + ROCKER_TLV_OF_DPA_GROUP_IDS, 
/* u32 array */ + ROCKER_TLV_OF_DPA_VLAN_ID, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_ID_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_VLAN_ID, /* __be16 */ + ROCKER_TLV_OF_DPA_NEW_VLAN_PCP, /* u8 */ + ROCKER_TLV_OF_DPA_TUNNEL_ID, /* u32 */ + ROCKER_TLV_OF_DPA_TUN_LOG_LPORT, /* u32 */ + ROCKER_TLV_OF_DPA_ETHERTYPE, /* __be16 */ + ROCKER_TLV_OF_DPA_DST_MAC, /* binary */ + ROCKER_TLV_OF_DPA_DST_MAC_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_MAC, /* binary */ + ROCKER_TLV_OF_DPA_SRC_MAC_MASK, /* binary */ + ROCKER_TLV_OF_DPA_IP_PROTO, /* u8 */ + ROCKER_TLV_OF_DPA_IP_PROTO_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_IP_DSCP, /* u8 */ + ROCKER_TLV_OF_DPA_IP_ECN, /* u8 */ + ROCKER_TLV_OF_DPA_IP_ECN_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_DST_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_DST_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_DST_IPV6, /* binary */ + ROCKER_TLV_OF_DPA_DST_IPV6_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_IPV6, /* binary */ + ROCKER_TLV_OF_DPA_SRC_IPV6_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_ARP_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_ARP_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_L4_DST_PORT, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_DST_PORT_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_SRC_PORT, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_SRC_PORT_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_ICMP_TYPE, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_TYPE_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_CODE, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_CODE_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IPV6_LABEL, /* __be32 */ + ROCKER_TLV_OF_DPA_IPV6_LABEL_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_QUEUE_ID_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_QUEUE_ID, /* u8 */ + ROCKER_TLV_OF_DPA_CLEAR_ACTIONS, /* u32 */ + ROCKER_TLV_OF_DPA_POP_VLAN, /* u8 */ + ROCKER_TLV_OF_DPA_TTL_CHECK, /* u8 */ + ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, /* u8 */ + + __ROCKER_TLV_OF_DPA_MAX, + ROCKER_TLV_OF_DPA_MAX = __ROCKER_TLV_OF_DPA_MAX - 1, +}; + +/* OF-DPA table IDs */ + +enum rocker_of_dpa_table_id { + ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT = 0, + ROCKER_OF_DPA_TABLE_ID_VLAN = 10, + ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC = 20, + ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING = 30, + ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING = 40, + ROCKER_OF_DPA_TABLE_ID_BRIDGING = 50, + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY = 60, +}; + +/* OF-DPA flow stats */ +enum { + ROCKER_TLV_OF_DPA_FLOW_STAT_UNSPEC, + ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION, /* u32 */ + ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS, /* u64 */ + ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS, /* u64 */ + + __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX, + ROCKER_TLV_OF_DPA_FLOW_STAT_MAX = __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX - 1, +}; + +/* OF-DPA group types */ +enum rocker_of_dpa_group_type { + ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE = 0, + ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE, + ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST, + ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST, + ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD, + ROCKER_OF_DPA_GROUP_TYPE_L3_INTERFACE, + ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST, + ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP, + ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY, +}; + +/* OF-DPA group L2 overlay types */ +enum rocker_of_dpa_overlay_type { + ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_UCAST = 0, + ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_MCAST, + ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_UCAST, + 
ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_MCAST, +}; + +/* OF-DPA group ID encoding */ +#define ROCKER_GROUP_TYPE_SHIFT 28 +#define ROCKER_GROUP_TYPE_MASK 0xf0000000 +#define ROCKER_GROUP_VLAN_SHIFT 16 +#define ROCKER_GROUP_VLAN_MASK 0x0fff0000 +#define ROCKER_GROUP_PORT_SHIFT 0 +#define ROCKER_GROUP_PORT_MASK 0x0000ffff +#define ROCKER_GROUP_TUNNEL_ID_SHIFT 12 +#define ROCKER_GROUP_TUNNEL_ID_MASK 0x0ffff000 +#define ROCKER_GROUP_SUBTYPE_SHIFT 10 +#define ROCKER_GROUP_SUBTYPE_MASK 0x00000c00 +#define ROCKER_GROUP_INDEX_SHIFT 0 +#define ROCKER_GROUP_INDEX_MASK 0x0000ffff +#define ROCKER_GROUP_INDEX_LONG_SHIFT 0 +#define ROCKER_GROUP_INDEX_LONG_MASK 0x0fffffff + +#define ROCKER_GROUP_TYPE_GET(group_id) \ + (((group_id) & ROCKER_GROUP_TYPE_MASK) >> ROCKER_GROUP_TYPE_SHIFT) +#define ROCKER_GROUP_TYPE_SET(type) \ + (((type) << ROCKER_GROUP_TYPE_SHIFT) & ROCKER_GROUP_TYPE_MASK) +#define ROCKER_GROUP_VLAN_GET(group_id) \ + (((group_id) & ROCKER_GROUP_VLAN_MASK) >> ROCKER_GROUP_VLAN_SHIFT) +#define ROCKER_GROUP_VLAN_SET(vlan_id) \ + (((vlan_id) << ROCKER_GROUP_VLAN_SHIFT) & ROCKER_GROUP_VLAN_MASK) +#define ROCKER_GROUP_PORT_GET(group_id) \ + (((group_id) & ROCKER_GROUP_PORT_MASK) >> ROCKER_GROUP_PORT_SHIFT) +#define ROCKER_GROUP_PORT_SET(port) \ + (((port) << ROCKER_GROUP_PORT_SHIFT) & ROCKER_GROUP_PORT_MASK) +#define ROCKER_GROUP_INDEX_GET(group_id) \ + (((group_id) & ROCKER_GROUP_INDEX_MASK) >> ROCKER_GROUP_INDEX_SHIFT) +#define ROCKER_GROUP_INDEX_SET(index) \ + (((index) << ROCKER_GROUP_INDEX_SHIFT) & ROCKER_GROUP_INDEX_MASK) +#define ROCKER_GROUP_INDEX_LONG_GET(group_id) \ + (((group_id) & ROCKER_GROUP_INDEX_LONG_MASK) >> \ + ROCKER_GROUP_INDEX_LONG_SHIFT) +#define ROCKER_GROUP_INDEX_LONG_SET(index) \ + (((index) << ROCKER_GROUP_INDEX_LONG_SHIFT) & \ + ROCKER_GROUP_INDEX_LONG_MASK) + +#define ROCKER_GROUP_NONE 0 +#define ROCKER_GROUP_L2_INTERFACE(vlan_id, port) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_PORT_SET(port)) +#define ROCKER_GROUP_L2_REWRITE(index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE) |\ + ROCKER_GROUP_INDEX_LONG_SET(index)) +#define ROCKER_GROUP_L2_MCAST(vlan_id, index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index)) +#define ROCKER_GROUP_L2_FLOOD(vlan_id, index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index)) +#define ROCKER_GROUP_L3_UNICAST(index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST) |\ + ROCKER_GROUP_INDEX_LONG_SET(index)) + +/* Rocker general purpose registers */ +#define ROCKER_CONTROL 0x0300 +#define ROCKER_PORT_PHYS_COUNT 0x0304 +#define ROCKER_PORT_PHYS_LINK_STATUS 0x0310 /* 8-byte */ +#define ROCKER_PORT_PHYS_ENABLE 0x0318 /* 8-byte */ +#define ROCKER_SWITCH_ID 0x0320 /* 8-byte */ + +/* Rocker control bits */ +#define ROCKER_CONTROL_RESET (1 << 0) + +#endif diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c index b147d469a7998ee49cf7d02199d038bf58aac7c0..7fd6e275d1c20bf3208c1b5f09061bac82567ad6 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c @@ -90,9 +90,6 @@ static int sxgbe_platform_probe(struct platform_device *pdev) /* Get memory resource */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - goto err_out; - 
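+ /* No NULL check is needed here: devm_ioremap_resource() validates
+ * res itself (including NULL) and returns an ERR_PTR on failure.
+ */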
addr = devm_ioremap_resource(dev, res); if (IS_ERR(addr)) return PTR_ERR(addr); diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index a77f05ce832596d0c8eb10ef3a9735f48ba64b55..fbb6cfa0f5f1d634c22eee11d3d62ca52bfef96e 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -3689,6 +3689,11 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .ptp_write_host_time = efx_ef10_ptp_write_host_time, .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, + .sriov_init = efx_ef10_sriov_init, + .sriov_fini = efx_ef10_sriov_fini, + .sriov_mac_address_changed = efx_ef10_sriov_mac_address_changed, + .sriov_wanted = efx_ef10_sriov_wanted, + .sriov_reset = efx_ef10_sriov_reset, .revision = EFX_REV_HUNT_A0, .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index b2cc590dd1ddfb516381920af3b20cc24435dd7a..238482495e81fa3111554fc1d98fd0209e1499a8 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1314,7 +1314,7 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx) /* If RSS is requested for the PF *and* VFs then we can't write RSS * table entries that are inaccessible to VFs */ - if (efx_sriov_wanted(efx) && efx_vf_size(efx) > 1 && + if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 && count > efx_vf_size(efx)) { netif_warn(efx, probe, efx->net_dev, "Reducing number of RSS channels from %u to %u for " @@ -1426,7 +1426,9 @@ static int efx_probe_interrupts(struct efx_nic *efx) } /* RSS might be usable on VFs even if it is disabled on the PF */ - efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ? + + efx->rss_spread = ((efx->n_rx_channels > 1 || + !efx->type->sriov_wanted(efx)) ? 
efx->n_rx_channels : efx_vf_size(efx)); return 0; @@ -1614,7 +1616,7 @@ static int efx_probe_nic(struct efx_nic *efx) goto fail2; if (efx->n_channels > 1) - get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); + netdev_rss_key_fill(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) efx->rx_indir_table[i] = ethtool_rxfh_indir_default(i, efx->rss_spread); @@ -2166,7 +2168,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data) } ether_addr_copy(net_dev->dev_addr, new_addr); - efx_sriov_mac_address_changed(efx); + efx->type->sriov_mac_address_changed(efx); /* Reconfigure the MAC */ mutex_lock(&efx->mac_lock); @@ -2210,10 +2212,10 @@ static const struct net_device_ops efx_farch_netdev_ops = { .ndo_set_rx_mode = efx_set_rx_mode, .ndo_set_features = efx_set_features, #ifdef CONFIG_SFC_SRIOV - .ndo_set_vf_mac = efx_sriov_set_vf_mac, - .ndo_set_vf_vlan = efx_sriov_set_vf_vlan, - .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk, - .ndo_get_vf_config = efx_sriov_get_vf_config, + .ndo_set_vf_mac = efx_siena_sriov_set_vf_mac, + .ndo_set_vf_vlan = efx_siena_sriov_set_vf_vlan, + .ndo_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk, + .ndo_get_vf_config = efx_siena_sriov_get_vf_config, #endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = efx_netpoll, @@ -2433,7 +2435,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) if (rc) goto fail; efx_restore_filters(efx); - efx_sriov_reset(efx); + efx->type->sriov_reset(efx); mutex_unlock(&efx->mac_lock); @@ -2826,7 +2828,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev) efx_disable_interrupts(efx); rtnl_unlock(); - efx_sriov_fini(efx); + efx->type->sriov_fini(efx); efx_unregister_netdev(efx); efx_mtd_remove(efx); @@ -3023,7 +3025,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev, if (rc) goto fail4; - rc = efx_sriov_init(efx); + rc = efx->type->sriov_init(efx); if (rc) netif_err(efx, probe, efx->net_dev, "SR-IOV can't be enabled rc %d\n", rc); diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index cad258a787088b44074bbb37c5a0eeb2686eeb7a..4835bc0d0de87a125cbe1d1b0ece01d4f3bd280f 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -1086,19 +1086,29 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev) 0 : ARRAY_SIZE(efx->rx_indir_table)); } -static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key) +static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, + u8 *hfunc) { struct efx_nic *efx = netdev_priv(net_dev); - memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table)); + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (indir) + memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table)); return 0; } -static int efx_ethtool_set_rxfh(struct net_device *net_dev, - const u32 *indir, const u8 *key) +static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir, + const u8 *key, const u8 hfunc) { struct efx_nic *efx = netdev_priv(net_dev); + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table)); efx->type->rx_push_rss_config(efx); return 0; diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 
157037546d3067c4fe79c7f38d981cee235aa76a..f166c8ef38a3073a70fa436b2ba4865fb7575d1f 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -2766,6 +2766,11 @@ const struct efx_nic_type falcon_a1_nic_type = { .mtd_write = falcon_mtd_write, .mtd_sync = falcon_mtd_sync, #endif + .sriov_init = efx_falcon_sriov_init, + .sriov_fini = efx_falcon_sriov_fini, + .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed, + .sriov_wanted = efx_falcon_sriov_wanted, + .sriov_reset = efx_falcon_sriov_reset, .revision = EFX_REV_FALCON_A1, .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER, @@ -2862,6 +2867,11 @@ const struct efx_nic_type falcon_b0_nic_type = { .mtd_write = falcon_mtd_write, .mtd_sync = falcon_mtd_sync, #endif + .sriov_init = efx_falcon_sriov_init, + .sriov_fini = efx_falcon_sriov_fini, + .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed, + .sriov_wanted = efx_falcon_sriov_wanted, + .sriov_reset = efx_falcon_sriov_reset, .revision = EFX_REV_FALCON_B0, .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 6859437b59fbb688e7e9188b96bdf5a53811a767..75975328e0206ce94ca091b3b22201f43dfe02f4 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -226,6 +226,9 @@ static int efx_alloc_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer, unsigned int len) { +#ifdef CONFIG_SFC_SRIOV + struct siena_nic_data *nic_data = efx->nic_data; +#endif len = ALIGN(len, EFX_BUF_SIZE); if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL)) @@ -237,8 +240,8 @@ static int efx_alloc_special_buffer(struct efx_nic *efx, buffer->index = efx->next_buffer_table; efx->next_buffer_table += buffer->entries; #ifdef CONFIG_SFC_SRIOV - BUG_ON(efx_sriov_enabled(efx) && - efx->vf_buftbl_base < efx->next_buffer_table); + BUG_ON(efx_siena_sriov_enabled(efx) && + nic_data->vf_buftbl_base < efx->next_buffer_table); #endif netif_dbg(efx, probe, efx->net_dev, @@ -667,7 +670,7 @@ static int efx_farch_do_flush(struct efx_nic *efx) * the firmware (though we will still have to poll for * completion). If that fails, fall back to the old scheme. 
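The sfc hunks above and below replace every direct efx_sriov_*() call with an indirect call through the efx_nic_type method table, so each NIC generation (Falcon, Siena, EF10) supplies its own implementation or stub. The following is a compilable toy model of that dispatch pattern only; all names are hypothetical and it is a sketch of the idea, not the driver's code.

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical mirror of the efx_nic_type method-table pattern:
	 * each NIC generation fills in its own SR-IOV hooks, and generic
	 * code calls through the table instead of one hard-coded function.
	 */
	struct nic;

	struct nic_type {
		const char *name;
		int  (*sriov_init)(struct nic *nic);
		bool (*sriov_wanted)(struct nic *nic);
	};

	struct nic {
		const struct nic_type *type;
		unsigned int vf_count;
	};

	/* Siena-style implementation: SR-IOV is wanted when VFs were requested */
	static int siena_sriov_init(struct nic *nic) { return 0; }
	static bool siena_sriov_wanted(struct nic *nic) { return nic->vf_count != 0; }

	/* Falcon-style stub: the hardware has no SR-IOV support at all */
	static int falcon_sriov_init(struct nic *nic) { return -1; /* -EOPNOTSUPP */ }
	static bool falcon_sriov_wanted(struct nic *nic) { return false; }

	static const struct nic_type siena_type = {
		.name = "siena",
		.sriov_init = siena_sriov_init,
		.sriov_wanted = siena_sriov_wanted,
	};
	static const struct nic_type falcon_type = {
		.name = "falcon",
		.sriov_init = falcon_sriov_init,
		.sriov_wanted = falcon_sriov_wanted,
	};

	/* Generic code never names a NIC generation directly */
	static void probe(struct nic *nic)
	{
		if (nic->type->sriov_wanted(nic))
			printf("%s: enabling SR-IOV, rc=%d\n",
			       nic->type->name, nic->type->sriov_init(nic));
		else
			printf("%s: SR-IOV not wanted\n", nic->type->name);
	}

	int main(void)
	{
		struct nic a = { .type = &siena_type, .vf_count = 4 };
		struct nic b = { .type = &falcon_type };

		probe(&a);
		probe(&b);
		return 0;
	}

The payoff is visible in efx.c above: callers such as efx_probe_nic() compile without CONFIG_SFC_SRIOV knowledge, because Falcon and EF10 provide no-op stubs rather than forcing #ifdefs at every call site.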
*/ - if (efx_sriov_enabled(efx)) { + if (efx_siena_sriov_enabled(efx)) { rc = efx_mcdi_flush_rxqs(efx); if (!rc) goto wait; @@ -1195,13 +1198,13 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", channel->channel, ev_sub_data); efx_farch_handle_tx_flush_done(efx, event); - efx_sriov_tx_flush_done(efx, event); + efx_siena_sriov_tx_flush_done(efx, event); break; case FSE_AZ_RX_DESCQ_FLS_DONE_EV: netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", channel->channel, ev_sub_data); efx_farch_handle_rx_flush_done(efx, event); - efx_sriov_rx_flush_done(efx, event); + efx_siena_sriov_rx_flush_done(efx, event); break; case FSE_AZ_EVQ_INIT_DONE_EV: netif_dbg(efx, hw, efx->net_dev, @@ -1240,7 +1243,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) ev_sub_data); efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); } else - efx_sriov_desc_fetch_err(efx, ev_sub_data); + efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); break; case FSE_BZ_TX_DSC_ERROR_EV: if (ev_sub_data < EFX_VI_BASE) { @@ -1250,7 +1253,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) ev_sub_data); efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); } else - efx_sriov_desc_fetch_err(efx, ev_sub_data); + efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); break; default: netif_vdbg(efx, hw, efx->net_dev, @@ -1315,7 +1318,7 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget) efx_farch_handle_driver_event(channel, &event); break; case FSE_CZ_EV_CODE_USER_EV: - efx_sriov_event(channel, &event); + efx_siena_sriov_event(channel, &event); break; case FSE_CZ_EV_CODE_MCDI_EV: efx_mcdi_process_event(channel, &event); @@ -1668,6 +1671,10 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) { unsigned vi_count, buftbl_min; +#ifdef CONFIG_SFC_SRIOV + struct siena_nic_data *nic_data = efx->nic_data; +#endif + /* Account for the buffer table entries backing the datapath channels * and the descriptor caches for those channels. */ @@ -1678,10 +1685,10 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); #ifdef CONFIG_SFC_SRIOV - if (efx_sriov_wanted(efx)) { + if (efx->type->sriov_wanted(efx)) { unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit; - efx->vf_buftbl_base = buftbl_min; + nic_data->vf_buftbl_base = buftbl_min; vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES; vi_count = max(vi_count, EFX_VI_BASE); diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 5239cf9bdc567f4f6779c10478a623ed9a36f505..d37928f01949d1473938011a23635a8e5a71a8a8 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -1035,7 +1035,7 @@ void efx_mcdi_process_event(struct efx_channel *channel, /* MAC stats are gather lazily. We can ignore this. 
*/ break; case MCDI_EVENT_CODE_FLR: - efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF)); + efx_siena_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF)); break; case MCDI_EVENT_CODE_PTP_RX: case MCDI_EVENT_CODE_PTP_FAULT: diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 9ede32064685fd62834d0626156a2bab8a90c291..325dd94bca465ef99efe00b9d829b63c1abab3a0 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -913,13 +913,6 @@ struct vfdi_status; * @vf_count: Number of VFs intended to be enabled. * @vf_init_count: Number of VFs that have been fully initialised. * @vi_scale: log2 number of vnics per VF. - * @vf_buftbl_base: The zeroth buffer table index used to back VF queues. - * @vfdi_status: Common VFDI status page to be dmad to VF address space. - * @local_addr_list: List of local addresses. Protected by %local_lock. - * @local_page_list: List of DMA addressable pages used to broadcast - * %local_addr_list. Protected by %local_lock. - * @local_lock: Mutex protecting %local_addr_list and %local_page_list. - * @peer_work: Work item to broadcast peer addresses to VMs. * @ptp_data: PTP state data * @vpd_sn: Serial number read from VPD * @monitor_work: Hardware monitor workitem @@ -1060,17 +1053,10 @@ struct efx_nic { wait_queue_head_t flush_wq; #ifdef CONFIG_SFC_SRIOV - struct efx_channel *vfdi_channel; struct efx_vf *vf; unsigned vf_count; unsigned vf_init_count; unsigned vi_scale; - unsigned vf_buftbl_base; - struct efx_buffer vfdi_status; - struct list_head local_addr_list; - struct list_head local_page_list; - struct mutex local_lock; - struct work_struct peer_work; #endif struct efx_ptp_data *ptp_data; @@ -1344,6 +1330,11 @@ struct efx_nic_type { int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp); int (*ptp_set_ts_config)(struct efx_nic *efx, struct hwtstamp_config *init); + int (*sriov_init)(struct efx_nic *efx); + void (*sriov_fini)(struct efx_nic *efx); + void (*sriov_mac_address_changed)(struct efx_nic *efx); + bool (*sriov_wanted)(struct efx_nic *efx); + void (*sriov_reset)(struct efx_nic *efx); int revision; unsigned int txd_ptr_tbl_base; diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index f77cce034ad45ab93e0fc8f6b9ee2b49f9aba320..93d10cbbd1cfa9cd419f419d1420ffa40bc7da26 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -378,12 +378,30 @@ enum { /** * struct siena_nic_data - Siena NIC state + * @efx: Pointer back to main interface structure * @wol_filter_id: Wake-on-LAN packet filter id * @stats: Hardware statistics + * @vf_buftbl_base: The zeroth buffer table index used to back VF queues. + * @vfdi_status: Common VFDI status page to be dmad to VF address space. + * @local_addr_list: List of local addresses. Protected by %local_lock. + * @local_page_list: List of DMA addressable pages used to broadcast + * %local_addr_list. Protected by %local_lock. + * @local_lock: Mutex protecting %local_addr_list and %local_page_list. + * @peer_work: Work item to broadcast peer addresses to VMs. 
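The net_driver.h and nic.h hunks here move the VFDI bookkeeping out of the shared struct efx_nic and into Siena's private nic_data, which gains an efx back-pointer so deferred work can still reach the generic device. A minimal standalone sketch of that ownership pattern, with hypothetical names throughout:

	#include <stddef.h>
	#include <stdio.h>

	/* Stand-in for the kernel's container_of(): recover the enclosing
	 * structure from a pointer to one of its members.
	 */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct work { void (*fn)(struct work *w); };

	struct nic {
		void *nic_data;		/* generation-specific state */
	};

	struct siena_nic_data {
		struct nic *nic;	/* back-pointer to the generic device */
		unsigned int vf_buftbl_base;
		struct work peer_work;
	};

	/* The work handler only receives its own work struct; container_of()
	 * recovers the private state, and the back-pointer recovers the device.
	 */
	static void peer_work_fn(struct work *w)
	{
		struct siena_nic_data *nd =
			container_of(w, struct siena_nic_data, peer_work);

		printf("peer work for nic %p, buftbl base %u\n",
		       (void *)nd->nic, nd->vf_buftbl_base);
	}

	int main(void)
	{
		struct nic nic;
		struct siena_nic_data nd = {
			.nic = &nic, .vf_buftbl_base = 128,
			.peer_work = { .fn = peer_work_fn },
		};

		nic.nic_data = &nd;
		nd.peer_work.fn(&nd.peer_work);	/* stand-in for queue_work() */
		return 0;
	}

This is why efx_siena_sriov_peer_work() below switches its container_of() from struct efx_nic to struct siena_nic_data, and why farch.c now pulls nic_data out of efx->nic_data before touching vf_buftbl_base.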
*/ struct siena_nic_data { + struct efx_nic *efx; int wol_filter_id; u64 stats[SIENA_STAT_COUNT]; +#ifdef CONFIG_SFC_SRIOV + struct efx_channel *vfdi_channel; + unsigned vf_buftbl_base; + struct efx_buffer vfdi_status; + struct list_head local_addr_list; + struct list_head local_page_list; + struct mutex local_lock; + struct work_struct peer_work; +#endif }; enum { @@ -522,62 +540,88 @@ struct efx_ef10_nic_data { #ifdef CONFIG_SFC_SRIOV -static inline bool efx_sriov_wanted(struct efx_nic *efx) +/* SIENA */ +static inline bool efx_siena_sriov_wanted(struct efx_nic *efx) { return efx->vf_count != 0; } -static inline bool efx_sriov_enabled(struct efx_nic *efx) + +static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { return efx->vf_init_count != 0; } + static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 1 << efx->vi_scale; } int efx_init_sriov(void); -void efx_sriov_probe(struct efx_nic *efx); -int efx_sriov_init(struct efx_nic *efx); -void efx_sriov_mac_address_changed(struct efx_nic *efx); -void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event); -void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event); -void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event); -void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq); -void efx_sriov_flr(struct efx_nic *efx, unsigned flr); -void efx_sriov_reset(struct efx_nic *efx); -void efx_sriov_fini(struct efx_nic *efx); +void efx_siena_sriov_probe(struct efx_nic *efx); +int efx_siena_sriov_init(struct efx_nic *efx); +void efx_siena_sriov_mac_address_changed(struct efx_nic *efx); +void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event); +void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event); +void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event); +void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq); +void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr); +void efx_siena_sriov_reset(struct efx_nic *efx); +void efx_siena_sriov_fini(struct efx_nic *efx); void efx_fini_sriov(void); +/* EF10 */ +static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; } +static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } +static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {} +static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {} +static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {} + #else -static inline bool efx_sriov_wanted(struct efx_nic *efx) { return false; } -static inline bool efx_sriov_enabled(struct efx_nic *efx) { return false; } +/* SIENA */ +static inline bool efx_siena_sriov_wanted(struct efx_nic *efx) { return false; } +static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { return false; } static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; } - static inline int efx_init_sriov(void) { return 0; } -static inline void efx_sriov_probe(struct efx_nic *efx) {} -static inline int efx_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } -static inline void efx_sriov_mac_address_changed(struct efx_nic *efx) {} -static inline void efx_sriov_tx_flush_done(struct efx_nic *efx, - efx_qword_t *event) {} -static inline void efx_sriov_rx_flush_done(struct efx_nic *efx, - efx_qword_t *event) {} -static inline void efx_sriov_event(struct efx_channel *channel, - efx_qword_t *event) {} -static inline void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) {} -static inline 
void efx_sriov_flr(struct efx_nic *efx, unsigned flr) {} -static inline void efx_sriov_reset(struct efx_nic *efx) {} -static inline void efx_sriov_fini(struct efx_nic *efx) {} +static inline void efx_siena_sriov_probe(struct efx_nic *efx) {} +static inline int efx_siena_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } +static inline void efx_siena_sriov_mac_address_changed(struct efx_nic *efx) {} +static inline void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, + efx_qword_t *event) {} +static inline void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, + efx_qword_t *event) {} +static inline void efx_siena_sriov_event(struct efx_channel *channel, + efx_qword_t *event) {} +static inline void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, + unsigned dmaq) {} +static inline void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr) {} +static inline void efx_siena_sriov_reset(struct efx_nic *efx) {} +static inline void efx_siena_sriov_fini(struct efx_nic *efx) {} static inline void efx_fini_sriov(void) {} +/* EF10 */ +static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; } +static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } +static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {} +static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {} +static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {} + #endif -int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac); -int efx_sriov_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos); -int efx_sriov_get_vf_config(struct net_device *dev, int vf, - struct ifla_vf_info *ivf); -int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf, - bool spoofchk); +/* FALCON */ +static inline bool efx_falcon_sriov_wanted(struct efx_nic *efx) { return false; } +static inline int efx_falcon_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } +static inline void efx_falcon_sriov_mac_address_changed(struct efx_nic *efx) {} +static inline void efx_falcon_sriov_reset(struct efx_nic *efx) {} +static inline void efx_falcon_sriov_fini(struct efx_nic *efx) {} + +int efx_siena_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac); +int efx_siena_sriov_set_vf_vlan(struct net_device *dev, int vf, + u16 vlan, u8 qos); +int efx_siena_sriov_get_vf_config(struct net_device *dev, int vf, + struct ifla_vf_info *ivf); +int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf, + bool spoofchk); struct ethtool_ts_info; int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel); diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index ae696855f21acafbe4f38d55678f70722da5c66e..3583f0208a6ebbf08e04a55f53d6145283ed37d2 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -251,6 +251,7 @@ static int siena_probe_nic(struct efx_nic *efx) nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL); if (!nic_data) return -ENOMEM; + nic_data->efx = efx; efx->nic_data = nic_data; if (efx_farch_fpga_ver(efx) != 0) { @@ -306,7 +307,7 @@ static int siena_probe_nic(struct efx_nic *efx) if (rc) goto fail5; - efx_sriov_probe(efx); + efx_siena_sriov_probe(efx); efx_ptp_defer_probe_with_channel(efx); return 0; @@ -996,6 +997,11 @@ const struct efx_nic_type siena_a0_nic_type = { #endif .ptp_write_host_time = siena_ptp_write_host_time, .ptp_set_ts_config = siena_ptp_set_ts_config, + .sriov_init = efx_siena_sriov_init, + .sriov_fini = efx_siena_sriov_fini, + 
.sriov_mac_address_changed = efx_siena_sriov_mac_address_changed, + .sriov_wanted = efx_siena_sriov_wanted, + .sriov_reset = efx_siena_sriov_reset, .revision = EFX_REV_SIENA_A0, .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c index 43d2e64546ed1d924c91d26900b37d1b100dd994..a8bbbad68a88e6e3c2c9b7480bdb88582b8abf41 100644 --- a/drivers/net/ethernet/sfc/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena_sriov.c @@ -66,7 +66,7 @@ enum efx_vf_tx_filter_mode { * @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr, * @peer_page_addrs and @peer_page_count from simultaneous * updates by the VM and consumption by - * efx_sriov_update_vf_addr() + * efx_siena_sriov_update_vf_addr() * @peer_page_addrs: Pointer to an array of guest pages for local addresses. * @peer_page_count: Number of entries in @peer_page_count. * @evq0_addrs: Array of guest pages backing evq0. @@ -194,8 +194,8 @@ static unsigned abs_index(struct efx_vf *vf, unsigned index) return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index; } -static int efx_sriov_cmd(struct efx_nic *efx, bool enable, - unsigned *vi_scale_out, unsigned *vf_total_out) +static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable, + unsigned *vi_scale_out, unsigned *vf_total_out) { MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN); @@ -227,18 +227,20 @@ static int efx_sriov_cmd(struct efx_nic *efx, bool enable, return 0; } -static void efx_sriov_usrev(struct efx_nic *efx, bool enabled) +static void efx_siena_sriov_usrev(struct efx_nic *efx, bool enabled) { + struct siena_nic_data *nic_data = efx->nic_data; efx_oword_t reg; EFX_POPULATE_OWORD_2(reg, FRF_CZ_USREV_DIS, enabled ? 0 : 1, - FRF_CZ_DFLT_EVQ, efx->vfdi_channel->channel); + FRF_CZ_DFLT_EVQ, nic_data->vfdi_channel->channel); efx_writeo(efx, ®, FR_CZ_USR_EV_CFG); } -static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req, - unsigned int count) +static int efx_siena_sriov_memcpy(struct efx_nic *efx, + struct efx_memcpy_req *req, + unsigned int count) { MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1); MCDI_DECLARE_STRUCT_PTR(record); @@ -297,7 +299,7 @@ out: /* The TX filter is entirely controlled by this driver, and is modified * underneath the feet of the VF */ -static void efx_sriov_reset_tx_filter(struct efx_vf *vf) +static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf) { struct efx_nic *efx = vf->efx; struct efx_filter_spec filter; @@ -341,7 +343,7 @@ static void efx_sriov_reset_tx_filter(struct efx_vf *vf) } /* The RX filter is managed here on behalf of the VF driver */ -static void efx_sriov_reset_rx_filter(struct efx_vf *vf) +static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf) { struct efx_nic *efx = vf->efx; struct efx_filter_spec filter; @@ -380,22 +382,26 @@ static void efx_sriov_reset_rx_filter(struct efx_vf *vf) } } -static void __efx_sriov_update_vf_addr(struct efx_vf *vf) +static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf) { - efx_sriov_reset_tx_filter(vf); - efx_sriov_reset_rx_filter(vf); - queue_work(vfdi_workqueue, &vf->efx->peer_work); + struct efx_nic *efx = vf->efx; + struct siena_nic_data *nic_data = efx->nic_data; + + efx_siena_sriov_reset_tx_filter(vf); + efx_siena_sriov_reset_rx_filter(vf); + queue_work(vfdi_workqueue, &nic_data->peer_work); } /* Push the peer list to this VF. 
The caller must hold status_lock to interlock * with VFDI requests, and they must be serialised against manipulation of * local_page_list, either by acquiring local_lock or by running from - * efx_sriov_peer_work() + * efx_siena_sriov_peer_work() */ -static void __efx_sriov_push_vf_status(struct efx_vf *vf) +static void __efx_siena_sriov_push_vf_status(struct efx_vf *vf) { struct efx_nic *efx = vf->efx; - struct vfdi_status *status = efx->vfdi_status.addr; + struct siena_nic_data *nic_data = efx->nic_data; + struct vfdi_status *status = nic_data->vfdi_status.addr; struct efx_memcpy_req copy[4]; struct efx_endpoint_page *epp; unsigned int pos, count; @@ -421,7 +427,7 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf) */ data_offset = offsetof(struct vfdi_status, version); copy[1].from_rid = efx->pci_dev->devfn; - copy[1].from_addr = efx->vfdi_status.dma_addr + data_offset; + copy[1].from_addr = nic_data->vfdi_status.dma_addr + data_offset; copy[1].to_rid = vf->pci_rid; copy[1].to_addr = vf->status_addr + data_offset; copy[1].length = status->length - data_offset; @@ -429,7 +435,7 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf) /* Copy the peer pages */ pos = 2; count = 0; - list_for_each_entry(epp, &efx->local_page_list, link) { + list_for_each_entry(epp, &nic_data->local_page_list, link) { if (count == vf->peer_page_count) { /* The VF driver will know they need to provide more * pages because peer_addr_count is too large. @@ -444,7 +450,7 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf) copy[pos].length = EFX_PAGE_SIZE; if (++pos == ARRAY_SIZE(copy)) { - efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); + efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); pos = 0; } ++count; @@ -456,7 +462,7 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf) copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status, generation_end); copy[pos].length = sizeof(status->generation_end); - efx_sriov_memcpy(efx, copy, pos + 1); + efx_siena_sriov_memcpy(efx, copy, pos + 1); /* Notify the guest */ EFX_POPULATE_QWORD_3(event, @@ -469,8 +475,8 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf) &event); } -static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset, - u64 *addr, unsigned count) +static void efx_siena_sriov_bufs(struct efx_nic *efx, unsigned offset, + u64 *addr, unsigned count) { efx_qword_t buf; unsigned pos; @@ -539,7 +545,7 @@ static int efx_vfdi_init_evq(struct efx_vf *vf) return VFDI_RC_EINVAL; } - efx_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count); + efx_siena_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count); EFX_POPULATE_OWORD_3(reg, FRF_CZ_TIMER_Q_EN, 1, @@ -584,7 +590,7 @@ static int efx_vfdi_init_rxq(struct efx_vf *vf) } if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask)) ++vf->rxq_count; - efx_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count); + efx_siena_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count); label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL); EFX_POPULATE_OWORD_6(reg, @@ -628,7 +634,7 @@ static int efx_vfdi_init_txq(struct efx_vf *vf) if (__test_and_set_bit(req->u.init_txq.index, vf->txq_mask)) ++vf->txq_count; mutex_unlock(&vf->txq_lock); - efx_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count); + efx_siena_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count); eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON; @@ -742,8 +748,8 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf) efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, vf_offset + 
index); } - efx_sriov_bufs(efx, vf->buftbl_base, NULL, - EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx)); + efx_siena_sriov_bufs(efx, vf->buftbl_base, NULL, + EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx)); efx_vfdi_flush_clear(vf); vf->evq0_count = 0; @@ -754,6 +760,7 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf) static int efx_vfdi_insert_filter(struct efx_vf *vf) { struct efx_nic *efx = vf->efx; + struct siena_nic_data *nic_data = efx->nic_data; struct vfdi_req *req = vf->buf.addr; unsigned vf_rxq = req->u.mac_filter.rxq; unsigned flags; @@ -776,17 +783,20 @@ static int efx_vfdi_insert_filter(struct efx_vf *vf) vf->rx_filter_qid = vf_rxq; vf->rx_filtering = true; - efx_sriov_reset_rx_filter(vf); - queue_work(vfdi_workqueue, &efx->peer_work); + efx_siena_sriov_reset_rx_filter(vf); + queue_work(vfdi_workqueue, &nic_data->peer_work); return VFDI_RC_SUCCESS; } static int efx_vfdi_remove_all_filters(struct efx_vf *vf) { + struct efx_nic *efx = vf->efx; + struct siena_nic_data *nic_data = efx->nic_data; + vf->rx_filtering = false; - efx_sriov_reset_rx_filter(vf); - queue_work(vfdi_workqueue, &vf->efx->peer_work); + efx_siena_sriov_reset_rx_filter(vf); + queue_work(vfdi_workqueue, &nic_data->peer_work); return VFDI_RC_SUCCESS; } @@ -794,6 +804,7 @@ static int efx_vfdi_remove_all_filters(struct efx_vf *vf) static int efx_vfdi_set_status_page(struct efx_vf *vf) { struct efx_nic *efx = vf->efx; + struct siena_nic_data *nic_data = efx->nic_data; struct vfdi_req *req = vf->buf.addr; u64 page_count = req->u.set_status_page.peer_page_count; u64 max_page_count = @@ -809,7 +820,7 @@ static int efx_vfdi_set_status_page(struct efx_vf *vf) return VFDI_RC_EINVAL; } - mutex_lock(&efx->local_lock); + mutex_lock(&nic_data->local_lock); mutex_lock(&vf->status_lock); vf->status_addr = req->u.set_status_page.dma_addr; @@ -828,9 +839,9 @@ static int efx_vfdi_set_status_page(struct efx_vf *vf) } } - __efx_sriov_push_vf_status(vf); + __efx_siena_sriov_push_vf_status(vf); mutex_unlock(&vf->status_lock); - mutex_unlock(&efx->local_lock); + mutex_unlock(&nic_data->local_lock); return VFDI_RC_SUCCESS; } @@ -857,7 +868,7 @@ static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = { [VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page, }; -static void efx_sriov_vfdi(struct work_struct *work) +static void efx_siena_sriov_vfdi(struct work_struct *work) { struct efx_vf *vf = container_of(work, struct efx_vf, req); struct efx_nic *efx = vf->efx; @@ -872,7 +883,7 @@ static void efx_sriov_vfdi(struct work_struct *work) copy[0].to_rid = efx->pci_dev->devfn; copy[0].to_addr = vf->buf.dma_addr; copy[0].length = EFX_PAGE_SIZE; - rc = efx_sriov_memcpy(efx, copy, 1); + rc = efx_siena_sriov_memcpy(efx, copy, 1); if (rc) { /* If we can't get the request, we can't reply to the caller */ if (net_ratelimit()) @@ -916,7 +927,7 @@ static void efx_sriov_vfdi(struct work_struct *work) copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op); copy[1].length = sizeof(req->op); - (void) efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); + (void)efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); } @@ -925,7 +936,8 @@ static void efx_sriov_vfdi(struct work_struct *work) * event ring in guest memory with VFDI reset events, then (re-initialise) the * event queue to raise an interrupt. The guest driver will then recover. 
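The vfdi_ops[] table shown in this hunk dispatches guest requests by opcode. A compilable sketch of the same bounds-checked table dispatch (enum and function names hypothetical): unknown or unimplemented opcodes fail safely instead of indexing past the end of the table.

	#include <stdio.h>

	enum { OP_INIT_EVQ, OP_INIT_RXQ, OP_FINI_ALL, OP_LIMIT };

	typedef int (*vfdi_op_t)(void);

	static int op_init_evq(void) { return 0; }
	static int op_init_rxq(void) { return 0; }

	static const vfdi_op_t ops[OP_LIMIT] = {
		[OP_INIT_EVQ] = op_init_evq,
		[OP_INIT_RXQ] = op_init_rxq,
		/* OP_FINI_ALL left NULL on purpose: treated as unsupported */
	};

	static int dispatch(unsigned int op)
	{
		if (op >= OP_LIMIT || !ops[op])
			return -1;	/* VFDI_RC_EOPNOTSUPP in the driver */
		return ops[op]();
	}

	int main(void)
	{
		printf("init_evq -> %d\n", dispatch(OP_INIT_EVQ));
		printf("fini_all -> %d\n", dispatch(OP_FINI_ALL));
		printf("bogus    -> %d\n", dispatch(42));
		return 0;
	}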
*/ -static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer) +static void efx_siena_sriov_reset_vf(struct efx_vf *vf, + struct efx_buffer *buffer) { struct efx_nic *efx = vf->efx; struct efx_memcpy_req copy_req[4]; @@ -961,7 +973,7 @@ static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer) copy_req[k].to_addr = vf->evq0_addrs[pos + k]; copy_req[k].length = EFX_PAGE_SIZE; } - rc = efx_sriov_memcpy(efx, copy_req, count); + rc = efx_siena_sriov_memcpy(efx, copy_req, count); if (rc) { if (net_ratelimit()) netif_err(efx, hw, efx->net_dev, @@ -974,7 +986,7 @@ static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer) /* Reinitialise, arm and trigger evq0 */ abs_evq = abs_index(vf, 0); buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0); - efx_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count); + efx_siena_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count); EFX_POPULATE_OWORD_3(reg, FRF_CZ_TIMER_Q_EN, 1, @@ -992,19 +1004,19 @@ static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer) mutex_unlock(&vf->status_lock); } -static void efx_sriov_reset_vf_work(struct work_struct *work) +static void efx_siena_sriov_reset_vf_work(struct work_struct *work) { struct efx_vf *vf = container_of(work, struct efx_vf, req); struct efx_nic *efx = vf->efx; struct efx_buffer buf; if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) { - efx_sriov_reset_vf(vf, &buf); + efx_siena_sriov_reset_vf(vf, &buf); efx_nic_free_buffer(efx, &buf); } } -static void efx_sriov_handle_no_channel(struct efx_nic *efx) +static void efx_siena_sriov_handle_no_channel(struct efx_nic *efx) { netif_err(efx, drv, efx->net_dev, "ERROR: IOV requires MSI-X and 1 additional interrupt" @@ -1012,35 +1024,38 @@ static void efx_sriov_handle_no_channel(struct efx_nic *efx) efx->vf_count = 0; } -static int efx_sriov_probe_channel(struct efx_channel *channel) +static int efx_siena_sriov_probe_channel(struct efx_channel *channel) { - channel->efx->vfdi_channel = channel; + struct siena_nic_data *nic_data = channel->efx->nic_data; + nic_data->vfdi_channel = channel; + return 0; } static void -efx_sriov_get_channel_name(struct efx_channel *channel, char *buf, size_t len) +efx_siena_sriov_get_channel_name(struct efx_channel *channel, + char *buf, size_t len) { snprintf(buf, len, "%s-iov", channel->efx->name); } -static const struct efx_channel_type efx_sriov_channel_type = { - .handle_no_channel = efx_sriov_handle_no_channel, - .pre_probe = efx_sriov_probe_channel, +static const struct efx_channel_type efx_siena_sriov_channel_type = { + .handle_no_channel = efx_siena_sriov_handle_no_channel, + .pre_probe = efx_siena_sriov_probe_channel, .post_remove = efx_channel_dummy_op_void, - .get_name = efx_sriov_get_channel_name, + .get_name = efx_siena_sriov_get_channel_name, /* no copy operation; channel must not be reallocated */ .keep_eventq = true, }; -void efx_sriov_probe(struct efx_nic *efx) +void efx_siena_sriov_probe(struct efx_nic *efx) { unsigned count; if (!max_vfs) return; - if (efx_sriov_cmd(efx, false, &efx->vi_scale, &count)) + if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) return; if (count > 0 && count > max_vfs) count = max_vfs; @@ -1048,17 +1063,20 @@ void efx_sriov_probe(struct efx_nic *efx) /* efx_nic_dimension_resources() will reduce vf_count as appopriate */ efx->vf_count = count; - efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_sriov_channel_type; + efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_siena_sriov_channel_type; } /* Copy the 
list of individual addresses into the vfdi_status.peers * array and auxillary pages, protected by %local_lock. Drop that lock * and then broadcast the address list to every VF. */ -static void efx_sriov_peer_work(struct work_struct *data) +static void efx_siena_sriov_peer_work(struct work_struct *data) { - struct efx_nic *efx = container_of(data, struct efx_nic, peer_work); - struct vfdi_status *vfdi_status = efx->vfdi_status.addr; + struct siena_nic_data *nic_data = container_of(data, + struct siena_nic_data, + peer_work); + struct efx_nic *efx = nic_data->efx; + struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr; struct efx_vf *vf; struct efx_local_addr *local_addr; struct vfdi_endpoint *peer; @@ -1068,11 +1086,11 @@ static void efx_sriov_peer_work(struct work_struct *data) unsigned int peer_count; unsigned int pos; - mutex_lock(&efx->local_lock); + mutex_lock(&nic_data->local_lock); /* Move the existing peer pages off %local_page_list */ INIT_LIST_HEAD(&pages); - list_splice_tail_init(&efx->local_page_list, &pages); + list_splice_tail_init(&nic_data->local_page_list, &pages); /* Populate the VF addresses starting from entry 1 (entry 0 is * the PF address) @@ -1094,7 +1112,7 @@ static void efx_sriov_peer_work(struct work_struct *data) } /* Fill the remaining addresses */ - list_for_each_entry(local_addr, &efx->local_addr_list, link) { + list_for_each_entry(local_addr, &nic_data->local_addr_list, link) { ether_addr_copy(peer->mac_addr, local_addr->addr); peer->tci = 0; ++peer; @@ -1117,13 +1135,13 @@ static void efx_sriov_peer_work(struct work_struct *data) list_del(&epp->link); } - list_add_tail(&epp->link, &efx->local_page_list); + list_add_tail(&epp->link, &nic_data->local_page_list); peer = (struct vfdi_endpoint *)epp->ptr; peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint); } } vfdi_status->peer_count = peer_count; - mutex_unlock(&efx->local_lock); + mutex_unlock(&nic_data->local_lock); /* Free any now unused endpoint pages */ while (!list_empty(&pages)) { @@ -1141,25 +1159,26 @@ static void efx_sriov_peer_work(struct work_struct *data) mutex_lock(&vf->status_lock); if (vf->status_addr) - __efx_sriov_push_vf_status(vf); + __efx_siena_sriov_push_vf_status(vf); mutex_unlock(&vf->status_lock); } } -static void efx_sriov_free_local(struct efx_nic *efx) +static void efx_siena_sriov_free_local(struct efx_nic *efx) { + struct siena_nic_data *nic_data = efx->nic_data; struct efx_local_addr *local_addr; struct efx_endpoint_page *epp; - while (!list_empty(&efx->local_addr_list)) { - local_addr = list_first_entry(&efx->local_addr_list, + while (!list_empty(&nic_data->local_addr_list)) { + local_addr = list_first_entry(&nic_data->local_addr_list, struct efx_local_addr, link); list_del(&local_addr->link); kfree(local_addr); } - while (!list_empty(&efx->local_page_list)) { - epp = list_first_entry(&efx->local_page_list, + while (!list_empty(&nic_data->local_page_list)) { + epp = list_first_entry(&nic_data->local_page_list, struct efx_endpoint_page, link); list_del(&epp->link); dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE, @@ -1168,7 +1187,7 @@ static void efx_sriov_free_local(struct efx_nic *efx) } } -static int efx_sriov_vf_alloc(struct efx_nic *efx) +static int efx_siena_sriov_vf_alloc(struct efx_nic *efx) { unsigned index; struct efx_vf *vf; @@ -1185,8 +1204,8 @@ static int efx_sriov_vf_alloc(struct efx_nic *efx) vf->rx_filter_id = -1; vf->tx_filter_mode = VF_TX_FILTER_AUTO; vf->tx_filter_id = -1; - INIT_WORK(&vf->req, efx_sriov_vfdi); - INIT_WORK(&vf->reset_work, 
efx_sriov_reset_vf_work); + INIT_WORK(&vf->req, efx_siena_sriov_vfdi); + INIT_WORK(&vf->reset_work, efx_siena_sriov_reset_vf_work); init_waitqueue_head(&vf->flush_waitq); mutex_init(&vf->status_lock); mutex_init(&vf->txq_lock); @@ -1195,7 +1214,7 @@ static int efx_sriov_vf_alloc(struct efx_nic *efx) return 0; } -static void efx_sriov_vfs_fini(struct efx_nic *efx) +static void efx_siena_sriov_vfs_fini(struct efx_nic *efx) { struct efx_vf *vf; unsigned int pos; @@ -1212,9 +1231,10 @@ static void efx_sriov_vfs_fini(struct efx_nic *efx) } } -static int efx_sriov_vfs_init(struct efx_nic *efx) +static int efx_siena_sriov_vfs_init(struct efx_nic *efx) { struct pci_dev *pci_dev = efx->pci_dev; + struct siena_nic_data *nic_data = efx->nic_data; unsigned index, devfn, sriov, buftbl_base; u16 offset, stride; struct efx_vf *vf; @@ -1227,7 +1247,7 @@ static int efx_sriov_vfs_init(struct efx_nic *efx) pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset); pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride); - buftbl_base = efx->vf_buftbl_base; + buftbl_base = nic_data->vf_buftbl_base; devfn = pci_dev->devfn + offset; for (index = 0; index < efx->vf_count; ++index) { vf = efx->vf + index; @@ -1253,13 +1273,14 @@ static int efx_sriov_vfs_init(struct efx_nic *efx) return 0; fail: - efx_sriov_vfs_fini(efx); + efx_siena_sriov_vfs_fini(efx); return rc; } -int efx_sriov_init(struct efx_nic *efx) +int efx_siena_sriov_init(struct efx_nic *efx) { struct net_device *net_dev = efx->net_dev; + struct siena_nic_data *nic_data = efx->nic_data; struct vfdi_status *vfdi_status; int rc; @@ -1271,15 +1292,15 @@ int efx_sriov_init(struct efx_nic *efx) if (efx->vf_count == 0) return 0; - rc = efx_sriov_cmd(efx, true, NULL, NULL); + rc = efx_siena_sriov_cmd(efx, true, NULL, NULL); if (rc) goto fail_cmd; - rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status), - GFP_KERNEL); + rc = efx_nic_alloc_buffer(efx, &nic_data->vfdi_status, + sizeof(*vfdi_status), GFP_KERNEL); if (rc) goto fail_status; - vfdi_status = efx->vfdi_status.addr; + vfdi_status = nic_data->vfdi_status.addr; memset(vfdi_status, 0, sizeof(*vfdi_status)); vfdi_status->version = 1; vfdi_status->length = sizeof(*vfdi_status); @@ -1289,16 +1310,16 @@ int efx_sriov_init(struct efx_nic *efx) vfdi_status->peer_count = 1 + efx->vf_count; vfdi_status->timer_quantum_ns = efx->timer_quantum_ns; - rc = efx_sriov_vf_alloc(efx); + rc = efx_siena_sriov_vf_alloc(efx); if (rc) goto fail_alloc; - mutex_init(&efx->local_lock); - INIT_WORK(&efx->peer_work, efx_sriov_peer_work); - INIT_LIST_HEAD(&efx->local_addr_list); - INIT_LIST_HEAD(&efx->local_page_list); + mutex_init(&nic_data->local_lock); + INIT_WORK(&nic_data->peer_work, efx_siena_sriov_peer_work); + INIT_LIST_HEAD(&nic_data->local_addr_list); + INIT_LIST_HEAD(&nic_data->local_page_list); - rc = efx_sriov_vfs_init(efx); + rc = efx_siena_sriov_vfs_init(efx); if (rc) goto fail_vfs; @@ -1307,7 +1328,7 @@ int efx_sriov_init(struct efx_nic *efx) efx->vf_init_count = efx->vf_count; rtnl_unlock(); - efx_sriov_usrev(efx, true); + efx_siena_sriov_usrev(efx, true); /* At this point we must be ready to accept VFDI requests */ @@ -1321,34 +1342,35 @@ int efx_sriov_init(struct efx_nic *efx) return 0; fail_pci: - efx_sriov_usrev(efx, false); + efx_siena_sriov_usrev(efx, false); rtnl_lock(); efx->vf_init_count = 0; rtnl_unlock(); - efx_sriov_vfs_fini(efx); + efx_siena_sriov_vfs_fini(efx); fail_vfs: - cancel_work_sync(&efx->peer_work); - efx_sriov_free_local(efx); + 
cancel_work_sync(&nic_data->peer_work); + efx_siena_sriov_free_local(efx); kfree(efx->vf); fail_alloc: - efx_nic_free_buffer(efx, &efx->vfdi_status); + efx_nic_free_buffer(efx, &nic_data->vfdi_status); fail_status: - efx_sriov_cmd(efx, false, NULL, NULL); + efx_siena_sriov_cmd(efx, false, NULL, NULL); fail_cmd: return rc; } -void efx_sriov_fini(struct efx_nic *efx) +void efx_siena_sriov_fini(struct efx_nic *efx) { struct efx_vf *vf; unsigned int pos; + struct siena_nic_data *nic_data = efx->nic_data; if (efx->vf_init_count == 0) return; /* Disable all interfaces to reconfiguration */ - BUG_ON(efx->vfdi_channel->enabled); - efx_sriov_usrev(efx, false); + BUG_ON(nic_data->vfdi_channel->enabled); + efx_siena_sriov_usrev(efx, false); rtnl_lock(); efx->vf_init_count = 0; rtnl_unlock(); @@ -1359,19 +1381,19 @@ void efx_sriov_fini(struct efx_nic *efx) cancel_work_sync(&vf->req); cancel_work_sync(&vf->reset_work); } - cancel_work_sync(&efx->peer_work); + cancel_work_sync(&nic_data->peer_work); pci_disable_sriov(efx->pci_dev); /* Tear down back-end state */ - efx_sriov_vfs_fini(efx); - efx_sriov_free_local(efx); + efx_siena_sriov_vfs_fini(efx); + efx_siena_sriov_free_local(efx); kfree(efx->vf); - efx_nic_free_buffer(efx, &efx->vfdi_status); - efx_sriov_cmd(efx, false, NULL, NULL); + efx_nic_free_buffer(efx, &nic_data->vfdi_status); + efx_siena_sriov_cmd(efx, false, NULL, NULL); } -void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event) +void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event) { struct efx_nic *efx = channel->efx; struct efx_vf *vf; @@ -1428,7 +1450,7 @@ error: vf->req_seqno = seq + 1; } -void efx_sriov_flr(struct efx_nic *efx, unsigned vf_i) +void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i) { struct efx_vf *vf; @@ -1445,18 +1467,19 @@ void efx_sriov_flr(struct efx_nic *efx, unsigned vf_i) vf->evq0_count = 0; } -void efx_sriov_mac_address_changed(struct efx_nic *efx) +void efx_siena_sriov_mac_address_changed(struct efx_nic *efx) { - struct vfdi_status *vfdi_status = efx->vfdi_status.addr; + struct siena_nic_data *nic_data = efx->nic_data; + struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr; if (!efx->vf_init_count) return; ether_addr_copy(vfdi_status->peers[0].mac_addr, efx->net_dev->dev_addr); - queue_work(vfdi_workqueue, &efx->peer_work); + queue_work(vfdi_workqueue, &nic_data->peer_work); } -void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) +void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) { struct efx_vf *vf; unsigned queue, qid; @@ -1475,7 +1498,7 @@ void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) wake_up(&vf->flush_waitq); } -void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) +void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) { struct efx_vf *vf; unsigned ev_failed, queue, qid; @@ -1500,7 +1523,7 @@ void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) } /* Called from napi. 
Schedule the reset work item */ -void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) +void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) { struct efx_vf *vf; unsigned int rel; @@ -1516,7 +1539,7 @@ void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) } /* Reset all VFs */ -void efx_sriov_reset(struct efx_nic *efx) +void efx_siena_sriov_reset(struct efx_nic *efx) { unsigned int vf_i; struct efx_buffer buf; @@ -1527,15 +1550,15 @@ if (efx->vf_init_count == 0) return; - efx_sriov_usrev(efx, true); - (void)efx_sriov_cmd(efx, true, NULL, NULL); + efx_siena_sriov_usrev(efx, true); + (void)efx_siena_sriov_cmd(efx, true, NULL, NULL); if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) return; for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) { vf = efx->vf + vf_i; - efx_sriov_reset_vf(vf, &buf); + efx_siena_sriov_reset_vf(vf, &buf); } efx_nic_free_buffer(efx, &buf); @@ -1543,8 +1566,8 @@ int efx_init_sriov(void) { - /* A single threaded workqueue is sufficient. efx_sriov_vfdi() and - * efx_sriov_peer_work() spend almost all their time sleeping for + /* A single threaded workqueue is sufficient. efx_siena_sriov_vfdi() and + * efx_siena_sriov_peer_work() spend almost all their time sleeping for * MCDI to complete anyway */ vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi"); @@ -1559,7 +1582,7 @@ void efx_fini_sriov(void) { destroy_workqueue(vfdi_workqueue); } -int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac) +int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_vf *vf; @@ -1570,14 +1593,14 @@ int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac) mutex_lock(&vf->status_lock); ether_addr_copy(vf->addr.mac_addr, mac); - __efx_sriov_update_vf_addr(vf); + __efx_siena_sriov_update_vf_addr(vf); mutex_unlock(&vf->status_lock); return 0; } -int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, - u16 vlan, u8 qos) +int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, + u16 vlan, u8 qos) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_vf *vf; @@ -1590,14 +1613,14 @@ int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, mutex_lock(&vf->status_lock); tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT); vf->addr.tci = htons(tci); - __efx_sriov_update_vf_addr(vf); + __efx_siena_sriov_update_vf_addr(vf); mutex_unlock(&vf->status_lock); return 0; } -int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, - bool spoofchk) +int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, + bool spoofchk) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_vf *vf; @@ -1620,8 +1643,8 @@ int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, return rc; } -int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i, - struct ifla_vf_info *ivi) +int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i, + struct ifla_vf_info *ivi) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_vf *vf; diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 77ed74561e5fe815de9c7efdd1477404d3591b81..f9c87624a0afb08bc6d65a0bb7abfa328c440ca9 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -59,6 +59,8 @@ #include #include #include +#include <linux/pm_runtime.h> + #include "smsc911x.h" 
#define SMSC_CHIPNAME "smsc911x" @@ -2338,6 +2340,9 @@ static int smsc911x_drv_remove(struct platform_device *pdev) free_netdev(dev); + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return 0; } @@ -2491,6 +2496,9 @@ static int smsc911x_drv_probe(struct platform_device *pdev) if (pdata->config.shift) pdata->ops = &shifted_smsc911x_ops; + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + retval = smsc911x_init(dev); if (retval < 0) goto out_disable_resources; @@ -2572,6 +2580,8 @@ out_unregister_netdev_5: out_free_irq: free_irq(dev->irq, dev); out_disable_resources: + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); (void)smsc911x_disable_resources(pdev); out_enable_resources_fail: smsc911x_free_resources(pdev); diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index b02d4a3ffa379666477d69a7625db1fe1e020aa7..7d3af190be55d9a4ea49ed903b261bec45a319ce 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -14,62 +14,20 @@ config STMMAC_ETH if STMMAC_ETH config STMMAC_PLATFORM - bool "STMMAC Platform bus support" + tristate "STMMAC Platform bus support" depends on STMMAC_ETH default y ---help--- - This selects the platform specific bus support for - the stmmac device driver. This is the driver used - on many embedded STM platforms based on ARM and SuperH - processors. + This selects the platform specific bus support for the stmmac driver. + This is the driver used on several SoCs: + STi, Allwinner, Amlogic Meson, Altera SOCFPGA. + If you have a controller with this interface, say Y or M here. If unsure, say N. -config DWMAC_MESON - bool "Amlogic Meson dwmac support" - depends on STMMAC_PLATFORM && ARCH_MESON - help - Support for Ethernet controller on Amlogic Meson SoCs. - - This selects the Amlogic Meson SoC glue layer support for - the stmmac device driver. This driver is used for Meson6 and - Meson8 SoCs. - -config DWMAC_SOCFPGA - bool "SOCFPGA dwmac support" - depends on STMMAC_PLATFORM && MFD_SYSCON && (ARCH_SOCFPGA || COMPILE_TEST) - help - Support for ethernet controller on Altera SOCFPGA - - This selects the Altera SOCFPGA SoC glue layer support - for the stmmac device driver. This driver is used for - arria5 and cyclone5 FPGA SoCs. - -config DWMAC_SUNXI - bool "Allwinner GMAC support" - depends on STMMAC_PLATFORM && ARCH_SUNXI - default y - ---help--- - Support for Allwinner A20/A31 GMAC ethernet controllers. - - This selects Allwinner SoC glue layer support for the - stmmac device driver. This driver is used for A20/A31 - GMAC ethernet controller. - -config DWMAC_STI - bool "STi GMAC support" - depends on STMMAC_PLATFORM && ARCH_STI - default y - ---help--- - Support for ethernet controller on STi SOCs. - - This selects STi SoC glue layer support for the stmmac - device driver. This driver is used on for the STi series - SOCs GMAC ethernet controller. - config STMMAC_PCI - bool "STMMAC PCI bus support" + tristate "STMMAC PCI bus support" depends on STMMAC_ETH && PCI ---help--- This is to select the Synopsys DWMAC available on PCI devices, @@ -79,22 +37,4 @@ config STMMAC_PCI D1215994A VIRTEX FPGA board. If unsure, say N. - -config STMMAC_DEBUG_FS - bool "Enable monitoring via sysFS " - default n - depends on STMMAC_ETH && DEBUG_FS - ---help--- - The stmmac entry in /sys reports DMA TX/RX rings - or (if supported) the HW cap register. 
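The smsc911x hunks above bracket all hardware access with runtime-PM calls: pm_runtime_enable() plus pm_runtime_get_sync() before the first register access in probe, and pm_runtime_put() plus pm_runtime_disable() in remove and on the probe error path, in reverse order. A condensed, hypothetical skeleton of that ordering (a sketch, not the actual driver code):

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int demo_probe(struct platform_device *pdev)
	{
		int ret;

		pm_runtime_enable(&pdev->dev);
		pm_runtime_get_sync(&pdev->dev);  /* power up before chip init */

		ret = 0; /* chip init and netdev registration would go here */
		if (ret) {
			/* error path mirrors remove(): drop ref, then disable */
			pm_runtime_put(&pdev->dev);
			pm_runtime_disable(&pdev->dev);
		}
		return ret;
	}

	static int demo_remove(struct platform_device *pdev)
	{
		/* netdev unregistration and resource teardown come first ... */
		pm_runtime_put(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return 0;
	}

Keeping the put/disable pair identical in remove() and in the probe error path is what prevents the device from being left with a dangling runtime-PM reference when probe fails after smsc911x_init().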
- -config STMMAC_DA - bool "STMMAC DMA arbitration scheme" - default n - ---help--- - Selecting this option, rx has priority over Tx (only for Giga - Ethernet device). - By default, the DMA arbitration scheme is based on Round-robin - (rx:tx priority is 1:1). - endif diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 0533d0ba783d1d238f23fd367dc36763ef2950e4..ac4d5629d905f73572cbc6c1b88ffb670f58a7ef 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -1,11 +1,12 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o -stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o -stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o -stmmac-$(CONFIG_DWMAC_MESON) += dwmac-meson.o -stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o -stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o -stmmac-$(CONFIG_DWMAC_SOCFPGA) += dwmac-socfpga.o stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ - chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ - dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ + chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ + dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y) + +obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o +stmmac-platform-objs:= stmmac_platform.o dwmac-meson.o dwmac-sunxi.o \ + dwmac-sti.o dwmac-socfpga.o + +obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o +stmmac-pci-objs:= stmmac_pci.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 593e6c4144a7c197140f875f73e86c5583c215d1..cd77289c3cfe3aad2715cd27d1d20c0ecea79674 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -44,6 +44,7 @@ #undef FRAME_FILTER_DEBUG /* #define FRAME_FILTER_DEBUG */ +/* Extra statistic and debug information exposed by ethtool */ struct stmmac_extra_stats { /* Transmit errors */ unsigned long tx_underflow ____cacheline_aligned; @@ -220,6 +221,7 @@ enum dma_irq_status { handle_tx = 0x8, }; +/* EEE and LPI defines */ #define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0) #define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1) #define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2) @@ -229,6 +231,7 @@ enum dma_irq_status { #define CORE_PCS_LINK_STATUS (1 << 6) #define CORE_RGMII_IRQ (1 << 7) +/* Physical Coding Sublayer */ struct rgmii_adv { unsigned int pause; unsigned int duplex; @@ -294,6 +297,7 @@ struct dma_features { #define JUMBO_LEN 9000 +/* Descriptors helpers */ struct stmmac_desc_ops { /* DMA RX descriptor ring initialization */ void (*init_rx_desc) (struct dma_desc *p, int disable_rx_ic, int mode, @@ -341,6 +345,10 @@ struct stmmac_desc_ops { int (*get_rx_timestamp_status) (void *desc, u32 ats); }; +extern const struct stmmac_desc_ops enh_desc_ops; +extern const struct stmmac_desc_ops ndesc_ops; + +/* Specific DMA helpers */ struct stmmac_dma_ops { /* DMA core initialization */ int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb, @@ -370,6 +378,7 @@ struct stmmac_dma_ops { struct mac_device_info; +/* Helpers to program the MAC core */ struct stmmac_ops { /* MAC core initialization */ void (*core_init)(struct mac_device_info *hw, int mtu); @@ -400,6 +409,7 @@ struct stmmac_ops { void (*get_adv)(struct mac_device_info *hw, struct rgmii_adv *adv); }; +/* PTP and HW Timer helpers */ struct stmmac_hwtimestamp { void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); void (*config_sub_second_increment) 
(void __iomem *ioaddr); @@ -410,6 +420,8 @@ struct stmmac_hwtimestamp { u64(*get_systime) (void __iomem *ioaddr); }; +extern const struct stmmac_hwtimestamp stmmac_ptp; + struct mac_link { int port; int duplex; @@ -421,6 +433,7 @@ struct mii_regs { unsigned int data; /* MII Data */ }; +/* Helpers to manage the descriptors for chain and ring modes */ struct stmmac_mode_ops { void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, unsigned int extend_desc); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c index d225a603e604b7a21b8e5c8d021c971448aa7dcb..cca028d632f611ee99c3396e9f57da3c9360dbe5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c @@ -18,6 +18,8 @@ #include #include +#include "stmmac_platform.h" + #define ETHMAC_SPEED_100 BIT(1) struct meson_dwmac { @@ -56,7 +58,7 @@ static void *meson6_dwmac_setup(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 1); dwmac->reg = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(dwmac->reg)) - return dwmac->reg; + return ERR_CAST(dwmac->reg); return dwmac; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 3aad413e74b4a4ba9dd19012cf85460870840015..e97074cd5800a7c67466b60925cb6dfaf6aaab0b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -23,7 +23,9 @@ #include #include #include + #include "stmmac.h" +#include "stmmac_platform.h" #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index ccfe7e510418ae269e99e716ccd8a1a246777aa8..0e137751e76e45b4e904a9156c6a4a07aad58e09 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -1,4 +1,4 @@ -/** +/* * dwmac-sti.c - STMicroelectronics DWMAC Specific Glue layer * * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited @@ -22,6 +22,8 @@ #include #include +#include "stmmac_platform.h" + #define DWMAC_125MHZ 125000000 #define DWMAC_50MHZ 50000000 #define DWMAC_25MHZ 25000000 @@ -35,9 +37,8 @@ #define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \ iface == PHY_INTERFACE_MODE_GMII) -/* STiH4xx register definitions (STiH415/STiH416/STiH407/STiH410 families) */ - -/** +/* STiH4xx register definitions (STiH415/STiH416/STiH407/STiH410 families) + * * Below table summarizes the clock requirement and clock sources for * supported phy interface modes with link speeds. 
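The dwmac-meson hunk above swaps "return dwmac->reg" for "return ERR_CAST(dwmac->reg)": the setup callback returns a plain void *, so an error value encoded in a void __iomem * should be converted with ERR_CAST() to keep the address-space annotations clean while preserving the errno encoding for the caller's IS_ERR() check. A hypothetical sketch of the pattern (names invented for illustration):

	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	struct demo_dwmac {
		void __iomem *reg;
	};

	static void *demo_setup(struct platform_device *pdev,
				struct demo_dwmac *dwmac)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		dwmac->reg = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(dwmac->reg))
			return ERR_CAST(dwmac->reg); /* keep the errno encoding */

		return dwmac;	/* success: hand back the private struct */
	}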
* ________________________________________________ @@ -76,9 +77,7 @@ #define STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7) #define STIH4XX_ETH_SEL_TXCLK_NOT_CLK125 BIT(6) -/* STiD127 register definitions */ - -/** +/* STiD127 register definitions *----------------------- * src |BIT(6)| BIT(7)| *----------------------- @@ -104,13 +103,13 @@ #define EN_MASK GENMASK(1, 1) #define EN BIT(1) -/** +/* * 3 bits [4:2] * 000-GMII/MII * 001-RGMII * 010-SGMII * 100-RMII -*/ + */ #define MII_PHY_SEL_MASK GENMASK(4, 2) #define ETH_PHY_SEL_RMII BIT(4) #define ETH_PHY_SEL_SGMII BIT(3) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index 771cd15fca18c609ca2f727fd122e9d1b41a6e72..c5ea9ab75b03c806a803a5b6b42b9e83920bd768 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -1,4 +1,4 @@ -/** +/* * dwmac-sunxi.c - Allwinner sunxi DWMAC specific glue layer * * Copyright (C) 2013 Chen-Yu Tsai @@ -22,6 +22,8 @@ #include #include +#include "stmmac_platform.h" + struct sunxi_priv_data { int interface; int clk_enabled; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index 0c2058a69fd21fea367e56687a8f56066cec1acc..59d92e811750295e6125cdfcf4f410da6aa21bba 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -70,10 +70,6 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, if (mb) value |= DMA_BUS_MODE_MB; -#ifdef CONFIG_STMMAC_DA - value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */ -#endif - if (atds) value |= DMA_BUS_MODE_ATDS; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index c3c40650b309ca94519af4de873619ec7be3a7ee..c0a39198337268d88b8996f9607794c8a3f4f14b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -122,9 +122,7 @@ int stmmac_mdio_unregister(struct net_device *ndev); int stmmac_mdio_register(struct net_device *ndev); int stmmac_mdio_reset(struct mii_bus *mii); void stmmac_set_ethtool_ops(struct net_device *netdev); -extern const struct stmmac_desc_ops enh_desc_ops; -extern const struct stmmac_desc_ops ndesc_ops; -extern const struct stmmac_hwtimestamp stmmac_ptp; + int stmmac_ptp_register(struct stmmac_priv *priv); void stmmac_ptp_unregister(struct stmmac_priv *priv); int stmmac_resume(struct net_device *ndev); @@ -136,77 +134,4 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, void stmmac_disable_eee_mode(struct stmmac_priv *priv); bool stmmac_eee_init(struct stmmac_priv *priv); -#ifdef CONFIG_STMMAC_PLATFORM -#ifdef CONFIG_DWMAC_MESON -extern const struct stmmac_of_data meson6_dwmac_data; -#endif -#ifdef CONFIG_DWMAC_SUNXI -extern const struct stmmac_of_data sun7i_gmac_data; -#endif -#ifdef CONFIG_DWMAC_STI -extern const struct stmmac_of_data stih4xx_dwmac_data; -extern const struct stmmac_of_data stid127_dwmac_data; -#endif -#ifdef CONFIG_DWMAC_SOCFPGA -extern const struct stmmac_of_data socfpga_gmac_data; -#endif -extern struct platform_driver stmmac_pltfr_driver; -static inline int stmmac_register_platform(void) -{ - int err; - - err = platform_driver_register(&stmmac_pltfr_driver); - if (err) - pr_err("stmmac: failed to register the platform driver\n"); - - return err; -} - -static inline void stmmac_unregister_platform(void) -{ - 
platform_driver_unregister(&stmmac_pltfr_driver); -} -#else -static inline int stmmac_register_platform(void) -{ - pr_debug("stmmac: do not register the platf driver\n"); - - return 0; -} - -static inline void stmmac_unregister_platform(void) -{ -} -#endif /* CONFIG_STMMAC_PLATFORM */ - -#ifdef CONFIG_STMMAC_PCI -extern struct pci_driver stmmac_pci_driver; -static inline int stmmac_register_pci(void) -{ - int err; - - err = pci_register_driver(&stmmac_pci_driver); - if (err) - pr_err("stmmac: failed to register the PCI driver\n"); - - return err; -} - -static inline void stmmac_unregister_pci(void) -{ - pci_unregister_driver(&stmmac_pci_driver); -} -#else -static inline int stmmac_register_pci(void) -{ - pr_debug("stmmac: do not register the PCI driver\n"); - - return 0; -} - -static inline void stmmac_unregister_pci(void) -{ -} -#endif /* CONFIG_STMMAC_PCI */ - #endif /* __STMMAC_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 3a08a1f78c732b49992cc711907131d74b5e18e8..771cda2a48b2a932daa17600446e29a0bb26823a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -696,7 +696,7 @@ static int stmmac_set_coalesce(struct net_device *dev, (ec->tx_max_coalesced_frames == 0)) return -EINVAL; - if ((ec->tx_coalesce_usecs > STMMAC_COAL_TX_TIMER) || + if ((ec->tx_coalesce_usecs > STMMAC_MAX_COAL_TX_TICK) || (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES)) return -EINVAL; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 18c46bb0f3bfa296096ab2ec6624a4bd27bd304f..118a427d1942068f7cf37e8a384676bcbd21089e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -44,10 +44,10 @@ #include #include #include -#ifdef CONFIG_STMMAC_DEBUG_FS +#ifdef CONFIG_DEBUG_FS #include #include -#endif /* CONFIG_STMMAC_DEBUG_FS */ +#endif /* CONFIG_DEBUG_FS */ #include #include "stmmac_ptp.h" #include "stmmac.h" @@ -116,7 +116,7 @@ MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); static irqreturn_t stmmac_interrupt(int irq, void *dev_id); -#ifdef CONFIG_STMMAC_DEBUG_FS +#ifdef CONFIG_DEBUG_FS static int stmmac_init_fs(struct net_device *dev); static void stmmac_exit_fs(void); #endif @@ -125,8 +125,8 @@ static void stmmac_exit_fs(void); /** * stmmac_verify_args - verify the driver parameters. - * Description: it verifies if some wrong parameter is passed to the driver. - * Note that wrong parameters are replaced with the default values. + * Description: it checks the driver parameters and sets a default in case of + * errors. 
 */
 static void stmmac_verify_args(void)
 {
@@ -191,14 +191,8 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
 
 static void print_pkt(unsigned char *buf, int len)
 {
-	int j;
-	pr_debug("len = %d byte, buf addr: 0x%p", len, buf);
-	for (j = 0; j < len; j++) {
-		if ((j % 16) == 0)
-			pr_debug("\n %03x:", j);
-		pr_debug(" %02x", buf[j]);
-	}
-	pr_debug("\n");
+	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
+	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
 }
 
 /* minimum number of free TX descriptors required to wake up TX process */
@@ -210,7 +204,7 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
 }
 
 /**
- * stmmac_hw_fix_mac_speed: callback for speed selection
+ * stmmac_hw_fix_mac_speed - callback for speed selection
  * @priv: driver private structure
  * Description: on some platforms (e.g. ST), some HW system configuraton
  * registers have to be set according to the link speed negotiated.
@@ -224,9 +218,10 @@ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
 }
 
 /**
- * stmmac_enable_eee_mode: Check and enter in LPI mode
+ * stmmac_enable_eee_mode - check and enter in LPI mode
  * @priv: driver private structure
- * Description: this function is to verify and enter in LPI mode for EEE.
+ * Description: this function verifies the conditions and enters the LPI
+ * mode in case of EEE.
  */
 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
 {
@@ -237,7 +232,7 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
 }
 
 /**
- * stmmac_disable_eee_mode: disable/exit from EEE
+ * stmmac_disable_eee_mode - disable and exit from LPI mode
  * @priv: driver private structure
  * Description: this function is to exit and disable EEE in case of
  * LPI state is true. This is called by the xmit.
@@ -250,7 +245,7 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv)
 }
 
 /**
- * stmmac_eee_ctrl_timer: EEE TX SW timer.
+ * stmmac_eee_ctrl_timer - EEE TX SW timer.
  * @arg : data hook
  * Description:
  * if there is no data transfer and if we are not in LPI state,
@@ -265,13 +260,12 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
 }
 
 /**
- * stmmac_eee_init: init EEE
+ * stmmac_eee_init - init EEE
  * @priv: driver private structure
  * Description:
- * If the EEE support has been enabled while configuring the driver,
- * if the GMAC actually supports the EEE (from the HW cap reg) and the
- * phy can also manage EEE, so enable the LPI state and start the timer
- * to verify if the tx path can enter in LPI state.
+ * if the GMAC supports the EEE (from the HW cap reg) and the phy device
+ * can also manage EEE, this function enables the LPI state and starts the
+ * related timer.
  */
 bool stmmac_eee_init(struct stmmac_priv *priv)
 {
@@ -338,7 +332,7 @@ out:
 	return ret;
 }
 
-/* stmmac_get_tx_hwtstamp: get HW TX timestamps
+/* stmmac_get_tx_hwtstamp - get HW TX timestamps
  * @priv: driver private structure
  * @entry : descriptor index to be used.
  * @skb : the socket buffer
@@ -380,7 +374,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
 	return;
 }
 
-/* stmmac_get_rx_hwtstamp: get HW RX timestamps
+/* stmmac_get_rx_hwtstamp - get HW RX timestamps
  * @priv: driver private structure
  * @entry : descriptor index to be used.
  * @skb : the socket buffer
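The print_pkt() conversion above swaps a hand-rolled hex loop for the kernel's generic dumper. For reference, a minimal sketch (not part of the patch; example_dump() is a hypothetical helper) of what the new call does: print_hex_dump_bytes() logs at KERN_DEBUG level, 16 bytes per line, with a left-hand offset column when DUMP_PREFIX_OFFSET is passed.

#include <linux/printk.h>

/* hypothetical helper: dump a buffer the same way the new print_pkt() does */
static void example_dump(const void *buf, size_t len)
{
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}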
@@ -636,11 +630,11 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 }
 
 /**
- * stmmac_init_ptp: init PTP
+ * stmmac_init_ptp - init PTP
  * @priv: driver private structure
- * Description: this is to verify if the HW supports the PTPv1 or v2.
+ * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
  * This is done by looking at the HW cap. register.
- * Also it registers the ptp driver.
+ * This function also registers the ptp driver.
  */
 static int stmmac_init_ptp(struct stmmac_priv *priv)
 {
@@ -682,9 +676,13 @@ static void stmmac_release_ptp(struct stmmac_priv *priv)
 }
 
 /**
- * stmmac_adjust_link
+ * stmmac_adjust_link - adjusts the link parameters
  * @dev: net device structure
- * Description: it adjusts the link parameters.
+ * Description: this is the helper called by the physical abstraction layer
+ * drivers to communicate the phy link status. According to the negotiated
+ * speed and duplex, this driver can invoke the registered glue-logic as well.
+ * It also invokes the EEE initialization because the link can come up on a
+ * different network that is EEE capable.
  */
 static void stmmac_adjust_link(struct net_device *dev)
 {
@@ -774,7 +772,7 @@ static void stmmac_adjust_link(struct net_device *dev)
 }
 
 /**
- * stmmac_check_pcs_mode: verify if RGMII/SGMII is supported
+ * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
  * @priv: driver private structure
  * Description: this is to verify if the HW supports the PCS.
  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
@@ -863,7 +861,7 @@ static int stmmac_init_phy(struct net_device *dev)
 }
 
 /**
- * stmmac_display_ring: display ring
+ * stmmac_display_ring - display ring
  * @head: pointer to the head of the ring passed.
  * @size: size of the ring.
  * @extend_desc: to verify if extended descriptors are used.
@@ -931,7 +929,7 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
 }
 
 /**
- * stmmac_clear_descriptors: clear descriptors
+ * stmmac_clear_descriptors - clear descriptors
  * @priv: driver private structure
  * Description: this function is called to clear the tx and rx descriptors
  * in case of both basic and extended descriptors are used.
@@ -963,6 +961,15 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
 			     (i == txsize - 1));
 }
 
+/**
+ * stmmac_init_rx_buffers - init the RX descriptor buffer.
+ * @priv: driver private structure
+ * @p: descriptor pointer
+ * @i: descriptor index
+ * @flags: gfp flag.
+ * Description: this function is called to allocate a receive buffer, perform
+ * the DMA mapping and init the descriptor.
+ */
 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 				  int i, gfp_t flags)
 {
@@ -1007,7 +1014,8 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
 /**
  * init_dma_desc_rings - init the RX/TX descriptor rings
  * @dev: net device structure
- * Description: this function initializes the DMA RX/TX descriptors
+ * @flags: gfp flag.
+ * Description: this function initializes the DMA RX/TX descriptors
  * and allocates the socket buffers. It suppors the chained and ring
  * modes.
  */
@@ -1144,6 +1152,14 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
 	}
 }
 
+/**
+ * alloc_dma_desc_resources - alloc TX/RX resources.
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extended or basic)
+ * this function allocates the resources for TX and RX paths. In case of
+ * reception, for example, it pre-allocates the RX socket buffer in order to
+ * allow the zero-copy mechanism.
+ */
 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
 {
 	unsigned int txsize = priv->dma_tx_size;
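The alloc_dma_desc_resources() comment above describes pre-allocating RX buffers so reception can be zero-copy. A sketch of that pattern, under the assumption of stmmac-style fields (rx_skbuff, rx_skbuff_dma, dma_buf_sz, des2); the helper name is illustrative, not the driver's exact code:

#include <linux/netdevice.h>
#include <linux/dma-mapping.h>

/* hypothetical helper: allocate one RX buffer and wire it to a descriptor */
static int example_init_rx_buffer(struct stmmac_priv *priv,
				  struct dma_desc *p, int i, gfp_t flags)
{
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
	if (!skb)
		return -ENOMEM;

	priv->rx_skbuff[i] = skb;
	priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						priv->dma_buf_sz,
						DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	p->des2 = priv->rx_skbuff_dma[i];	/* buffer address into the descriptor */
	return 0;
}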
@@ -1255,8 +1271,8 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
 /**
  *  stmmac_dma_operation_mode - HW DMA operation mode
  *  @priv: driver private structure
- *  Description: it sets the DMA operation mode: tx/rx DMA thresholds
- *  or Store-And-Forward capability.
+ *  Description: it is used for configuring the DMA operation mode register in
+ *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
  */
 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 {
@@ -1277,9 +1293,9 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 }
 
 /**
- * stmmac_tx_clean:
+ * stmmac_tx_clean - to manage the transmission completion
  * @priv: driver private structure
- * Description: it reclaims resources after transmission completes.
+ * Description: it reclaims the transmit resources after transmission completes.
  */
 static void stmmac_tx_clean(struct stmmac_priv *priv)
 {
@@ -1378,10 +1394,10 @@ static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
 }
 
 /**
- * stmmac_tx_err: irq tx error mng function
+ * stmmac_tx_err - to manage the tx error
  * @priv: driver private structure
  * Description: it cleans the descriptors and restarts the transmission
- * in case of errors.
+ * in case of transmission errors.
  */
 static void stmmac_tx_err(struct stmmac_priv *priv)
 {
@@ -1409,12 +1425,11 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
 }
 
 /**
- * stmmac_dma_interrupt: DMA ISR
+ * stmmac_dma_interrupt - DMA ISR
  * @priv: driver private structure
  * Description: this is the DMA ISR. It is called by the main ISR.
- * It calls the dwmac dma routine to understand which type of interrupt
- * happened. In case of there is a Normal interrupt and either TX or RX
- * interrupt happened so the NAPI is scheduled.
+ * It calls the dwmac dma routine and schedules the poll method when there
+ * is work to be done.
  */
 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 {
@@ -1457,6 +1472,12 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
 		pr_info(" No MAC Management Counters available\n");
 }
 
+/**
+ * stmmac_get_synopsys_id - return the Synopsys ID.
+ * @priv: driver private structure
+ * Description: this simple function decodes and returns the Synopsys ID
+ * read from the HW core register.
+ */
 static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
 {
 	u32 hwid = priv->hw->synopsys_uid;
@@ -1475,11 +1496,11 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
 }
 
 /**
- * stmmac_selec_desc_mode: to select among: normal/alternate/extend descriptors
+ * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
  * @priv: driver private structure
  * Description: select the Enhanced/Alternate or Normal descriptors.
- * In case of Enhanced/Alternate, it looks at the extended descriptors are
- * supported by the HW cap. register.
+ * In case of Enhanced/Alternate, it checks if the extended descriptors are
+ * supported by the HW capability register.
  */
 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
 {
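The stmmac_dma_interrupt() comment above says the ISR only defers work to the poll method. A sketch of that ISR/NAPI hand-off, assuming the driver's napi struct and DMA callback names as used elsewhere in this patch:

/* illustrative only: acknowledge the DMA event, defer RX/TX work to NAPI */
static void example_dma_interrupt(struct stmmac_priv *priv)
{
	int status;

	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
	if (likely((status & handle_rx)) || (status & handle_tx)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			stmmac_disable_dma_irq(priv);	/* poll re-enables it */
			__napi_schedule(&priv->napi);
		}
	}
}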
@@ -1501,7 +1522,7 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
 /**
- * stmmac_get_hw_features: get MAC capabilities from the HW cap. register.
+ * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
  * @priv: driver private structure
  * Description:
  *  new GMAC chip generations have a new register to indicate the
@@ -1559,7 +1580,7 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
 }
 
 /**
- * stmmac_check_ether_addr: check if the MAC addr is valid
+ * stmmac_check_ether_addr - check if the MAC addr is valid
  * @priv: driver private structure
  * Description:
  * it is to verify if the MAC address is valid, in case of failures it
@@ -1578,7 +1599,7 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
 }
 
 /**
- * stmmac_init_dma_engine: DMA init.
+ * stmmac_init_dma_engine - DMA init.
  * @priv: driver private structure
  * Description:
  * It inits the DMA invoking the specific MAC/GMAC callback.
@@ -1607,7 +1628,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 }
 
 /**
- * stmmac_tx_timer: mitigation sw timer for tx.
+ * stmmac_tx_timer - mitigation sw timer for tx.
  * @data: data pointer
  * Description:
  * This is the timer handler to directly invoke the stmmac_tx_clean.
@@ -1620,7 +1641,7 @@ static void stmmac_tx_timer(unsigned long data)
 }
 
 /**
- * stmmac_init_tx_coalesce: init tx mitigation options.
+ * stmmac_init_tx_coalesce - init tx mitigation options.
  * @priv: driver private structure
  * Description:
  * This inits the transmit coalesce parameters: i.e. timer rate,
@@ -1639,10 +1660,13 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
 }
 
 /**
- * stmmac_hw_setup: setup mac in a usable state.
+ * stmmac_hw_setup - setup mac in a usable state.
  * @dev : pointer to the device structure.
  * Description:
- * This function sets up the ip in a usable state.
+ * this is the main function to setup the HW in a usable state: the DMA
+ * engine is reset, the core registers are configured (e.g. AXI,
+ * Checksum features, timers) and then the DMA is ready to start
+ * receiving and transmitting.
  * Return value:
  * 0 on success and an appropriate (-)ve integer as defined in errno.h
  * file on failure.
@@ -1688,7 +1712,7 @@ static int stmmac_hw_setup(struct net_device *dev)
 	if (ret && ret != -EOPNOTSUPP)
 		pr_warn("%s: failed PTP initialisation\n", __func__);
 
-#ifdef CONFIG_STMMAC_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
 	ret = stmmac_init_fs(dev);
 	if (ret < 0)
 		pr_warn("%s: failed debugFS registration\n", __func__);
@@ -1870,7 +1894,7 @@ static int stmmac_release(struct net_device *dev)
 
 	netif_carrier_off(dev);
 
-#ifdef CONFIG_STMMAC_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
 	stmmac_exit_fs();
 #endif
 
@@ -1880,7 +1904,7 @@ static int stmmac_release(struct net_device *dev)
 }
 
 /**
- * stmmac_xmit: Tx entry point of the driver
+ * stmmac_xmit - Tx entry point of the driver
  * @skb : the socket buffer
  * @dev : device pointer
  * Description : this is the tx entry point of the driver.
@@ -2055,7 +2079,7 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
 
 
 /**
- * stmmac_rx_refill: refill used skb preallocated buffers
+ * stmmac_rx_refill - refill used skb preallocated buffers
  * @priv: driver private structure
  * Description : this is to reallocate the skb for the reception process
  * that is based on zero-copy.
@@ -2106,7 +2130,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 }
 
 /**
- * stmmac_rx_refill: refill used skb preallocated buffers
+ * stmmac_rx - manage the receive process
  * @priv: driver private structure
  * @limit: napi bugget.
  * Description :  this the function called by the napi poll method.
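stmmac_check_ether_addr() is documented above as generating an address when the one fetched from the HW is invalid. A minimal sketch of that fallback (illustrative helper, not the driver's exact code):

#include <linux/etherdevice.h>

/* hypothetical helper: keep the interface usable with a random MAC */
static void example_check_ether_addr(struct net_device *ndev)
{
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);	/* random locally administered MAC */
}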
@@ -2375,8 +2399,11 @@ static int stmmac_set_features(struct net_device *netdev,
  * @irq: interrupt number.
  * @dev_id: to pass the net device pointer.
  * Description: this is the main driver interrupt service routine.
- * It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
- * interrupts.
+ * It can call:
+ * o DMA service routine (to manage incoming frame reception and transmission
+ *   status)
+ * o Core interrupts to manage: remote wake-up, management counter, LPI
+ *   interrupts.
  */
 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
 {
@@ -2457,7 +2484,7 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 	return ret;
 }
 
-#ifdef CONFIG_STMMAC_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
 static struct dentry *stmmac_fs_dir;
 static struct dentry *stmmac_rings_status;
 static struct dentry *stmmac_dma_cap;
@@ -2642,7 +2669,7 @@ static void stmmac_exit_fs(void)
 	debugfs_remove(stmmac_dma_cap);
 	debugfs_remove(stmmac_fs_dir);
 }
-#endif /* CONFIG_STMMAC_DEBUG_FS */
+#endif /* CONFIG_DEBUG_FS */
 
 static const struct net_device_ops stmmac_netdev_ops = {
 	.ndo_open = stmmac_open,
@@ -2663,11 +2690,10 @@ static const struct net_device_ops stmmac_netdev_ops = {
 /**
  * stmmac_hw_init - Init the MAC device
  * @priv: driver private structure
- * Description: this function detects which MAC device
- * (GMAC/MAC10-100) has to attached, checks the HW capability
- * (if supported) and sets the driver's features (for example
- * to use the ring or chaine mode or support the normal/enh
- * descriptor structure).
+ * Description: this function is to configure the MAC device according to
+ * some platform parameters or the HW capability register. It prepares the
+ * driver to use either the ring or the chain mode and to set up either
+ * enhanced or normal descriptors.
  */
 static int stmmac_hw_init(struct stmmac_priv *priv)
 {
@@ -2891,6 +2917,7 @@ error_clk_get:
 
 	return ERR_PTR(ret);
 }
+EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
 
 /**
  * stmmac_dvr_remove
@@ -2920,8 +2947,15 @@ int stmmac_dvr_remove(struct net_device *ndev)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
 
-#ifdef CONFIG_PM
+/**
+ * stmmac_suspend - suspend callback
+ * @ndev: net device pointer
+ * Description: this is the function to suspend the device; it is called by
+ * the platform or PCI driver to stop the network queue, program the PMT
+ * register (for WoL) and release the driver resources.
+ */
 int stmmac_suspend(struct net_device *ndev)
 {
 	struct stmmac_priv *priv = netdev_priv(ndev);
@@ -2963,7 +2997,14 @@ int stmmac_suspend(struct net_device *ndev)
 	priv->oldduplex = -1;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(stmmac_suspend);
 
+/**
+ * stmmac_resume - resume callback
+ * @ndev: net device pointer
+ * Description: when resuming, this function is invoked to setup the DMA and
+ * CORE in a usable state.
+ */
 int stmmac_resume(struct net_device *ndev)
 {
 	struct stmmac_priv *priv = netdev_priv(ndev);
@@ -3009,37 +3050,7 @@ int stmmac_resume(struct net_device *ndev)
 	return 0;
 }
-#endif /* CONFIG_PM */
-
-/* Driver can be configured w/ and w/ both PCI and Platf drivers
- * depending on the configuration selected.
- */ -static int __init stmmac_init(void) -{ - int ret; - - ret = stmmac_register_platform(); - if (ret) - goto err; - ret = stmmac_register_pci(); - if (ret) - goto err_pci; - return 0; -err_pci: - stmmac_unregister_platform(); -err: - pr_err("stmmac: driver registration failed\n"); - return ret; -} - -static void __exit stmmac_exit(void) -{ - stmmac_unregister_platform(); - stmmac_unregister_pci(); -} - -module_init(stmmac_init); -module_exit(stmmac_exit); +EXPORT_SYMBOL_GPL(stmmac_resume); #ifndef MODULE static int __init stmmac_cmdline_opt(char *str) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index e17a970eaf2b87a1ff184270835102a85fa5f1f4..054520d67de4a93c4ca14098b8c27b5f6d44cde8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -26,34 +26,26 @@ #include #include "stmmac.h" -static struct plat_stmmacenet_data plat_dat; -static struct stmmac_mdio_bus_data mdio_data; -static struct stmmac_dma_cfg dma_cfg; - -static void stmmac_default_data(void) +static void stmmac_default_data(struct plat_stmmacenet_data *plat) { - memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data)); - - plat_dat.bus_id = 1; - plat_dat.phy_addr = 0; - plat_dat.interface = PHY_INTERFACE_MODE_GMII; - plat_dat.clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ - plat_dat.has_gmac = 1; - plat_dat.force_sf_dma_mode = 1; + plat->bus_id = 1; + plat->phy_addr = 0; + plat->interface = PHY_INTERFACE_MODE_GMII; + plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ + plat->has_gmac = 1; + plat->force_sf_dma_mode = 1; - mdio_data.phy_reset = NULL; - mdio_data.phy_mask = 0; - plat_dat.mdio_bus_data = &mdio_data; + plat->mdio_bus_data->phy_reset = NULL; + plat->mdio_bus_data->phy_mask = 0; - dma_cfg.pbl = 32; - dma_cfg.burst_len = DMA_AXI_BLEN_256; - plat_dat.dma_cfg = &dma_cfg; + plat->dma_cfg->pbl = 32; + plat->dma_cfg->burst_len = DMA_AXI_BLEN_256; /* Set default value for multicast hash bins */ - plat_dat.multicast_filter_bins = HASH_TABLE_SIZE; + plat->multicast_filter_bins = HASH_TABLE_SIZE; /* Set default value for unicast filter entries */ - plat_dat.unicast_filter_entries = 1; + plat->unicast_filter_entries = 1; } /** @@ -71,64 +63,61 @@ static void stmmac_default_data(void) static int stmmac_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - int ret = 0; - void __iomem *addr = NULL; - struct stmmac_priv *priv = NULL; + struct plat_stmmacenet_data *plat; + struct stmmac_priv *priv; int i; + int ret; + + plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); + if (!plat) + return -ENOMEM; + + plat->mdio_bus_data = devm_kzalloc(&pdev->dev, + sizeof(*plat->mdio_bus_data), + GFP_KERNEL); + if (!plat->mdio_bus_data) + return -ENOMEM; + + plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), + GFP_KERNEL); + if (!plat->dma_cfg) + return -ENOMEM; /* Enable pci device */ - ret = pci_enable_device(pdev); + ret = pcim_enable_device(pdev); if (ret) { - pr_err("%s : ERROR: failed to enable %s device\n", __func__, - pci_name(pdev)); + dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", + __func__); return ret; } - if (pci_request_regions(pdev, STMMAC_RESOURCE_NAME)) { - pr_err("%s: ERROR: failed to get PCI region\n", __func__); - ret = -ENODEV; - goto err_out_req_reg_failed; - } /* Get the base address of device */ - for (i = 0; i <= 5; i++) { + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { if (pci_resource_len(pdev, i) == 0) 
continue; - addr = pci_iomap(pdev, i, 0); - if (addr == NULL) { - pr_err("%s: ERROR: cannot map register memory aborting", - __func__); - ret = -EIO; - goto err_out_map_failed; - } + ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev)); + if (ret) + return ret; break; } + pci_set_master(pdev); - stmmac_default_data(); + stmmac_default_data(plat); - priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat, addr); + priv = stmmac_dvr_probe(&pdev->dev, plat, pcim_iomap_table(pdev)[i]); if (IS_ERR(priv)) { - pr_err("%s: main driver probe failed", __func__); - ret = PTR_ERR(priv); - goto err_out; + dev_err(&pdev->dev, "%s: main driver probe failed\n", __func__); + return PTR_ERR(priv); } priv->dev->irq = pdev->irq; priv->wol_irq = pdev->irq; pci_set_drvdata(pdev, priv->dev); - pr_debug("STMMAC platform driver registration completed"); + dev_dbg(&pdev->dev, "STMMAC PCI driver registration completed\n"); return 0; - -err_out: - pci_clear_master(pdev); -err_out_map_failed: - pci_release_regions(pdev); -err_out_req_reg_failed: - pci_disable_device(pdev); - - return ret; } /** @@ -141,39 +130,30 @@ err_out_req_reg_failed: static void stmmac_pci_remove(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); - struct stmmac_priv *priv = netdev_priv(ndev); stmmac_dvr_remove(ndev); - - pci_iounmap(pdev, priv->ioaddr); - pci_release_regions(pdev); - pci_disable_device(pdev); } -#ifdef CONFIG_PM -static int stmmac_pci_suspend(struct pci_dev *pdev, pm_message_t state) +#ifdef CONFIG_PM_SLEEP +static int stmmac_pci_suspend(struct device *dev) { + struct pci_dev *pdev = to_pci_dev(dev); struct net_device *ndev = pci_get_drvdata(pdev); - int ret; - - ret = stmmac_suspend(ndev); - pci_save_state(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - return ret; + return stmmac_suspend(ndev); } -static int stmmac_pci_resume(struct pci_dev *pdev) +static int stmmac_pci_resume(struct device *dev) { + struct pci_dev *pdev = to_pci_dev(dev); struct net_device *ndev = pci_get_drvdata(pdev); - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - return stmmac_resume(ndev); } #endif +static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume); + #define STMMAC_VENDOR_ID 0x700 #define STMMAC_DEVICE_ID 0x1108 @@ -185,17 +165,18 @@ static const struct pci_device_id stmmac_id_table[] = { MODULE_DEVICE_TABLE(pci, stmmac_id_table); -struct pci_driver stmmac_pci_driver = { +static struct pci_driver stmmac_pci_driver = { .name = STMMAC_RESOURCE_NAME, .id_table = stmmac_id_table, .probe = stmmac_pci_probe, .remove = stmmac_pci_remove, -#ifdef CONFIG_PM - .suspend = stmmac_pci_suspend, - .resume = stmmac_pci_resume, -#endif + .driver = { + .pm = &stmmac_pm_ops, + }, }; +module_pci_driver(stmmac_pci_driver); + MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver"); MODULE_AUTHOR("Rayagond Kokatanur "); MODULE_AUTHOR("Giuseppe Cavallaro "); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 58a1a0a423d494e2ce4fcc0861a02ff97e9077dc..4032b170fe243e230b117c49a420c7e11db21459 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -27,25 +27,19 @@ #include #include #include + #include "stmmac.h" +#include "stmmac_platform.h" static const struct of_device_id stmmac_dt_ids[] = { -#ifdef CONFIG_DWMAC_MESON + /* SoC specific glue layers should come before generic bindings */ { .compatible = "amlogic,meson6-dwmac", .data = 
&meson6_dwmac_data},
-#endif
-#ifdef CONFIG_DWMAC_SUNXI
 	{ .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
-#endif
-#ifdef CONFIG_DWMAC_STI
 	{ .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
 	{ .compatible = "st,stih416-dwmac", .data = &stih4xx_dwmac_data},
 	{ .compatible = "st,stid127-dwmac", .data = &stid127_dwmac_data},
 	{ .compatible = "st,stih407-dwmac", .data = &stih4xx_dwmac_data},
-#endif
-#ifdef CONFIG_DWMAC_SOCFPGA
 	{ .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
-#endif
-	/* SoC specific glue layers should come before generic bindings */
 	{ .compatible = "st,spear600-gmac"},
 	{ .compatible = "snps,dwmac-3.610"},
 	{ .compatible = "snps,dwmac-3.70a"},
@@ -57,7 +51,11 @@ MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
 
 #ifdef CONFIG_OF
 
-/* This function validates the number of Multicast filtering bins specified
+/**
+ * dwmac1000_validate_mcast_bins - validates the number of Multicast filter bins
+ * @mcast_bins: Multicast filtering bins
+ * Description:
+ * this function validates the number of Multicast filtering bins specified
  * by the configuration through the device tree. The Synopsys GMAC supports
  * 64 bins, 128 bins, or 256 bins. "bins" refer to the division of CRC
  * number space. 64 bins correspond to 6 bits of the CRC, 128 corresponds
@@ -83,7 +81,11 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins)
 	return x;
 }
 
-/* This function validates the number of Unicast address entries supported
+/**
+ * dwmac1000_validate_ucast_entries - validate the Unicast address entries
+ * @ucast_entries: number of Unicast address entries
+ * Description:
+ * This function validates the number of Unicast address entries supported
  * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
  * supports 1, 32, 64, or 128 Unicast filter entries for it's Unicast filter
  * logic. This function validates a valid, supported configuration is
@@ -109,6 +111,15 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
 	return x;
 }
 
+/**
+ * stmmac_probe_config_dt - parse device-tree driver parameters
+ * @pdev: platform_device structure
+ * @plat: driver data platform structure
+ * @mac: MAC address to use
+ * Description:
+ * this function reads the driver parameters from the device-tree and
+ * sets some private fields that will be used by the main driver at runtime.
+ */
 static int stmmac_probe_config_dt(struct platform_device *pdev,
 				  struct plat_stmmacenet_data *plat,
 				  const char **mac)
@@ -242,11 +253,11 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
 #endif /* CONFIG_OF */
 
 /**
- * stmmac_pltfr_probe
+ * stmmac_pltfr_probe - platform driver probe.
  * @pdev: platform device pointer
- * Description: platform_device probe function. It allocates
- * the necessary resources and invokes the main to init
- * the net device, register the mdio bus etc.
+ * Description: platform_device probe function. It allocates
+ * the necessary platform resources, invokes the custom helper (if required)
+ * and invokes the main probe function.
  */
 static int stmmac_pltfr_probe(struct platform_device *pdev)
 {
@@ -369,7 +380,14 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
 	return ret;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+/**
+ * stmmac_pltfr_suspend - suspend callback
+ * @dev: device pointer
+ * Description: this function is invoked when suspending the driver; it
+ * directly calls the main suspend function and then, if required on some
+ * platforms, it can call an exit helper.
+ */
 static int stmmac_pltfr_suspend(struct device *dev)
 {
 	int ret;
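The CONFIG_PM_SLEEP block above feeds the SIMPLE_DEV_PM_OPS() instance used further down. Roughly what that macro produces when CONFIG_PM_SLEEP is set (a sketch; the macro actually goes through SET_SYSTEM_SLEEP_PM_OPS(), which wires the same pair of callbacks for hibernation as well):

#include <linux/pm.h>

/* illustrative expansion of SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, ...) */
static const struct dev_pm_ops example_pm_ops = {
	.suspend	= stmmac_pltfr_suspend,
	.resume		= stmmac_pltfr_resume,
	.freeze		= stmmac_pltfr_suspend,
	.thaw		= stmmac_pltfr_resume,
	.poweroff	= stmmac_pltfr_suspend,
	.restore	= stmmac_pltfr_resume,
};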
@@ -384,6 +402,13 @@ static int stmmac_pltfr_suspend(struct device *dev)
 	return ret;
 }
 
+/**
+ * stmmac_pltfr_resume - resume callback
+ * @dev: device pointer
+ * Description: this function is invoked when resuming the driver; on some
+ * platforms, it can call its own init helper (if required) before calling
+ * the main resume function.
+ */
 static int stmmac_pltfr_resume(struct device *dev)
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
@@ -395,13 +420,12 @@ static int stmmac_pltfr_resume(struct device *dev)
 
 	return stmmac_resume(ndev);
 }
-
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
 
 static SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops,
-			stmmac_pltfr_suspend, stmmac_pltfr_resume);
+			 stmmac_pltfr_suspend, stmmac_pltfr_resume);
 
-struct platform_driver stmmac_pltfr_driver = {
+static struct platform_driver stmmac_pltfr_driver = {
 	.probe = stmmac_pltfr_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
@@ -409,9 +433,11 @@ struct platform_driver stmmac_pltfr_driver = {
 		.owner = THIS_MODULE,
 		.pm = &stmmac_pltfr_pm_ops,
 		.of_match_table = of_match_ptr(stmmac_dt_ids),
-		   },
+	},
 };
 
+module_platform_driver(stmmac_pltfr_driver);
+
 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
new file mode 100644
index 0000000000000000000000000000000000000000..25dd1f7ace02f8ce4ccce1981e19493c52712bb6
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -0,0 +1,28 @@
+/*******************************************************************************
+  Copyright (C) 2007-2009  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+ + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __STMMAC_PLATFORM_H__ +#define __STMMAC_PLATFORM_H__ + +extern const struct stmmac_of_data meson6_dwmac_data; +extern const struct stmmac_of_data sun7i_gmac_data; +extern const struct stmmac_of_data stih4xx_dwmac_data; +extern const struct stmmac_of_data stid127_dwmac_data; +extern const struct stmmac_of_data socfpga_gmac_data; + +#endif /* __STMMAC_PLATFORM_H__ */ diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 02d370e58110de31b74d1d1b89f4038b7b476a3f..3dc1f68b322d357d14a915a115d92ebef8fef9a2 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -5179,8 +5179,7 @@ static void cas_remove_one(struct pci_dev *pdev) cp = netdev_priv(dev); unregister_netdev(dev); - if (cp->fw_data) - vfree(cp->fw_data); + vfree(cp->fw_data); mutex_lock(&cp->pm_mutex); cancel_work_sync(&cp->reset_task); diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 904fd1ab5f6e3762703d2931cf8fb6963c53c32c..4aaa3240453a4133e381044c4ade273ff03c8289 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -6651,13 +6651,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb, return NETDEV_TX_BUSY; } - if (skb->len < ETH_ZLEN) { - unsigned int pad_bytes = ETH_ZLEN - skb->len; - - if (skb_pad(skb, pad_bytes)) - goto out; - skb_put(skb, pad_bytes); - } + if (eth_skb_pad(skb)) + goto out; len = sizeof(struct tx_pkt_hdr) + 15; if (skb_headroom(skb) < len) { diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 3652afd3ec78d9e395a7902d9fe3cc6da570b43a..90c86cd3be1443bc72d5d1d4469882950a4be4e8 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -15,12 +15,14 @@ #include #include #include +#include #include #if IS_ENABLED(CONFIG_IPV6) #include #endif +#include #include #include @@ -40,6 +42,8 @@ MODULE_DESCRIPTION("Sun LDOM virtual network driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); +#define VNET_MAX_TXQS 16 + /* Heuristic for the number of times to exponentially backoff and * retry sending an LDC trigger when EAGAIN is encountered */ @@ -49,6 +53,8 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start); /* Ordered from largest major to lowest */ static struct vio_version vnet_versions[] = { + { .major = 1, .minor = 8 }, + { .major = 1, .minor = 7 }, { .major = 1, .minor = 6 }, { .major = 1, .minor = 0 }, }; @@ -71,13 +77,19 @@ static int vnet_handle_unknown(struct vnet_port *port, void *arg) return -ECONNRESET; } +static int vnet_port_alloc_tx_ring(struct vnet_port *port); + static int vnet_send_attr(struct vio_driver_state *vio) { struct vnet_port *port = to_vnet_port(vio); struct net_device *dev = port->vp->dev; struct vio_net_attr_info pkt; int framelen = ETH_FRAME_LEN; - int i; + int i, err; + + err = vnet_port_alloc_tx_ring(to_vnet_port(vio)); + if (err) + return err; memset(&pkt, 0, sizeof(pkt)); pkt.tag.type = VIO_TYPE_CTRL; @@ -108,8 +120,15 @@ static int vnet_send_attr(struct vio_driver_state *vio) pkt.mtu = framelen + VLAN_HLEN; } - pkt.plnk_updt = PHYSLINK_UPDATE_NONE; pkt.cflags = 0; + if (vio_version_after_eq(vio, 1, 7) && port->tso) { + pkt.cflags |= VNET_LSO_IPV4_CAPAB; + if (!port->tsolen) + port->tsolen = VNET_MAXTSO; + pkt.ipv4_lso_maxlen = port->tsolen; + } + + pkt.plnk_updt = PHYSLINK_UPDATE_NONE; viodbg(HS, "SEND NET ATTR xmode[0x%x] 
atype[0x%x] addr[%llx] " "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " @@ -163,6 +182,26 @@ static int handle_attr_info(struct vio_driver_state *vio, } port->rmtu = localmtu; + /* LSO negotiation */ + if (vio_version_after_eq(vio, 1, 7)) + port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB); + else + port->tso = false; + if (port->tso) { + if (!port->tsolen) + port->tsolen = VNET_MAXTSO; + port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen); + if (port->tsolen < VNET_MINTSO) { + port->tso = false; + port->tsolen = 0; + pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; + } + pkt->ipv4_lso_maxlen = port->tsolen; + } else { + pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; + pkt->ipv4_lso_maxlen = 0; + } + /* for version >= 1.6, ACK packet mode we support */ if (vio_version_after_eq(vio, 1, 6)) { pkt->xfer_mode = VIO_NEW_DRING_MODE; @@ -274,10 +313,42 @@ static struct sk_buff *alloc_and_align_skb(struct net_device *dev, return skb; } -static int vnet_rx_one(struct vnet_port *port, unsigned int len, - struct ldc_trans_cookie *cookies, int ncookies) +static inline void vnet_fullcsum(struct sk_buff *skb) +{ + struct iphdr *iph = ip_hdr(skb); + int offset = skb_transport_offset(skb); + + if (skb->protocol != htons(ETH_P_IP)) + return; + if (iph->protocol != IPPROTO_TCP && + iph->protocol != IPPROTO_UDP) + return; + skb->ip_summed = CHECKSUM_NONE; + skb->csum_level = 1; + skb->csum = 0; + if (iph->protocol == IPPROTO_TCP) { + struct tcphdr *ptcp = tcp_hdr(skb); + + ptcp->check = 0; + skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); + ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - offset, IPPROTO_TCP, + skb->csum); + } else if (iph->protocol == IPPROTO_UDP) { + struct udphdr *pudp = udp_hdr(skb); + + pudp->check = 0; + skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); + pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - offset, IPPROTO_UDP, + skb->csum); + } +} + +static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) { struct net_device *dev = port->vp->dev; + unsigned int len = desc->size; unsigned int copy_len; struct sk_buff *skb; int err; @@ -299,7 +370,7 @@ static int vnet_rx_one(struct vnet_port *port, unsigned int len, skb_put(skb, copy_len); err = ldc_copy(port->vio.lp, LDC_COPY_IN, skb->data, copy_len, 0, - cookies, ncookies); + desc->cookies, desc->ncookies); if (unlikely(err < 0)) { dev->stats.rx_frame_errors++; goto out_free_skb; @@ -309,11 +380,33 @@ static int vnet_rx_one(struct vnet_port *port, unsigned int len, skb_trim(skb, len); skb->protocol = eth_type_trans(skb, dev); - dev->stats.rx_packets++; - dev->stats.rx_bytes += len; + if (vio_version_after_eq(&port->vio, 1, 8)) { + struct vio_net_dext *dext = vio_net_ext(desc); - netif_rx(skb); + if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) { + if (skb->protocol == ETH_P_IP) { + struct iphdr *iph = (struct iphdr *)skb->data; + iph->check = 0; + ip_send_check(iph); + } + } + if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) && + skb->ip_summed == CHECKSUM_NONE) + vnet_fullcsum(skb); + if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) { + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum_level = 0; + if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK) + skb->csum_level = 1; + } + } + + skb->ip_summed = port->switch_port ? 
CHECKSUM_NONE : CHECKSUM_PARTIAL; + + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + napi_gro_receive(&port->napi, skb); return 0; out_free_skb: @@ -430,6 +523,7 @@ static int vnet_walk_rx_one(struct vnet_port *port, struct vio_driver_state *vio = &port->vio; int err; + BUG_ON(desc == NULL); if (IS_ERR(desc)) return PTR_ERR(desc); @@ -444,7 +538,7 @@ static int vnet_walk_rx_one(struct vnet_port *port, desc->cookies[0].cookie_addr, desc->cookies[0].cookie_size); - err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies); + err = vnet_rx_one(port, desc); if (err == -ECONNRESET) return err; desc->hdr.state = VIO_DESC_DONE; @@ -456,10 +550,11 @@ static int vnet_walk_rx_one(struct vnet_port *port, } static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, - u32 start, u32 end) + u32 start, u32 end, int *npkts, int budget) { struct vio_driver_state *vio = &port->vio; int ack_start = -1, ack_end = -1; + bool send_ack = true; end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr); @@ -471,6 +566,7 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, return err; if (err != 0) break; + (*npkts)++; if (ack_start == -1) ack_start = start; ack_end = start; @@ -482,13 +578,26 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, return err; ack_start = -1; } + if ((*npkts) >= budget) { + send_ack = false; + break; + } } if (unlikely(ack_start == -1)) ack_start = ack_end = prev_idx(start, dr); - return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED); + if (send_ack) { + port->napi_resume = false; + return vnet_send_ack(port, dr, ack_start, ack_end, + VIO_DRING_STOPPED); + } else { + port->napi_resume = true; + port->napi_stop_idx = ack_end; + return 1; + } } -static int vnet_rx(struct vnet_port *port, void *msgbuf) +static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts, + int budget) { struct vio_dring_data *pkt = msgbuf; struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING]; @@ -505,11 +614,13 @@ static int vnet_rx(struct vnet_port *port, void *msgbuf) return 0; } - dr->rcv_nxt++; + if (!port->napi_resume) + dr->rcv_nxt++; /* XXX Validate pkt->start_idx and pkt->end_idx XXX */ - return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx); + return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx, + npkts, budget); } static int idx_is_pending(struct vio_dring_state *dr, u32 end) @@ -535,19 +646,26 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf) struct vnet *vp; u32 end; struct vio_net_desc *desc; + struct netdev_queue *txq; + if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) return 0; end = pkt->end_idx; - if (unlikely(!idx_is_pending(dr, end))) + vp = port->vp; + dev = vp->dev; + netif_tx_lock(dev); + if (unlikely(!idx_is_pending(dr, end))) { + netif_tx_unlock(dev); return 0; + } /* sync for race conditions with vnet_start_xmit() and tell xmit it * is time to send a trigger. */ dr->cons = next_idx(end, dr); desc = vio_dring_entry(dr, dr->cons); - if (desc->hdr.state == VIO_DESC_READY && port->start_cons) { + if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) { /* vnet_start_xmit() just populated this dring but missed * sending the "start" LDC message to the consumer. * Send a "start" trigger on its behalf. 
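The vnet_ack() rework in the hunk below takes netif_tx_lock() and moves from device-wide to per-queue wakeups. A sketch of the per-txq stop/wake pattern these sunvnet hunks rely on (example_wake_txq() is a hypothetical helper):

#include <linux/netdevice.h>

/* illustrative only: wake one TX queue under its own xmit lock */
static void example_wake_txq(struct net_device *dev, int q_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, q_index);

	__netif_tx_lock(txq, smp_processor_id());
	if (netif_tx_queue_stopped(txq))
		netif_tx_wake_queue(txq);
	__netif_tx_unlock(txq);
}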
@@ -559,11 +677,10 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf) } else { port->start_cons = true; } + netif_tx_unlock(dev); - - vp = port->vp; - dev = vp->dev; - if (unlikely(netif_queue_stopped(dev) && + txq = netdev_get_tx_queue(dev, port->q_index); + if (unlikely(netif_tx_queue_stopped(txq) && vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr))) return 1; @@ -591,58 +708,64 @@ static int handle_mcast(struct vnet_port *port, void *msgbuf) return 0; } -static void maybe_tx_wakeup(unsigned long param) +/* Got back a STOPPED LDC message on port. If the queue is stopped, + * wake it up so that we'll send out another START message at the + * next TX. + */ +static void maybe_tx_wakeup(struct vnet_port *port) { - struct vnet *vp = (struct vnet *)param; - struct net_device *dev = vp->dev; + struct netdev_queue *txq; - netif_tx_lock(dev); - if (likely(netif_queue_stopped(dev))) { - struct vnet_port *port; - int wake = 1; + txq = netdev_get_tx_queue(port->vp->dev, port->q_index); + __netif_tx_lock(txq, smp_processor_id()); + if (likely(netif_tx_queue_stopped(txq))) { + struct vio_dring_state *dr; - list_for_each_entry(port, &vp->port_list, list) { - struct vio_dring_state *dr; - - dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - if (vnet_tx_dring_avail(dr) < - VNET_TX_WAKEUP_THRESH(dr)) { - wake = 0; - break; - } - } - if (wake) - netif_wake_queue(dev); + dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + netif_tx_wake_queue(txq); } - netif_tx_unlock(dev); + __netif_tx_unlock(txq); } -static void vnet_event(void *arg, int event) +static inline bool port_is_up(struct vnet_port *vnet) +{ + struct vio_driver_state *vio = &vnet->vio; + + return !!(vio->hs_state & VIO_HS_COMPLETE); +} + +static int vnet_event_napi(struct vnet_port *port, int budget) { - struct vnet_port *port = arg; struct vio_driver_state *vio = &port->vio; - unsigned long flags; int tx_wakeup, err; + int npkts = 0; + int event = (port->rx_event & LDC_EVENT_RESET); - spin_lock_irqsave(&vio->lock, flags); - +ldc_ctrl: if (unlikely(event == LDC_EVENT_RESET || event == LDC_EVENT_UP)) { vio_link_state_change(vio, event); - spin_unlock_irqrestore(&vio->lock, flags); if (event == LDC_EVENT_RESET) { port->rmtu = 0; + port->tso = true; + port->tsolen = 0; vio_port_up(vio); } - return; + port->rx_event = 0; + return 0; } + /* We may have multiple LDC events in rx_event. 
Unroll send_events() */ + event = (port->rx_event & LDC_EVENT_UP); + port->rx_event &= ~(LDC_EVENT_RESET|LDC_EVENT_UP); + if (event == LDC_EVENT_UP) + goto ldc_ctrl; + event = port->rx_event; + if (!(event & LDC_EVENT_DATA_READY)) + return 0; - if (unlikely(event != LDC_EVENT_DATA_READY)) { - pr_warn("Unexpected LDC event %d\n", event); - spin_unlock_irqrestore(&vio->lock, flags); - return; - } + /* we dont expect any other bits than RESET, UP, DATA_READY */ + BUG_ON(event != LDC_EVENT_DATA_READY); tx_wakeup = err = 0; while (1) { @@ -651,6 +774,20 @@ static void vnet_event(void *arg, int event) u64 raw[8]; } msgbuf; + if (port->napi_resume) { + struct vio_dring_data *pkt = + (struct vio_dring_data *)&msgbuf; + struct vio_dring_state *dr = + &port->vio.drings[VIO_DRIVER_RX_RING]; + + pkt->tag.type = VIO_TYPE_DATA; + pkt->tag.stype = VIO_SUBTYPE_INFO; + pkt->tag.stype_env = VIO_DRING_DATA; + pkt->seq = dr->rcv_nxt; + pkt->start_idx = next_idx(port->napi_stop_idx, dr); + pkt->end_idx = -1; + goto napi_resume; + } err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); if (unlikely(err < 0)) { if (err == -ECONNRESET) @@ -667,10 +804,22 @@ static void vnet_event(void *arg, int event) err = vio_validate_sid(vio, &msgbuf.tag); if (err < 0) break; - +napi_resume: if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) { if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) { - err = vnet_rx(port, &msgbuf); + if (!port_is_up(port)) { + /* failures like handshake_failure() + * may have cleaned up dring, but + * NAPI polling may bring us here. + */ + err = -ECONNRESET; + break; + } + err = vnet_rx(port, &msgbuf, &npkts, budget); + if (npkts >= budget) + break; + if (npkts == 0) + break; } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) { err = vnet_ack(port, &msgbuf); if (err > 0) @@ -691,15 +840,34 @@ static void vnet_event(void *arg, int event) if (err == -ECONNRESET) break; } - spin_unlock(&vio->lock); - /* Kick off a tasklet to wake the queue. 
We cannot call - * maybe_tx_wakeup directly here because we could deadlock on - * netif_tx_lock() with dev_watchdog() - */ if (unlikely(tx_wakeup && err != -ECONNRESET)) - tasklet_schedule(&port->vp->vnet_tx_wakeup); + maybe_tx_wakeup(port); + return npkts; +} + +static int vnet_poll(struct napi_struct *napi, int budget) +{ + struct vnet_port *port = container_of(napi, struct vnet_port, napi); + struct vio_driver_state *vio = &port->vio; + int processed = vnet_event_napi(port, budget); + + if (processed < budget) { + napi_complete(napi); + port->rx_event &= ~LDC_EVENT_DATA_READY; + vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED); + } + return processed; +} + +static void vnet_event(void *arg, int event) +{ + struct vnet_port *port = arg; + struct vio_driver_state *vio = &port->vio; + + port->rx_event |= event; + vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED); + napi_schedule(&port->napi); - local_irq_restore(flags); } static int __vnet_tx_trigger(struct vnet_port *port, u32 start) @@ -746,26 +914,19 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start) return err; } -static inline bool port_is_up(struct vnet_port *vnet) -{ - struct vio_driver_state *vio = &vnet->vio; - - return !!(vio->hs_state & VIO_HS_COMPLETE); -} - struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) { unsigned int hash = vnet_hashfn(skb->data); struct hlist_head *hp = &vp->port_hash[hash]; struct vnet_port *port; - hlist_for_each_entry(port, hp, hash) { + hlist_for_each_entry_rcu(port, hp, hash) { if (!port_is_up(port)) continue; if (ether_addr_equal(port->raddr, skb->data)) return port; } - list_for_each_entry(port, &vp->port_list, list) { + list_for_each_entry_rcu(port, &vp->port_list, list) { if (!port->switch_port) continue; if (!port_is_up(port)) @@ -775,18 +936,6 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) return NULL; } -struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb) -{ - struct vnet_port *ret; - unsigned long flags; - - spin_lock_irqsave(&vp->lock, flags); - ret = __tx_port_find(vp, skb); - spin_unlock_irqrestore(&vp->lock, flags); - - return ret; -} - static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port, unsigned *pending) { @@ -847,11 +996,10 @@ static void vnet_clean_timer_expire(unsigned long port0) struct vnet_port *port = (struct vnet_port *)port0; struct sk_buff *freeskbs; unsigned pending; - unsigned long flags; - spin_lock_irqsave(&port->vio.lock, flags); + netif_tx_lock(port->vp->dev); freeskbs = vnet_clean_tx_ring(port, &pending); - spin_unlock_irqrestore(&port->vio.lock, flags); + netif_tx_unlock(port->vp->dev); vnet_free_skbs(freeskbs); @@ -862,11 +1010,54 @@ static void vnet_clean_timer_expire(unsigned long port0) del_timer(&port->clean_timer); } -static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, void **pstart, - int *plen) +static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb, + struct ldc_trans_cookie *cookies, int ncookies, + unsigned int map_perm) +{ + int i, nc, err, blen; + + /* header */ + blen = skb_headlen(skb); + if (blen < ETH_ZLEN) + blen = ETH_ZLEN; + blen += VNET_PACKET_SKIP; + blen += 8 - (blen & 7); + + err = ldc_map_single(lp, skb->data-VNET_PACKET_SKIP, blen, cookies, + ncookies, map_perm); + if (err < 0) + return err; + nc = err; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + u8 *vaddr; + + if (nc < ncookies) { + vaddr = kmap_atomic(skb_frag_page(f)); + blen = skb_frag_size(f); + blen += 8 
- (blen & 7); + err = ldc_map_single(lp, vaddr + f->page_offset, + blen, cookies + nc, ncookies - nc, + map_perm); + kunmap_atomic(vaddr); + } else { + err = -EMSGSIZE; + } + + if (err < 0) { + ldc_unmap(lp, cookies, nc); + return err; + } + nc += err; + } + return nc; +} + +static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) { struct sk_buff *nskb; - int len, pad; + int i, len, pad, docopy; len = skb->len; pad = 0; @@ -876,51 +1067,223 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, void **pstart, } len += VNET_PACKET_SKIP; pad += 8 - (len & 7); - len += 8 - (len & 7); + /* make sure we have enough cookies and alignment in every frag */ + docopy = skb_shinfo(skb)->nr_frags >= ncookies; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + + docopy |= f->page_offset & 7; + } if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP || skb_tailroom(skb) < pad || - skb_headroom(skb) < VNET_PACKET_SKIP) { - nskb = alloc_and_align_skb(skb->dev, skb->len); + skb_headroom(skb) < VNET_PACKET_SKIP || docopy) { + int start = 0, offset; + __wsum csum; + + len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN; + nskb = alloc_and_align_skb(skb->dev, len); + if (nskb == NULL) { + dev_kfree_skb(skb); + return NULL; + } skb_reserve(nskb, VNET_PACKET_SKIP); - if (skb_copy_bits(skb, 0, nskb->data, skb->len)) { + + nskb->protocol = skb->protocol; + offset = skb_mac_header(skb) - skb->data; + skb_set_mac_header(nskb, offset); + offset = skb_network_header(skb) - skb->data; + skb_set_network_header(nskb, offset); + offset = skb_transport_header(skb) - skb->data; + skb_set_transport_header(nskb, offset); + + offset = 0; + nskb->csum_offset = skb->csum_offset; + nskb->ip_summed = skb->ip_summed; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + start = skb_checksum_start_offset(skb); + if (start) { + struct iphdr *iph = ip_hdr(nskb); + int offset = start + nskb->csum_offset; + + if (skb_copy_bits(skb, 0, nskb->data, start)) { + dev_kfree_skb(nskb); + dev_kfree_skb(skb); + return NULL; + } + *(__sum16 *)(skb->data + offset) = 0; + csum = skb_copy_and_csum_bits(skb, start, + nskb->data + start, + skb->len - start, 0); + if (iph->protocol == IPPROTO_TCP || + iph->protocol == IPPROTO_UDP) { + csum = csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - start, + iph->protocol, csum); + } + *(__sum16 *)(nskb->data + offset) = csum; + + nskb->ip_summed = CHECKSUM_NONE; + } else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) { dev_kfree_skb(nskb); dev_kfree_skb(skb); return NULL; } (void)skb_put(nskb, skb->len); + if (skb_is_gso(skb)) { + skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size; + skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; + } dev_kfree_skb(skb); skb = nskb; } - - *pstart = skb->data - VNET_PACKET_SKIP; - *plen = len; return skb; } +static u16 +vnet_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + struct vnet *vp = netdev_priv(dev); + struct vnet_port *port = __tx_port_find(vp, skb); + + if (port == NULL) + return 0; + return port->q_index; +} + +static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev); + +static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb) +{ + struct net_device *dev = port->vp->dev; + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + struct sk_buff *segs; + int maclen, datalen; + int status; + int gso_size, gso_type, gso_segs; + int hlen = 
skb_transport_header(skb) - skb_mac_header(skb); + int proto = IPPROTO_IP; + + if (skb->protocol == htons(ETH_P_IP)) + proto = ip_hdr(skb)->protocol; + else if (skb->protocol == htons(ETH_P_IPV6)) + proto = ipv6_hdr(skb)->nexthdr; + + if (proto == IPPROTO_TCP) + hlen += tcp_hdr(skb)->doff * 4; + else if (proto == IPPROTO_UDP) + hlen += sizeof(struct udphdr); + else { + pr_err("vnet_handle_offloads GSO with unknown transport " + "protocol %d tproto %d\n", skb->protocol, proto); + hlen = 128; /* XXX */ + } + datalen = port->tsolen - hlen; + + gso_size = skb_shinfo(skb)->gso_size; + gso_type = skb_shinfo(skb)->gso_type; + gso_segs = skb_shinfo(skb)->gso_segs; + + if (port->tso && gso_size < datalen) + gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen); + + if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) { + struct netdev_queue *txq; + + txq = netdev_get_tx_queue(dev, port->q_index); + netif_tx_stop_queue(txq); + if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs) + return NETDEV_TX_BUSY; + netif_tx_wake_queue(txq); + } + + maclen = skb_network_header(skb) - skb_mac_header(skb); + skb_pull(skb, maclen); + + if (port->tso && gso_size < datalen) { + /* segment to TSO size */ + skb_shinfo(skb)->gso_size = datalen; + skb_shinfo(skb)->gso_segs = gso_segs; + + segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); + + /* restore gso_size & gso_segs */ + skb_shinfo(skb)->gso_size = gso_size; + skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len - hlen, + gso_size); + } else + segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); + if (IS_ERR(segs)) { + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + skb_push(skb, maclen); + skb_reset_mac_header(skb); + + status = 0; + while (segs) { + struct sk_buff *curr = segs; + + segs = segs->next; + curr->next = NULL; + if (port->tso && curr->len > dev->mtu) { + skb_shinfo(curr)->gso_size = gso_size; + skb_shinfo(curr)->gso_type = gso_type; + skb_shinfo(curr)->gso_segs = + DIV_ROUND_UP(curr->len - hlen, gso_size); + } else + skb_shinfo(curr)->gso_size = 0; + + skb_push(curr, maclen); + skb_reset_mac_header(curr); + memcpy(skb_mac_header(curr), skb_mac_header(skb), + maclen); + curr->csum_start = skb_transport_header(curr) - curr->head; + if (ip_hdr(curr)->protocol == IPPROTO_TCP) + curr->csum_offset = offsetof(struct tcphdr, check); + else if (ip_hdr(curr)->protocol == IPPROTO_UDP) + curr->csum_offset = offsetof(struct udphdr, check); + + if (!(status & NETDEV_TX_MASK)) + status = vnet_start_xmit(curr, dev); + if (status & NETDEV_TX_MASK) + dev_kfree_skb_any(curr); + } + + if (!(status & NETDEV_TX_MASK)) + dev_kfree_skb_any(skb); + return status; +} + static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct vnet *vp = netdev_priv(dev); - struct vnet_port *port = tx_port_find(vp, skb); + struct vnet_port *port = NULL; struct vio_dring_state *dr; struct vio_net_desc *d; - unsigned long flags; unsigned int len; struct sk_buff *freeskbs = NULL; int i, err, txi; - void *start = NULL; - int nlen = 0; unsigned pending = 0; + struct netdev_queue *txq; - if (unlikely(!port)) + rcu_read_lock(); + port = __tx_port_find(vp, skb); + if (unlikely(!port)) { + rcu_read_unlock(); goto out_dropped; + } - skb = vnet_skb_shape(skb, &start, &nlen); - - if (unlikely(!skb)) - goto out_dropped; + if (skb_is_gso(skb) && skb->len > port->tsolen) { + err = vnet_handle_offloads(port, skb); + rcu_read_unlock(); + return err; + } - if (skb->len > port->rmtu) { + if (!skb_is_gso(skb) && skb->len > port->rmtu) { unsigned long localmtu = port->rmtu - 
ETH_HLEN; if (vio_version_after_eq(&port->vio, 1, 3)) @@ -937,6 +1300,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) fl4.saddr = ip_hdr(skb)->saddr; rt = ip_route_output_key(dev_net(dev), &fl4); + rcu_read_unlock(); if (!IS_ERR(rt)) { skb_dst_set(skb, &rt->dst); icmp_send(skb, ICMP_DEST_UNREACH, @@ -951,18 +1315,26 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) goto out_dropped; } - spin_lock_irqsave(&port->vio.lock, flags); + skb = vnet_skb_shape(skb, 2); + + if (unlikely(!skb)) + goto out_dropped; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + vnet_fullcsum(skb); dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + i = skb_get_queue_mapping(skb); + txq = netdev_get_tx_queue(dev, i); if (unlikely(vnet_tx_dring_avail(dr) < 1)) { - if (!netif_queue_stopped(dev)) { - netif_stop_queue(dev); + if (!netif_tx_queue_stopped(txq)) { + netif_tx_stop_queue(txq); /* This is a hard error, log it. */ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); dev->stats.tx_errors++; } - spin_unlock_irqrestore(&port->vio.lock, flags); + rcu_read_unlock(); return NETDEV_TX_BUSY; } @@ -978,16 +1350,15 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) if (len < ETH_ZLEN) len = ETH_ZLEN; - port->tx_bufs[txi].skb = skb; - skb = NULL; - - err = ldc_map_single(port->vio.lp, start, nlen, - port->tx_bufs[txi].cookies, VNET_MAXCOOKIES, - (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW)); + err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2, + (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW)); if (err < 0) { netdev_info(dev, "tx buffer map error %d\n", err); - goto out_dropped_unlock; + goto out_dropped; } + + port->tx_bufs[txi].skb = skb; + skb = NULL; port->tx_bufs[txi].ncookies = err; /* We don't rely on the ACKs to free the skb in vnet_start_xmit(), @@ -1003,6 +1374,21 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) d->ncookies = port->tx_bufs[txi].ncookies; for (i = 0; i < d->ncookies; i++) d->cookies[i] = port->tx_bufs[txi].cookies[i]; + if (vio_version_after_eq(&port->vio, 1, 7)) { + struct vio_net_dext *dext = vio_net_ext(d); + + memset(dext, 0, sizeof(*dext)); + if (skb_is_gso(port->tx_bufs[txi].skb)) { + dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb) + ->gso_size; + dext->flags |= VNET_PKT_IPV4_LSO; + } + if (vio_version_after_eq(&port->vio, 1, 8) && + !port->switch_port) { + dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK; + dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK; + } + } /* This has to be a non-SMP write barrier because we are writing * to memory which is shared with the peer LDOM. 
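The barrier comment above states the key ordering rule for the shared descriptor ring: every descriptor store must be globally visible before the state field flips to READY, because the peer LDOM observes that field directly. A condensed sketch of the publish sequence (illustrative helper, not the driver's exact code):

/* illustrative only: publish a TX descriptor to the peer LDOM */
static void example_publish_desc(struct vio_net_desc *d, int len, int ncookies)
{
	d->size = len;			/* payload/cookie fields first */
	d->ncookies = ncookies;
	wmb();				/* make them visible to the peer */
	d->hdr.state = VIO_DESC_READY;	/* only then expose the entry */
}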
@@ -1039,7 +1425,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) netdev_info(dev, "TX trigger error %d\n", err); d->hdr.state = VIO_DESC_FREE; dev->stats.tx_carrier_errors++; - goto out_dropped_unlock; + goto out_dropped; } ldc_start_done: @@ -1050,31 +1436,29 @@ ldc_start_done: dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); if (unlikely(vnet_tx_dring_avail(dr) < 1)) { - netif_stop_queue(dev); + netif_tx_stop_queue(txq); if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr)) - netif_wake_queue(dev); + netif_tx_wake_queue(txq); } - spin_unlock_irqrestore(&port->vio.lock, flags); + (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT); + rcu_read_unlock(); vnet_free_skbs(freeskbs); - (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT); - return NETDEV_TX_OK; -out_dropped_unlock: - spin_unlock_irqrestore(&port->vio.lock, flags); - out_dropped: - if (skb) - dev_kfree_skb(skb); - vnet_free_skbs(freeskbs); if (pending) (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT); else if (port) del_timer(&port->clean_timer); + if (port) + rcu_read_unlock(); + if (skb) + dev_kfree_skb(skb); + vnet_free_skbs(freeskbs); dev->stats.tx_dropped++; return NETDEV_TX_OK; } @@ -1087,14 +1471,14 @@ static void vnet_tx_timeout(struct net_device *dev) static int vnet_open(struct net_device *dev) { netif_carrier_on(dev); - netif_start_queue(dev); + netif_tx_start_all_queues(dev); return 0; } static int vnet_close(struct net_device *dev) { - netif_stop_queue(dev); + netif_tx_stop_all_queues(dev); netif_carrier_off(dev); return 0; @@ -1204,18 +1588,17 @@ static void vnet_set_rx_mode(struct net_device *dev) { struct vnet *vp = netdev_priv(dev); struct vnet_port *port; - unsigned long flags; - spin_lock_irqsave(&vp->lock, flags); - if (!list_empty(&vp->port_list)) { - port = list_entry(vp->port_list.next, struct vnet_port, list); + rcu_read_lock(); + list_for_each_entry_rcu(port, &vp->port_list, list) { if (port->switch_port) { __update_mc_list(vp, dev); __send_mc_list(vp, port); + break; } } - spin_unlock_irqrestore(&vp->lock, flags); + rcu_read_unlock(); } static int vnet_change_mtu(struct net_device *dev, int new_mtu) @@ -1295,18 +1678,20 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port) } } -static int vnet_port_alloc_tx_bufs(struct vnet_port *port) +static int vnet_port_alloc_tx_ring(struct vnet_port *port) { struct vio_dring_state *dr; - unsigned long len; + unsigned long len, elen; int i, err, ncookies; void *dring; dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - len = (VNET_TX_RING_SIZE * - (sizeof(struct vio_net_desc) + - (sizeof(struct ldc_trans_cookie) * 2))); + elen = sizeof(struct vio_net_desc) + + sizeof(struct ldc_trans_cookie) * 2; + if (vio_version_after_eq(&port->vio, 1, 7)) + elen += sizeof(struct vio_net_dext); + len = VNET_TX_RING_SIZE * elen; ncookies = VIO_MAX_RING_COOKIES; dring = ldc_alloc_exp_dring(port->vio.lp, len, @@ -1320,8 +1705,7 @@ static int vnet_port_alloc_tx_bufs(struct vnet_port *port) } dr->base = dring; - dr->entry_size = (sizeof(struct vio_net_desc) + - (sizeof(struct ldc_trans_cookie) * 2)); + dr->entry_size = elen; dr->num_entries = VNET_TX_RING_SIZE; dr->prod = dr->cons = 0; port->start_cons = true; /* need an initial trigger */ @@ -1342,6 +1726,21 @@ err_out: return err; } +#ifdef CONFIG_NET_POLL_CONTROLLER +static void vnet_poll_controller(struct net_device *dev) +{ + struct vnet *vp = netdev_priv(dev); + struct vnet_port *port; + unsigned long flags; + + spin_lock_irqsave(&vp->lock, flags); 
+ if (!list_empty(&vp->port_list)) { + port = list_entry(vp->port_list.next, struct vnet_port, list); + napi_schedule(&port->napi); + } + spin_unlock_irqrestore(&vp->lock, flags); +} +#endif static LIST_HEAD(vnet_list); static DEFINE_MUTEX(vnet_list_mutex); @@ -1354,6 +1753,10 @@ static const struct net_device_ops vnet_ops = { .ndo_tx_timeout = vnet_tx_timeout, .ndo_change_mtu = vnet_change_mtu, .ndo_start_xmit = vnet_start_xmit, + .ndo_select_queue = vnet_select_queue, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = vnet_poll_controller, +#endif }; static struct vnet *vnet_new(const u64 *local_mac) @@ -1362,7 +1765,7 @@ static struct vnet *vnet_new(const u64 *local_mac) struct vnet *vp; int err, i; - dev = alloc_etherdev(sizeof(*vp)); + dev = alloc_etherdev_mqs(sizeof(*vp), VNET_MAX_TXQS, 1); if (!dev) return ERR_PTR(-ENOMEM); dev->needed_headroom = VNET_PACKET_SKIP + 8; @@ -1374,7 +1777,6 @@ static struct vnet *vnet_new(const u64 *local_mac) vp = netdev_priv(dev); spin_lock_init(&vp->lock); - tasklet_init(&vp->vnet_tx_wakeup, maybe_tx_wakeup, (unsigned long)vp); vp->dev = dev; INIT_LIST_HEAD(&vp->port_list); @@ -1387,6 +1789,10 @@ static struct vnet *vnet_new(const u64 *local_mac) dev->ethtool_ops = &vnet_ethtool_ops; dev->watchdog_timeo = VNET_TX_TIMEOUT; + dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | + NETIF_F_HW_CSUM | NETIF_F_SG; + dev->features = dev->hw_features; + err = register_netdev(dev); if (err) { pr_err("Cannot register net device, aborting\n"); @@ -1434,7 +1840,6 @@ static void vnet_cleanup(void) vp = list_first_entry(&vnet_list, struct vnet, list); list_del(&vp->list); dev = vp->dev; - tasklet_kill(&vp->vnet_tx_wakeup); /* vio_unregister_driver() should have cleaned up port_list */ BUG_ON(!list_empty(&vp->port_list)); unregister_netdev(dev); @@ -1489,6 +1894,25 @@ static void print_version(void) const char *remote_macaddr_prop = "remote-mac-address"; +static void +vnet_port_add_txq(struct vnet_port *port) +{ + struct vnet *vp = port->vp; + int n; + + n = vp->nports++; + n = n & (VNET_MAX_TXQS - 1); + port->q_index = n; + netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index)); +} + +static void +vnet_port_rm_txq(struct vnet_port *port) +{ + port->vp->nports--; + netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index)); +} + static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) { struct mdesc_handle *hp; @@ -1536,9 +1960,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) if (err) goto err_out_free_port; - err = vnet_port_alloc_tx_bufs(port); - if (err) - goto err_out_free_ldc; + netif_napi_add(port->vp->dev, &port->napi, vnet_poll, NAPI_POLL_WEIGHT); INIT_HLIST_NODE(&port->hash); INIT_LIST_HEAD(&port->list); @@ -1547,13 +1969,17 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL) switch_port = 1; port->switch_port = switch_port; + port->tso = true; + port->tsolen = 0; spin_lock_irqsave(&vp->lock, flags); if (switch_port) - list_add(&port->list, &vp->port_list); + list_add_rcu(&port->list, &vp->port_list); else - list_add_tail(&port->list, &vp->port_list); - hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]); + list_add_tail_rcu(&port->list, &vp->port_list); + hlist_add_head_rcu(&port->hash, + &vp->port_hash[vnet_hashfn(port->raddr)]); + vnet_port_add_txq(port); spin_unlock_irqrestore(&vp->lock, flags); dev_set_drvdata(&vdev->dev, port); 
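vnet_port_add_txq() above assigns each new port a transmit queue by wrapping a running counter with n & (VNET_MAX_TXQS - 1); that mask is equivalent to n % VNET_MAX_TXQS only because the queue count is a power of two. A standalone sketch of the assignment, with the queue count chosen here purely for illustration:

#include <stdio.h>

#define VNET_MAX_TXQS	16	/* assumed value; must be a power of two */

int main(void)
{
	int nports = 0;
	int port;

	for (port = 0; port < 20; port++) {
		/* mirrors: n = vp->nports++; n = n & (VNET_MAX_TXQS - 1); */
		int q_index = nports++ & (VNET_MAX_TXQS - 1);

		printf("port %2d -> txq %2d\n", port, q_index);
	}
	return 0;
}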
@@ -1564,15 +1990,13 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) setup_timer(&port->clean_timer, vnet_clean_timer_expire, (unsigned long)port); + napi_enable(&port->napi); vio_port_up(&port->vio); mdesc_release(hp); return 0; -err_out_free_ldc: - vio_ldc_free(&port->vio); - err_out_free_port: kfree(port); @@ -1586,17 +2010,18 @@ static int vnet_port_remove(struct vio_dev *vdev) struct vnet_port *port = dev_get_drvdata(&vdev->dev); if (port) { - struct vnet *vp = port->vp; - unsigned long flags; del_timer_sync(&port->vio.timer); - del_timer_sync(&port->clean_timer); - spin_lock_irqsave(&vp->lock, flags); - list_del(&port->list); - hlist_del(&port->hash); - spin_unlock_irqrestore(&vp->lock, flags); + napi_disable(&port->napi); + + list_del_rcu(&port->list); + hlist_del_rcu(&port->hash); + synchronize_rcu(); + del_timer_sync(&port->clean_timer); + vnet_port_rm_txq(port); + netif_napi_del(&port->napi); vnet_port_free_tx_bufs(port); vio_ldc_free(&port->vio); diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h index c9110454261905bc60861eb34a7bfe7c268f0b9f..01ca781916834aba44a5cb94b0917f641a9e39e8 100644 --- a/drivers/net/ethernet/sun/sunvnet.h +++ b/drivers/net/ethernet/sun/sunvnet.h @@ -20,6 +20,9 @@ #define VNET_TX_RING_SIZE 512 #define VNET_TX_WAKEUP_THRESH(dr) ((dr)->pending / 4) +#define VNET_MINTSO 2048 /* VIO protocol's minimum TSO len */ +#define VNET_MAXTSO 65535 /* VIO protocol's maximum TSO len */ + /* VNET packets are sent in buffers with the first 6 bytes skipped * so that after the ethernet header the IPv4/IPv6 headers are aligned * properly. @@ -40,8 +43,9 @@ struct vnet_port { struct hlist_node hash; u8 raddr[ETH_ALEN]; - u8 switch_port; - u8 __pad; + unsigned switch_port:1; + unsigned tso:1; + unsigned __pad:14; struct vnet *vp; @@ -56,6 +60,13 @@ struct vnet_port { struct timer_list clean_timer; u64 rmtu; + u16 tsolen; + + struct napi_struct napi; + u32 napi_stop_idx; + bool napi_resume; + int rx_event; + u16 q_index; }; static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio) @@ -97,7 +108,7 @@ struct vnet { struct list_head list; u64 local_mac; - struct tasklet_struct vnet_tx_wakeup; + int nports; }; #endif /* _SUNVNET_H */ diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 5d8cb79561134bc2769fc900d5654bc6cce40933..605dd909bcc32fb4f95520b1a210a438a30649c2 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_TI bool "Texas Instruments (TI) devices" default y - depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX || ARCH_KEYSTONE)) + depends on PCI || EISA || AR7 || ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from @@ -32,7 +32,7 @@ config TI_DAVINCI_EMAC config TI_DAVINCI_MDIO tristate "TI DaVinci MDIO Support" - depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX || ARCH_KEYSTONE ) + depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE select PHYLIB ---help--- This driver supports TI's DaVinci MDIO module. @@ -42,7 +42,7 @@ config TI_DAVINCI_MDIO config TI_DAVINCI_CPDMA tristate "TI DaVinci CPDMA Support" - depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX ) + depends on ARCH_DAVINCI || ARCH_OMAP2PLUS ---help--- This driver supports TI's DaVinci CPDMA dma engine. 
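The sunvnet.h hunk above replaces two u8 flag bytes in struct vnet_port with 1-bit fields plus explicit padding, making room for the new tso flag. A standalone sketch of just the flag fields (everything else in the struct is omitted; note that on typical ABIs the bitfield group is carved out of a single unsigned int, so its sizeof differs from the two-byte original even though only 16 bits are declared):

#include <stdio.h>

struct old_flags {			/* before the patch */
	unsigned char switch_port;
	unsigned char __pad;
};

struct new_flags {			/* after the patch */
	unsigned switch_port:1;
	unsigned tso:1;
	unsigned __pad:14;
};

int main(void)
{
	printf("old flags: %zu bytes, new flags: %zu bytes\n",
	       sizeof(struct old_flags), sizeof(struct new_flags));
	return 0;
}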
@@ -58,7 +58,7 @@ config TI_CPSW_PHY_SEL config TI_CPSW tristate "TI CPSW Switch Support" - depends on ARM && (ARCH_DAVINCI || SOC_AM33XX) + depends on ARCH_DAVINCI || ARCH_OMAP2PLUS select TI_DAVINCI_CPDMA select TI_DAVINCI_MDIO select TI_CPSW_PHY_SEL diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 0f56b1c0e08203ea4fdc6dd29b54480bdd87a41b..70a930ac4fa9a0d113de07f3b6d52775312defc3 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -638,14 +638,12 @@ static int w5100_hw_probe(struct platform_device *pdev) } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem) - return -ENXIO; - mem_size = resource_size(mem); - priv->base = devm_ioremap_resource(&pdev->dev, mem); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); + mem_size = resource_size(mem); + spin_lock_init(&priv->reg_lock); priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE; if (priv->indirect) { diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index f961f14a047343e7b06a3ea6601656e7ce2c139b..7974b7d90fccd0525f3b1830995ba0accb8ed8a6 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -558,14 +558,12 @@ static int w5300_hw_probe(struct platform_device *pdev) } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem) - return -ENXIO; - mem_size = resource_size(mem); - priv->base = devm_ioremap_resource(&pdev->dev, mem); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); + mem_size = resource_size(mem); + spin_lock_init(&priv->reg_lock); priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE; if (priv->indirect) { diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 629077050fce02de2437ed73d6bf7f7b7de5a177..9c2d91ea0af48e35020594b73221f4354fc6dc5e 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -224,8 +224,7 @@ static void temac_dma_bd_release(struct net_device *ndev) dma_free_coherent(ndev->dev.parent, sizeof(*lp->tx_bd_v) * TX_BD_NUM, lp->tx_bd_v, lp->tx_bd_p); - if (lp->rx_skb) - kfree(lp->rx_skb); + kfree(lp->rx_skb); } /** diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 28dbbdc393ebf251cb782ff2ee4b5d276fa99d87..24858799c204fbe2640ad375b5ea75154b6aa795 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1200,8 +1200,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev) unregister_netdev(ndev); - if (lp->phy_node) - of_node_put(lp->phy_node); + of_node_put(lp->phy_node); lp->phy_node = NULL; xemaclite_remove_ndev(ndev); diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index caed6eee289c3a65a38bb9bb1a711617dd8d5134..7f975a2c8990fcff4491b4171cae626490d45807 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -414,7 +414,7 @@ static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data) * ================ * * Overview: - * Retrieves the address range used to access control and status + * Retrieves the address ranges used to access control and status * registers. 
* * Returns: @@ -422,8 +422,8 @@ static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data) * * Arguments: * bdev - pointer to device information - * bar_start - pointer to store the start address - * bar_len - pointer to store the length of the area + * bar_start - pointer to store the start addresses + * bar_len - pointer to store the lengths of the areas * * Assumptions: * I am sure there are some. @@ -442,38 +442,47 @@ static void dfx_get_bars(struct device *bdev, if (dfx_bus_pci) { int num = dfx_use_mmio ? 0 : 1; - *bar_start = pci_resource_start(to_pci_dev(bdev), num); - *bar_len = pci_resource_len(to_pci_dev(bdev), num); + bar_start[0] = pci_resource_start(to_pci_dev(bdev), num); + bar_len[0] = pci_resource_len(to_pci_dev(bdev), num); + bar_start[2] = bar_start[1] = 0; + bar_len[2] = bar_len[1] = 0; } if (dfx_bus_eisa) { unsigned long base_addr = to_eisa_device(bdev)->base_addr; - resource_size_t bar; + resource_size_t bar_lo; + resource_size_t bar_hi; if (dfx_use_mmio) { - bar = inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_2); - bar <<= 8; - bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_1); - bar <<= 8; - bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_0); - bar <<= 16; - *bar_start = bar; - bar = inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_2); - bar <<= 8; - bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_1); - bar <<= 8; - bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_0); - bar <<= 16; - *bar_len = (bar | PI_MEM_ADD_MASK_M) + 1; + bar_lo = inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_2); + bar_lo <<= 8; + bar_lo |= inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_1); + bar_lo <<= 8; + bar_lo |= inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_0); + bar_lo <<= 8; + bar_start[0] = bar_lo; + bar_hi = inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_2); + bar_hi <<= 8; + bar_hi |= inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_1); + bar_hi <<= 8; + bar_hi |= inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_0); + bar_hi <<= 8; + bar_len[0] = ((bar_hi - bar_lo) | PI_MEM_ADD_MASK_M) + + 1; } else { - *bar_start = base_addr; - *bar_len = PI_ESIC_K_CSR_IO_LEN + - PI_ESIC_K_BURST_HOLDOFF_LEN; + bar_start[0] = base_addr; + bar_len[0] = PI_ESIC_K_CSR_IO_LEN; } + bar_start[1] = base_addr + PI_DEFEA_K_BURST_HOLDOFF; + bar_len[1] = PI_ESIC_K_BURST_HOLDOFF_LEN; + bar_start[2] = base_addr + PI_ESIC_K_ESIC_CSR; + bar_len[2] = PI_ESIC_K_ESIC_CSR_LEN; } if (dfx_bus_tc) { - *bar_start = to_tc_dev(bdev)->resource.start + - PI_TC_K_CSR_OFFSET; - *bar_len = PI_TC_K_CSR_LEN; + bar_start[0] = to_tc_dev(bdev)->resource.start + + PI_TC_K_CSR_OFFSET; + bar_len[0] = PI_TC_K_CSR_LEN; + bar_start[2] = bar_start[1] = 0; + bar_len[2] = bar_len[1] = 0; } } @@ -518,13 +527,14 @@ static int dfx_register(struct device *bdev) { static int version_disp; int dfx_bus_pci = dev_is_pci(bdev); + int dfx_bus_eisa = DFX_BUS_EISA(bdev); int dfx_bus_tc = DFX_BUS_TC(bdev); int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; const char *print_name = dev_name(bdev); struct net_device *dev; DFX_board_t *bp; /* board pointer */ - resource_size_t bar_start = 0; /* pointer to port */ - resource_size_t bar_len = 0; /* resource length */ + resource_size_t bar_start[3]; /* pointers to ports */ + resource_size_t bar_len[3]; /* resource length */ int alloc_size; /* total buffer size used */ struct resource *region; int err = 0; @@ -542,10 +552,13 @@ static int dfx_register(struct device *bdev) } /* Enable PCI device. 
*/ - if (dfx_bus_pci && pci_enable_device(to_pci_dev(bdev))) { - printk(KERN_ERR "%s: Cannot enable PCI device, aborting\n", - print_name); - goto err_out; + if (dfx_bus_pci) { + err = pci_enable_device(to_pci_dev(bdev)); + if (err) { + pr_err("%s: Cannot enable PCI device, aborting\n", + print_name); + goto err_out; + } } SET_NETDEV_DEV(dev, bdev); @@ -554,31 +567,62 @@ bp->bus_dev = bdev; dev_set_drvdata(bdev, dev); - dfx_get_bars(bdev, &bar_start, &bar_len); + dfx_get_bars(bdev, bar_start, bar_len); + if (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0) { + pr_err("%s: Cannot use MMIO, no address set, aborting\n", + print_name); + pr_err("%s: Run ECU and set adapter's MMIO location\n", + print_name); + pr_err("%s: Or recompile driver with \"CONFIG_DEFXX_MMIO=n\"" + "\n", print_name); + err = -ENXIO; + goto err_out; + } if (dfx_use_mmio) - region = request_mem_region(bar_start, bar_len, print_name); + region = request_mem_region(bar_start[0], bar_len[0], + print_name); else - region = request_region(bar_start, bar_len, print_name); + region = request_region(bar_start[0], bar_len[0], print_name); if (!region) { - printk(KERN_ERR "%s: Cannot reserve I/O resource " - "0x%lx @ 0x%lx, aborting\n", - print_name, (long)bar_len, (long)bar_start); + pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, " + "aborting\n", print_name, dfx_use_mmio ? "MMIO" : "I/O", + (long)bar_len[0], (long)bar_start[0]); err = -EBUSY; goto err_out_disable; } + if (bar_start[1] != 0) { + region = request_region(bar_start[1], bar_len[1], print_name); + if (!region) { + pr_err("%s: Cannot reserve I/O resource " + "0x%lx @ 0x%lx, aborting\n", print_name, + (long)bar_len[1], (long)bar_start[1]); + err = -EBUSY; + goto err_out_csr_region; + } + } + if (bar_start[2] != 0) { + region = request_region(bar_start[2], bar_len[2], print_name); + if (!region) { + pr_err("%s: Cannot reserve I/O resource " + "0x%lx @ 0x%lx, aborting\n", print_name, + (long)bar_len[2], (long)bar_start[2]); + err = -EBUSY; + goto err_out_bh_region; + } + } /* Set up I/O base address. 
*/ if (dfx_use_mmio) { - bp->base.mem = ioremap_nocache(bar_start, bar_len); + bp->base.mem = ioremap_nocache(bar_start[0], bar_len[0]); if (!bp->base.mem) { printk(KERN_ERR "%s: Cannot map MMIO\n", print_name); err = -ENOMEM; - goto err_out_region; + goto err_out_esic_region; } } else { - bp->base.port = bar_start; - dev->base_addr = bar_start; + bp->base.port = bar_start[0]; + dev->base_addr = bar_start[0]; } /* Initialize new device structure */ @@ -587,7 +631,7 @@ static int dfx_register(struct device *bdev) if (dfx_bus_pci) pci_set_master(to_pci_dev(bdev)); - if (dfx_driver_init(dev, print_name, bar_start) != DFX_K_SUCCESS) { + if (dfx_driver_init(dev, print_name, bar_start[0]) != DFX_K_SUCCESS) { err = -ENODEV; goto err_out_unmap; } @@ -615,11 +659,19 @@ err_out_unmap: if (dfx_use_mmio) iounmap(bp->base.mem); -err_out_region: +err_out_esic_region: + if (bar_start[2] != 0) + release_region(bar_start[2], bar_len[2]); + +err_out_bh_region: + if (bar_start[1] != 0) + release_region(bar_start[1], bar_len[1]); + +err_out_csr_region: if (dfx_use_mmio) - release_mem_region(bar_start, bar_len); + release_mem_region(bar_start[0], bar_len[0]); else - release_region(bar_start, bar_len); + release_region(bar_start[0], bar_len[0]); err_out_disable: if (dfx_bus_pci) @@ -711,13 +763,14 @@ static void dfx_bus_init(struct net_device *dev) } /* - * Enable memory decoding (MEMCS0) and/or port decoding + * Enable memory decoding (MEMCS1) and/or port decoding * (IOCS1/IOCS0) as appropriate in Function Control - * Register. IOCS0 is used for PDQ registers, taking 16 - * 32-bit words, while IOCS1 is used for the Burst Holdoff - * register, taking a single 32-bit word only. We use the - * slot-specific I/O range as per the ESIC spec, that is - * set bits 15:12 in the mask registers to mask them out. + * Register. MEMCS1 or IOCS0 is used for PDQ registers, + * taking 16 32-bit words, while IOCS1 is used for the + * Burst Holdoff register, taking a single 32-bit word + * only. We use the slot-specific I/O range as per the + * ESIC spec, that is set bits 15:12 in the mask registers + * to mask them out. */ /* Set the decode range of the board. */ @@ -742,9 +795,11 @@ static void dfx_bus_init(struct net_device *dev) outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_1_0); /* Enable the decoders. */ - val = PI_FUNCTION_CNTRL_M_IOCS1 | PI_FUNCTION_CNTRL_M_IOCS0; + val = PI_FUNCTION_CNTRL_M_IOCS1; if (dfx_use_mmio) - val |= PI_FUNCTION_CNTRL_M_MEMCS0; + val |= PI_FUNCTION_CNTRL_M_MEMCS1; + else + val |= PI_FUNCTION_CNTRL_M_IOCS0; outb(val, base_addr + PI_ESIC_K_FUNCTION_CNTRL); /* @@ -838,6 +893,12 @@ static void dfx_bus_uninit(struct net_device *dev) val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); val &= ~PI_CONFIG_STAT_0_M_INT_ENB; outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); + + /* Disable the board. */ + outb(0, base_addr + PI_ESIC_K_SLOT_CNTRL); + + /* Disable memory and port decoders. */ + outb(0, base_addr + PI_ESIC_K_FUNCTION_CNTRL); } if (dfx_bus_pci) { /* Disable interrupts at PCI bus interface chip (PFI) */ @@ -1061,8 +1122,8 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name, board_name = "DEFEA"; if (dfx_bus_pci) board_name = "DEFPA"; - pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n", - print_name, board_name, dfx_use_mmio ? "" : "I/O ", + pr_info("%s: %s at %s addr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n", + print_name, board_name, dfx_use_mmio ? 
"MMIO" : "I/O", (long long)bar_start, dev->irq, dev->dev_addr); /* @@ -3636,8 +3697,8 @@ static void dfx_unregister(struct device *bdev) int dfx_bus_pci = dev_is_pci(bdev); int dfx_bus_tc = DFX_BUS_TC(bdev); int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; - resource_size_t bar_start = 0; /* pointer to port */ - resource_size_t bar_len = 0; /* resource length */ + resource_size_t bar_start[3]; /* pointers to ports */ + resource_size_t bar_len[3]; /* resource lengths */ int alloc_size; /* total buffer size used */ unregister_netdev(dev); @@ -3655,12 +3716,16 @@ static void dfx_unregister(struct device *bdev) dfx_bus_uninit(dev); - dfx_get_bars(bdev, &bar_start, &bar_len); + dfx_get_bars(bdev, bar_start, bar_len); + if (bar_start[2] != 0) + release_region(bar_start[2], bar_len[2]); + if (bar_start[1] != 0) + release_region(bar_start[1], bar_len[1]); if (dfx_use_mmio) { iounmap(bp->base.mem); - release_mem_region(bar_start, bar_len); + release_mem_region(bar_start[0], bar_len[0]); } else - release_region(bar_start, bar_len); + release_region(bar_start[0], bar_len[0]); if (dfx_bus_pci) pci_disable_device(to_pci_dev(bdev)); diff --git a/drivers/net/fddi/defxx.h b/drivers/net/fddi/defxx.h index 9527f0182fd41c70df8796b3888e7e1f3234a5a5..9d30fde2ef3cbd2f59b780df5c0390e201bdc6ff 100644 --- a/drivers/net/fddi/defxx.h +++ b/drivers/net/fddi/defxx.h @@ -1481,9 +1481,11 @@ typedef union #define PI_ESIC_K_CSR_IO_LEN 0x40 /* 64 bytes */ #define PI_ESIC_K_BURST_HOLDOFF_LEN 0x04 /* 4 bytes */ +#define PI_ESIC_K_ESIC_CSR_LEN 0x40 /* 64 bytes */ #define PI_DEFEA_K_CSR_IO 0x000 #define PI_DEFEA_K_BURST_HOLDOFF 0x040 +#define PI_ESIC_K_ESIC_CSR 0xC80 #define PI_ESIC_K_SLOT_ID 0xC80 #define PI_ESIC_K_SLOT_CNTRL 0xC84 @@ -1556,7 +1558,7 @@ typedef union #define PI_BURST_HOLDOFF_V_RESERVED 1 #define PI_BURST_HOLDOFF_V_MEM_MAP 0 -/* Define the implicit mask of the Memory Address Mask Register. */ +/* Define the implicit mask of the Memory Address Compare registers. 
*/ #define PI_MEM_ADD_MASK_M 0x3ff diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index c3c4051a089df817b63d382645bdba0affd7f86f..daca0dee88f3634570ab6cf984e80066bd331341 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -675,8 +675,7 @@ out_free: kfree(xbuff); kfree(rbuff); - if (dev) - free_netdev(dev); + free_netdev(dev); out: return err; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 7d76c9523395e5b5137784189d902d9cb1ecaed5..dd867e6cabd6167342b7a3875c427b94321650bf 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -440,7 +440,8 @@ static int negotiate_nvsp_ver(struct hv_device *device, /* NVSPv2 only: Send NDIS config */ memset(init_packet, 0, sizeof(struct nvsp_message)); init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG; - init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu; + init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu + + ETH_HLEN; init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1; ret = vmbus_sendpacket(device->channel, init_packet, @@ -560,9 +561,7 @@ int netvsc_device_remove(struct hv_device *device) vmbus_close(device->channel); /* Release all resources */ - if (net_device->sub_cb_buf) - vfree(net_device->sub_cb_buf); - + vfree(net_device->sub_cb_buf); free_netvsc_device(net_device); return 0; } @@ -765,6 +764,9 @@ int netvsc_send(struct hv_device *device, out_channel = device->channel; packet->channel = out_channel; + if (out_channel->rescind) + return -ENODEV; + if (packet->page_buf_cnt) { ret = vmbus_sendpacket_pagebuffer(out_channel, packet->page_buf, diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 78ec33f5100b183e31bfc949763acfa4685b9836..15d82eda0baf4141465addd6207c6202f09f2543 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -193,7 +193,9 @@ static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb) struct flow_keys flow; int data_len; - if (!skb_flow_dissect(skb, &flow) || flow.n_proto != htons(ETH_P_IP)) + if (!skb_flow_dissect(skb, &flow) || + !(flow.n_proto == htons(ETH_P_IP) || + flow.n_proto == htons(ETH_P_IPV6))) return false; if (flow.ip_proto == IPPROTO_TCP) @@ -697,9 +699,10 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) return -ENODEV; if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2) - limit = NETVSC_MTU; + limit = NETVSC_MTU - ETH_HLEN; - if (mtu < 68 || mtu > limit) + /* Hyper-V hosts don't support MTU < ETH_DATA_LEN (1500) */ + if (mtu < ETH_DATA_LEN || mtu > limit) return -EINVAL; nvdev->start_remove = true; diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 2b86f0b6f6d18adf8b13bdba263a426a4d89aaa6..ec0c40a8f653cb80e7c8a591db8975b79e4d6066 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -728,7 +728,8 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue) rssp->hdr.size = sizeof(struct ndis_recv_scale_param); rssp->flag = 0; rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 | - NDIS_HASH_TCP_IPV4; + NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 | + NDIS_HASH_TCP_IPV6; rssp->indirect_tabsize = 4*ITAB_NUM; rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param); rssp->hashkey_size = HASH_KEYLEN; @@ -957,6 +958,9 @@ static int rndis_filter_close_device(struct rndis_device *dev) return 0; ret = rndis_filter_set_packet_filter(dev, 0); + if (ret == -ENODEV) + ret = 0; + if (ret == 0) 
dev->state = RNDIS_DEV_INITIALIZED; @@ -997,6 +1001,7 @@ int rndis_filter_device_add(struct hv_device *dev, int t; struct ndis_recv_scale_cap rsscap; u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); + u32 mtu, size; rndis_device = get_rndis_device(); if (!rndis_device) @@ -1028,6 +1033,14 @@ int rndis_filter_device_add(struct hv_device *dev, return ret; } + /* Get the MTU from the host */ + size = sizeof(u32); + ret = rndis_filter_query_device(rndis_device, + RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE, + &mtu, &size); + if (ret == 0 && size == sizeof(u32)) + net_device->ndev->mtu = mtu; + /* Get the mac address */ ret = rndis_filter_query_device_mac(rndis_device); if (ret != 0) { diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig index 391a916622a94d0354968ef4c1627c918fcf234d..1a3c3e57aa0b67750eccb0d2fc62eefb4eef0050 100644 --- a/drivers/net/ieee802154/Kconfig +++ b/drivers/net/ieee802154/Kconfig @@ -10,16 +10,6 @@ menuconfig IEEE802154_DRIVERS If you say N, all options in this submenu will be skipped and disabled. -config IEEE802154_FAKEHARD - tristate "Fake LR-WPAN driver with several interconnected devices" - depends on IEEE802154_DRIVERS - ---help--- - Say Y here to enable the fake driver that serves as an example - of HardMAC device driver. - - This driver can also be built as a module. To do so say M here. - The module will be called 'fakehard'. - config IEEE802154_FAKELB depends on IEEE802154_DRIVERS && MAC802154 tristate "IEEE 802.15.4 loopback driver" diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile index 655cb95e6e247b42764151ed6a5768bd5f58d9b8..d77fa4d77e27d8e19e43f29f2161633c6754dd84 100644 --- a/drivers/net/ieee802154/Makefile +++ b/drivers/net/ieee802154/Makefile @@ -1,4 +1,3 @@ -obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index c9d2a752abd7b176e2f4889d3fc9a9cf4d8fd322..1c0135620c62261dca42b32c3958d43c3dca3ded 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * Written by: * Dmitry Eremin-Solenikov * Alexander Smirnov @@ -33,10 +29,10 @@ #include #include #include +#include -#include #include -#include +#include struct at86rf230_local; /* at86rf2xx chip depend data. 
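The RNDIS change above adopts the host-reported MTU only when the OID query both succeeds and returns exactly sizeof(u32) bytes. A userspace sketch of that validate-before-use pattern follows; rndis_query() is a hypothetical stand-in for rndis_filter_query_device(), and only the OID value is taken from the NDIS specification:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define OID_GEN_MAXIMUM_FRAME_SIZE	0x00010106u

/* Pretend host-side query: writes a u32 reply and its length. */
static int rndis_query(uint32_t oid, void *buf, uint32_t *size)
{
	uint32_t mtu = 1500;

	(void)oid;
	if (*size < sizeof(mtu))
		return -1;
	memcpy(buf, &mtu, sizeof(mtu));
	*size = sizeof(mtu);
	return 0;
}

int main(void)
{
	uint32_t mtu, size = sizeof(uint32_t);
	uint32_t dev_mtu = 1500;	/* current netdev MTU */

	/* Adopt the host's value only on success AND an exact-size reply;
	 * anything else leaves the device MTU untouched.
	 */
	if (rndis_query(OID_GEN_MAXIMUM_FRAME_SIZE, &mtu, &size) == 0 &&
	    size == sizeof(uint32_t))
		dev_mtu = mtu;

	printf("mtu = %" PRIu32 "\n", dev_mtu);
	return 0;
}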
@@ -50,15 +46,11 @@ struct at86rf2xx_chip_data { u16 t_off_to_tx_on; u16 t_frame; u16 t_p_ack; - /* short interframe spacing time */ - u16 t_sifs; - /* long interframe spacing time */ - u16 t_lifs; /* completion timeout for tx in msecs */ u16 t_tx_timeout; int rssi_base_val; - int (*set_channel)(struct at86rf230_local *, int, int); + int (*set_channel)(struct at86rf230_local *, u8, u8); int (*get_desense_steps)(struct at86rf230_local *, s32); }; @@ -74,12 +66,14 @@ struct at86rf230_state_change { void (*complete)(void *context); u8 from_state; u8 to_state; + + bool irq_enable; }; struct at86rf230_local { struct spi_device *spi; - struct ieee802154_dev *dev; + struct ieee802154_hw *hw; struct at86rf2xx_chip_data *data; struct regmap *regmap; @@ -89,10 +83,10 @@ struct at86rf230_local { struct at86rf230_state_change irq; bool tx_aret; + s8 max_frame_retries; bool is_tx; /* spinlock for is_tx protection */ spinlock_t lock; - struct completion tx_complete; struct sk_buff *tx_skb; struct at86rf230_state_change tx; }; @@ -291,10 +285,11 @@ struct at86rf230_local { #define AT86RF2XX_NUMREGS 0x3F -static int +static void at86rf230_async_state_change(struct at86rf230_local *lp, struct at86rf230_state_change *ctx, - const u8 state, void (*complete)(void *context)); + const u8 state, void (*complete)(void *context), + const bool irq_enable); static inline int __at86rf230_write(struct at86rf230_local *lp, @@ -451,7 +446,8 @@ at86rf230_async_error_recover(void *context) struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; - at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL); + at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL, false); + ieee802154_wake_queue(lp->hw); } static void @@ -461,21 +457,31 @@ at86rf230_async_error(struct at86rf230_local *lp, dev_err(&lp->spi->dev, "spi_async error %d\n", rc); at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF, - at86rf230_async_error_recover); + at86rf230_async_error_recover, false); } /* Generic function to get some register value in async mode */ -static int +static void at86rf230_async_read_reg(struct at86rf230_local *lp, const u8 reg, struct at86rf230_state_change *ctx, - void (*complete)(void *context)) + void (*complete)(void *context), + const bool irq_enable) { + int rc; + u8 *tx_buf = ctx->buf; tx_buf[0] = (reg & CMD_REG_MASK) | CMD_REG; ctx->trx.len = 2; ctx->msg.complete = complete; - return spi_async(lp->spi, &ctx->msg); + ctx->irq_enable = irq_enable; + rc = spi_async(lp->spi, &ctx->msg); + if (rc) { + if (irq_enable) + enable_irq(lp->spi->irq); + + at86rf230_async_error(lp, ctx, rc); + } } static void @@ -512,7 +518,8 @@ at86rf230_async_state_assert(void *context) if (ctx->to_state == STATE_TX_ON) { at86rf230_async_state_change(lp, ctx, STATE_FORCE_TX_ON, - ctx->complete); + ctx->complete, + ctx->irq_enable); return; } } @@ -535,7 +542,6 @@ at86rf230_async_state_delay(void *context) struct at86rf230_local *lp = ctx->lp; struct at86rf2xx_chip_data *c = lp->data; bool force = false; - int rc; /* The force state changes are will show as normal states in the * state status subregister. 
We change the to_state to the @@ -604,10 +610,9 @@ at86rf230_async_state_delay(void *context) udelay(1); change: - rc = at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx, - at86rf230_async_state_assert); - if (rc) - dev_err(&lp->spi->dev, "spi_async error %d\n", rc); + at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx, + at86rf230_async_state_assert, + ctx->irq_enable); } static void @@ -622,10 +627,9 @@ at86rf230_async_state_change_start(void *context) /* Check for "possible" STATE_TRANSITION_IN_PROGRESS */ if (trx_state == STATE_TRANSITION_IN_PROGRESS) { udelay(1); - rc = at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx, - at86rf230_async_state_change_start); - if (rc) - dev_err(&lp->spi->dev, "spi_async error %d\n", rc); + at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx, + at86rf230_async_state_change_start, + ctx->irq_enable); return; } @@ -647,20 +651,27 @@ at86rf230_async_state_change_start(void *context) ctx->trx.len = 2; ctx->msg.complete = at86rf230_async_state_delay; rc = spi_async(lp->spi, &ctx->msg); - if (rc) - dev_err(&lp->spi->dev, "spi_async error %d\n", rc); + if (rc) { + if (ctx->irq_enable) + enable_irq(lp->spi->irq); + + at86rf230_async_error(lp, &lp->state, rc); + } } -static int +static void at86rf230_async_state_change(struct at86rf230_local *lp, struct at86rf230_state_change *ctx, - const u8 state, void (*complete)(void *context)) + const u8 state, void (*complete)(void *context), + const bool irq_enable) { /* Initialization for the state change context */ ctx->to_state = state; ctx->complete = complete; - return at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx, - at86rf230_async_state_change_start); + ctx->irq_enable = irq_enable; + at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx, + at86rf230_async_state_change_start, + irq_enable); } static void @@ -681,17 +692,16 @@ at86rf230_sync_state_change(struct at86rf230_local *lp, unsigned int state) { int rc; - rc = at86rf230_async_state_change(lp, &lp->state, state, - at86rf230_sync_state_change_complete); - if (rc) { - at86rf230_async_error(lp, &lp->state, rc); - return rc; - } + at86rf230_async_state_change(lp, &lp->state, state, + at86rf230_sync_state_change_complete, + false); rc = wait_for_completion_timeout(&lp->state_complete, msecs_to_jiffies(100)); - if (!rc) + if (!rc) { + at86rf230_async_error(lp, &lp->state, -ETIMEDOUT); return -ETIMEDOUT; + } return 0; } @@ -701,8 +711,14 @@ at86rf230_tx_complete(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; + struct sk_buff *skb = lp->tx_skb; + + enable_irq(lp->spi->irq); - complete(&lp->tx_complete); + if (lp->max_frame_retries <= 0) + ieee802154_xmit_complete(lp->hw, skb, true); + else + ieee802154_xmit_complete(lp->hw, skb, false); } static void @@ -710,12 +726,9 @@ at86rf230_tx_on(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; - int rc; - rc = at86rf230_async_state_change(lp, &lp->irq, STATE_RX_AACK_ON, - at86rf230_tx_complete); - if (rc) - at86rf230_async_error(lp, ctx, rc); + at86rf230_async_state_change(lp, &lp->irq, STATE_RX_AACK_ON, + at86rf230_tx_complete, true); } static void @@ -723,12 +736,9 @@ at86rf230_tx_trac_error(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; - int rc; - rc = at86rf230_async_state_change(lp, ctx, STATE_TX_ON, - at86rf230_tx_on); - if (rc) - at86rf230_async_error(lp, ctx, rc); + at86rf230_async_state_change(lp, ctx, STATE_TX_ON, + at86rf230_tx_on, true); } static void @@ -738,17 +748,14 
@@ at86rf230_tx_trac_check(void *context) struct at86rf230_local *lp = ctx->lp; const u8 *buf = ctx->buf; const u8 trac = (buf[1] & 0xe0) >> 5; - int rc; /* If trac status is different than zero we need to do a state change * to STATE_FORCE_TRX_OFF then STATE_TX_ON to recover the transceiver * state to TX_ON. */ if (trac) { - rc = at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF, - at86rf230_tx_trac_error); - if (rc) - at86rf230_async_error(lp, ctx, rc); + at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF, + at86rf230_tx_trac_error, true); return; } @@ -761,51 +768,29 @@ at86rf230_tx_trac_status(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; - int rc; - rc = at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx, - at86rf230_tx_trac_check); - if (rc) - at86rf230_async_error(lp, ctx, rc); + at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx, + at86rf230_tx_trac_check, true); } static void at86rf230_rx(struct at86rf230_local *lp, - const u8 *data, u8 len) + const u8 *data, const u8 len, const u8 lqi) { - u8 lqi; struct sk_buff *skb; u8 rx_local_buf[AT86RF2XX_MAX_BUF]; - if (len < 2) - return; - - /* read full frame buffer and invalid lqi value to lowest - * indicator if frame was is in a corrupted state. - */ - if (len > IEEE802154_MTU) { - lqi = 0; - len = IEEE802154_MTU; - dev_vdbg(&lp->spi->dev, "corrupted frame received\n"); - } else { - lqi = data[len]; - } - memcpy(rx_local_buf, data, len); enable_irq(lp->spi->irq); - skb = alloc_skb(IEEE802154_MTU, GFP_ATOMIC); + skb = dev_alloc_skb(IEEE802154_MTU); if (!skb) { dev_vdbg(&lp->spi->dev, "failed to allocate sk_buff\n"); return; } memcpy(skb_put(skb, len), rx_local_buf, len); - - /* We do not put CRC into the frame */ - skb_trim(skb, len - 2); - - ieee802154_rx_irqsafe(lp->dev, skb, lqi); + ieee802154_rx_irqsafe(lp->hw, skb, lqi); } static void @@ -814,20 +799,31 @@ at86rf230_rx_read_frame_complete(void *context) struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; const u8 *buf = lp->irq.buf; - const u8 len = buf[1]; + u8 len = buf[1]; + + if (!ieee802154_is_valid_psdu_len(len)) { + dev_vdbg(&lp->spi->dev, "corrupted frame received\n"); + len = IEEE802154_MTU; + } - at86rf230_rx(lp, buf + 2, len); + at86rf230_rx(lp, buf + 2, len, buf[2 + len]); } -static int +static void at86rf230_rx_read_frame(struct at86rf230_local *lp) { + int rc; + u8 *buf = lp->irq.buf; buf[0] = CMD_FB; lp->irq.trx.len = AT86RF2XX_MAX_BUF; lp->irq.msg.complete = at86rf230_rx_read_frame_complete; - return spi_async(lp->spi, &lp->irq.msg); + rc = spi_async(lp->spi, &lp->irq.msg); + if (rc) { + enable_irq(lp->spi->irq); + at86rf230_async_error(lp, &lp->irq, rc); + } } static void @@ -835,7 +831,6 @@ at86rf230_rx_trac_check(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; - int rc; /* Possible check on trac status here. This could be useful to make * some stats why receive is failed. Not used at the moment, but it's @@ -843,34 +838,31 @@ at86rf230_rx_trac_check(void *context) * The programming guide say do it so. 
*/ - rc = at86rf230_rx_read_frame(lp); - if (rc) { - enable_irq(lp->spi->irq); - at86rf230_async_error(lp, ctx, rc); - } + at86rf230_rx_read_frame(lp); } -static int +static void at86rf230_irq_trx_end(struct at86rf230_local *lp) { spin_lock(&lp->lock); if (lp->is_tx) { lp->is_tx = 0; spin_unlock(&lp->lock); - enable_irq(lp->spi->irq); if (lp->tx_aret) - return at86rf230_async_state_change(lp, &lp->irq, - STATE_FORCE_TX_ON, - at86rf230_tx_trac_status); + at86rf230_async_state_change(lp, &lp->irq, + STATE_FORCE_TX_ON, + at86rf230_tx_trac_status, + true); else - return at86rf230_async_state_change(lp, &lp->irq, - STATE_RX_AACK_ON, - at86rf230_tx_complete); + at86rf230_async_state_change(lp, &lp->irq, + STATE_RX_AACK_ON, + at86rf230_tx_complete, + true); } else { spin_unlock(&lp->lock); - return at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq, - at86rf230_rx_trac_check); + at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq, + at86rf230_rx_trac_check, true); } } @@ -881,12 +873,9 @@ at86rf230_irq_status(void *context) struct at86rf230_local *lp = ctx->lp; const u8 *buf = lp->irq.buf; const u8 irq = buf[1]; - int rc; if (irq & IRQ_TRX_END) { - rc = at86rf230_irq_trx_end(lp); - if (rc) - at86rf230_async_error(lp, ctx, rc); + at86rf230_irq_trx_end(lp); } else { enable_irq(lp->spi->irq); dev_err(&lp->spi->dev, "not supported irq %02x received\n", @@ -901,13 +890,14 @@ static irqreturn_t at86rf230_isr(int irq, void *data) u8 *buf = ctx->buf; int rc; - disable_irq_nosync(lp->spi->irq); + disable_irq_nosync(irq); buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG; ctx->trx.len = 2; ctx->msg.complete = at86rf230_irq_status; rc = spi_async(lp->spi, &ctx->msg); if (rc) { + enable_irq(irq); at86rf230_async_error(lp, ctx, rc); return IRQ_NONE; } @@ -960,22 +950,18 @@ at86rf230_xmit_tx_on(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; - int rc; - rc = at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON, - at86rf230_write_frame); - if (rc) - at86rf230_async_error(lp, ctx, rc); + at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON, + at86rf230_write_frame, false); } static int -at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb) +at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) { - struct at86rf230_local *lp = dev->priv; + struct at86rf230_local *lp = hw->priv; struct at86rf230_state_change *ctx = &lp->tx; void (*tx_complete)(void *context) = at86rf230_write_frame; - int rc; lp->tx_skb = skb; @@ -986,61 +972,39 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb) if (lp->tx_aret) tx_complete = at86rf230_xmit_tx_on; - rc = at86rf230_async_state_change(lp, ctx, STATE_TX_ON, - tx_complete); - if (rc) { - at86rf230_async_error(lp, ctx, rc); - return rc; - } - rc = wait_for_completion_interruptible_timeout(&lp->tx_complete, - msecs_to_jiffies(lp->data->t_tx_timeout)); - if (!rc) { - at86rf230_async_error(lp, ctx, rc); - return -ETIMEDOUT; - } - - /* Interfame spacing time, which is phy depend. - * TODO - * Move this handling in MAC 802.15.4 layer. - * This is currently a workaround to avoid fragmenation issues. 
- */ - if (skb->len > 18) - usleep_range(lp->data->t_lifs, lp->data->t_lifs + 10); - else - usleep_range(lp->data->t_sifs, lp->data->t_sifs + 10); + at86rf230_async_state_change(lp, ctx, STATE_TX_ON, tx_complete, false); return 0; } static int -at86rf230_ed(struct ieee802154_dev *dev, u8 *level) +at86rf230_ed(struct ieee802154_hw *hw, u8 *level) { - might_sleep(); BUG_ON(!level); *level = 0xbe; return 0; } static int -at86rf230_start(struct ieee802154_dev *dev) +at86rf230_start(struct ieee802154_hw *hw) { - return at86rf230_sync_state_change(dev->priv, STATE_RX_AACK_ON); + return at86rf230_sync_state_change(hw->priv, STATE_RX_AACK_ON); } static void -at86rf230_stop(struct ieee802154_dev *dev) +at86rf230_stop(struct ieee802154_hw *hw) { - at86rf230_sync_state_change(dev->priv, STATE_FORCE_TRX_OFF); + at86rf230_sync_state_change(hw->priv, STATE_FORCE_TRX_OFF); } static int -at86rf23x_set_channel(struct at86rf230_local *lp, int page, int channel) +at86rf23x_set_channel(struct at86rf230_local *lp, u8 page, u8 channel) { return at86rf230_write_subreg(lp, SR_CHANNEL, channel); } static int -at86rf212_set_channel(struct at86rf230_local *lp, int page, int channel) +at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel) { int rc; @@ -1061,44 +1025,60 @@ at86rf212_set_channel(struct at86rf230_local *lp, int page, int channel) if (rc < 0) return rc; + /* This sets the symbol_duration according to the modulation selected + * by channel and page on the 212. + * TODO: move this handling into the channel and page setting in + * cfg802154. We can do that because these timings follow the + * 802.15.4 standard, so cfg802154 could do the calculation + * generically. + * + * This should also be protected from the ifs_timer, i.e. cancel the + * timer and reinit it with the new value. For now, this is okay. + */ + if (channel == 0) { + if (page == 0) { + /* SUB:0 and BPSK:0 -> BPSK-20 */ + lp->hw->phy->symbol_duration = 50; + } else { + /* SUB:1 and BPSK:0 -> BPSK-40 */ + lp->hw->phy->symbol_duration = 25; + } + } else { + if (page == 0) + /* SUB:0 and BPSK:1 -> OQPSK-100/200/400 */ + lp->hw->phy->symbol_duration = 40; + else + /* SUB:1 and BPSK:1 -> OQPSK-250/500/1000 */ + lp->hw->phy->symbol_duration = 16; + } + + lp->hw->phy->lifs_period = IEEE802154_LIFS_PERIOD * + lp->hw->phy->symbol_duration; + lp->hw->phy->sifs_period = IEEE802154_SIFS_PERIOD * + lp->hw->phy->symbol_duration; + return at86rf230_write_subreg(lp, SR_CHANNEL, channel); } static int -at86rf230_channel(struct ieee802154_dev *dev, int page, int channel) +at86rf230_channel(struct ieee802154_hw *hw, u8 page, u8 channel) { - struct at86rf230_local *lp = dev->priv; + struct at86rf230_local *lp = hw->priv; int rc; - might_sleep(); - - if (page < 0 || page > 31 || - !(lp->dev->phy->channels_supported[page] & BIT(channel))) { - WARN_ON(1); - return -EINVAL; - } - rc = lp->data->set_channel(lp, page, channel); - if (rc < 0) - return rc; - /* Wait for PLL */ usleep_range(lp->data->t_channel_switch, lp->data->t_channel_switch + 10); - dev->phy->current_channel = channel; - dev->phy->current_page = page; - - return 0; + return rc; } static int -at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev, +at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw, struct ieee802154_hw_addr_filt *filt, unsigned long changed) { - struct at86rf230_local *lp = dev->priv; + struct at86rf230_local *lp = hw->priv; - if (changed & IEEE802515_AFILT_SADDR_CHANGED) { + if (changed & IEEE802154_AFILT_SADDR_CHANGED) { u16 addr = le16_to_cpu(filt->short_addr); dev_vdbg(&lp->spi->dev, @@ -1107,7 +1087,7 @@
at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev, __at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8); } - if (changed & IEEE802515_AFILT_PANID_CHANGED) { + if (changed & IEEE802154_AFILT_PANID_CHANGED) { u16 pan = le16_to_cpu(filt->pan_id); dev_vdbg(&lp->spi->dev, @@ -1116,7 +1096,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev, __at86rf230_write(lp, RG_PAN_ID_1, pan >> 8); } - if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) { + if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) { u8 i, addr[8]; memcpy(addr, &filt->ieee_addr, 8); @@ -1126,7 +1106,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev, __at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]); } - if (changed & IEEE802515_AFILT_PANC_CHANGED) { + if (changed & IEEE802154_AFILT_PANC_CHANGED) { dev_vdbg(&lp->spi->dev, "at86rf230_set_hw_addr_filt called for panc change\n"); if (filt->pan_coord) @@ -1139,9 +1119,9 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev, } static int -at86rf230_set_txpower(struct ieee802154_dev *dev, int db) +at86rf230_set_txpower(struct ieee802154_hw *hw, int db) { - struct at86rf230_local *lp = dev->priv; + struct at86rf230_local *lp = hw->priv; /* typical maximum output is 5dBm with RG_PHY_TX_PWR 0x60, lower five * bits decrease power in 1dB steps. 0x60 represents extra PA gain of @@ -1158,17 +1138,17 @@ at86rf230_set_txpower(struct ieee802154_dev *dev, int db) } static int -at86rf230_set_lbt(struct ieee802154_dev *dev, bool on) +at86rf230_set_lbt(struct ieee802154_hw *hw, bool on) { - struct at86rf230_local *lp = dev->priv; + struct at86rf230_local *lp = hw->priv; return at86rf230_write_subreg(lp, SR_CSMA_LBT_MODE, on); } static int -at86rf230_set_cca_mode(struct ieee802154_dev *dev, u8 mode) +at86rf230_set_cca_mode(struct ieee802154_hw *hw, u8 mode) { - struct at86rf230_local *lp = dev->priv; + struct at86rf230_local *lp = hw->priv; return at86rf230_write_subreg(lp, SR_CCA_MODE, mode); } @@ -1186,9 +1166,9 @@ at86rf23x_get_desens_steps(struct at86rf230_local *lp, s32 level) } static int -at86rf230_set_cca_ed_level(struct ieee802154_dev *dev, s32 level) +at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 level) { - struct at86rf230_local *lp = dev->priv; + struct at86rf230_local *lp = hw->priv; if (level < lp->data->rssi_base_val || level > 30) return -EINVAL; @@ -1198,15 +1178,12 @@ at86rf230_set_cca_ed_level(struct ieee802154_dev *dev, s32 level) } static int -at86rf230_set_csma_params(struct ieee802154_dev *dev, u8 min_be, u8 max_be, +at86rf230_set_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries) { - struct at86rf230_local *lp = dev->priv; + struct at86rf230_local *lp = hw->priv; int rc; - if (min_be > max_be || max_be > 8 || retries > 5) - return -EINVAL; - rc = at86rf230_write_subreg(lp, SR_MIN_BE, min_be); if (rc) return rc; @@ -1219,15 +1196,13 @@ at86rf230_set_csma_params(struct ieee802154_dev *dev, u8 min_be, u8 max_be, } static int -at86rf230_set_frame_retries(struct ieee802154_dev *dev, s8 retries) +at86rf230_set_frame_retries(struct ieee802154_hw *hw, s8 retries) { - struct at86rf230_local *lp = dev->priv; + struct at86rf230_local *lp = hw->priv; int rc = 0; - if (retries < -1 || retries > 15) - return -EINVAL; - lp->tx_aret = retries >= 0; + lp->max_frame_retries = retries; if (retries >= 0) rc = at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries); @@ -1235,9 +1210,36 @@ at86rf230_set_frame_retries(struct ieee802154_dev *dev, s8 retries) return rc; } -static struct ieee802154_ops at86rf230_ops = { +static int 
+at86rf230_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on) +{ + struct at86rf230_local *lp = hw->priv; + int rc; + + if (on) { + rc = at86rf230_write_subreg(lp, SR_AACK_DIS_ACK, 1); + if (rc < 0) + return rc; + + rc = at86rf230_write_subreg(lp, SR_AACK_PROM_MODE, 1); + if (rc < 0) + return rc; + } else { + rc = at86rf230_write_subreg(lp, SR_AACK_PROM_MODE, 0); + if (rc < 0) + return rc; + + rc = at86rf230_write_subreg(lp, SR_AACK_DIS_ACK, 0); + if (rc < 0) + return rc; + } + + return 0; +} + +static const struct ieee802154_ops at86rf230_ops = { .owner = THIS_MODULE, - .xmit = at86rf230_xmit, + .xmit_async = at86rf230_xmit, .ed = at86rf230_ed, .set_channel = at86rf230_channel, .start = at86rf230_start, @@ -1249,6 +1251,7 @@ static struct ieee802154_ops at86rf230_ops = { .set_cca_ed_level = at86rf230_set_cca_ed_level, .set_csma_params = at86rf230_set_csma_params, .set_frame_retries = at86rf230_set_frame_retries, + .set_promiscuous_mode = at86rf230_set_promiscuous_mode, }; static struct at86rf2xx_chip_data at86rf233_data = { @@ -1259,8 +1262,6 @@ static struct at86rf2xx_chip_data at86rf233_data = { .t_off_to_tx_on = 80, .t_frame = 4096, .t_p_ack = 545, - .t_sifs = 192, - .t_lifs = 480, .t_tx_timeout = 2000, .rssi_base_val = -91, .set_channel = at86rf23x_set_channel, @@ -1275,8 +1276,6 @@ static struct at86rf2xx_chip_data at86rf231_data = { .t_off_to_tx_on = 110, .t_frame = 4096, .t_p_ack = 545, - .t_sifs = 192, - .t_lifs = 480, .t_tx_timeout = 2000, .rssi_base_val = -91, .set_channel = at86rf23x_set_channel, @@ -1291,8 +1290,6 @@ static struct at86rf2xx_chip_data at86rf212_data = { .t_off_to_tx_on = 200, .t_frame = 4096, .t_p_ack = 545, - .t_sifs = 192, - .t_lifs = 480, .t_tx_timeout = 2000, .rssi_base_val = -100, .set_channel = at86rf212_set_channel, @@ -1354,7 +1351,11 @@ static int at86rf230_hw_init(struct at86rf230_local *lp) return -EINVAL; } - return 0; + /* Force setting slotted operation bit to 0. Sometimes the atben + * sets this bit and I don't know why. We set this always force + * to zero while probing. 
+ */ + return at86rf230_write_subreg(lp, SR_SLOTTED_OPERATION, 0); } static struct at86rf230_platform_data * @@ -1409,9 +1410,10 @@ at86rf230_detect_device(struct at86rf230_local *lp) return -EINVAL; } - lp->dev->extra_tx_headroom = 0; - lp->dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK | - IEEE802154_HW_TXPOWER | IEEE802154_HW_CSMA; + lp->hw->extra_tx_headroom = 0; + lp->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AACK | + IEEE802154_HW_TXPOWER | IEEE802154_HW_ARET | + IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS; switch (part) { case 2: @@ -1421,15 +1423,19 @@ at86rf230_detect_device(struct at86rf230_local *lp) case 3: chip = "at86rf231"; lp->data = &at86rf231_data; - lp->dev->phy->channels_supported[0] = 0x7FFF800; + lp->hw->phy->channels_supported[0] = 0x7FFF800; + lp->hw->phy->current_channel = 11; + lp->hw->phy->symbol_duration = 16; break; case 7: chip = "at86rf212"; if (version == 1) { lp->data = &at86rf212_data; - lp->dev->flags |= IEEE802154_HW_LBT; - lp->dev->phy->channels_supported[0] = 0x00007FF; - lp->dev->phy->channels_supported[2] = 0x00007FF; + lp->hw->flags |= IEEE802154_HW_LBT; + lp->hw->phy->channels_supported[0] = 0x00007FF; + lp->hw->phy->channels_supported[2] = 0x00007FF; + lp->hw->phy->current_channel = 5; + lp->hw->phy->symbol_duration = 25; } else { rc = -ENOTSUPP; } @@ -1437,7 +1443,9 @@ at86rf230_detect_device(struct at86rf230_local *lp) case 11: chip = "at86rf233"; lp->data = &at86rf233_data; - lp->dev->phy->channels_supported[0] = 0x7FFF800; + lp->hw->phy->channels_supported[0] = 0x7FFF800; + lp->hw->phy->current_channel = 13; + lp->hw->phy->symbol_duration = 16; break; default: chip = "unkown"; @@ -1478,7 +1486,7 @@ at86rf230_setup_spi_messages(struct at86rf230_local *lp) static int at86rf230_probe(struct spi_device *spi) { struct at86rf230_platform_data *pdata; - struct ieee802154_dev *dev; + struct ieee802154_hw *hw; struct at86rf230_local *lp; unsigned int status; int rc, irq_type; @@ -1517,14 +1525,16 @@ static int at86rf230_probe(struct spi_device *spi) usleep_range(120, 240); } - dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops); - if (!dev) + hw = ieee802154_alloc_hw(sizeof(*lp), &at86rf230_ops); + if (!hw) return -ENOMEM; - lp = dev->priv; - lp->dev = dev; + lp = hw->priv; + lp->hw = hw; lp->spi = spi; - dev->parent = &spi->dev; + hw->parent = &spi->dev; + hw->vif_data_size = sizeof(*lp); + ieee802154_random_extended_addr(&hw->phy->perm_extended_addr); lp->regmap = devm_regmap_init_spi(spi, &at86rf230_regmap_spi_config); if (IS_ERR(lp->regmap)) { @@ -1541,7 +1551,6 @@ static int at86rf230_probe(struct spi_device *spi) goto free_dev; spin_lock_init(&lp->lock); - init_completion(&lp->tx_complete); init_completion(&lp->state_complete); spi_set_drvdata(spi, lp); @@ -1564,14 +1573,14 @@ static int at86rf230_probe(struct spi_device *spi) if (rc) goto free_dev; - rc = ieee802154_register_device(lp->dev); + rc = ieee802154_register_hw(lp->hw); if (rc) goto free_dev; return rc; free_dev: - ieee802154_free_device(lp->dev); + ieee802154_free_hw(lp->hw); return rc; } @@ -1582,8 +1591,8 @@ static int at86rf230_remove(struct spi_device *spi) /* mask all at86rf230 irq's */ at86rf230_write_subreg(lp, SR_IRQ_MASK, 0); - ieee802154_unregister_device(lp->dev); - ieee802154_free_device(lp->dev); + ieee802154_unregister_hw(lp->hw); + ieee802154_free_hw(lp->hw); dev_dbg(&spi->dev, "unregistered at86rf230\n"); return 0; diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c index 
8a5ac7ab2300008ef2c8839272eb72b5eae1759c..f9df9fa86d5f0010d2d87ecbeca4a8df395c08c1 100644 --- a/drivers/net/ieee802154/cc2520.c +++ b/drivers/net/ieee802154/cc2520.c @@ -21,10 +21,10 @@ #include #include #include +#include #include -#include -#include +#include #define SPI_COMMAND_BUFFER 3 #define HIGH 1 @@ -193,7 +193,7 @@ /* Driver private information */ struct cc2520_private { struct spi_device *spi; /* SPI device structure */ - struct ieee802154_dev *dev; /* IEEE-802.15.4 device */ + struct ieee802154_hw *hw; /* IEEE-802.15.4 device */ u8 *buf; /* SPI TX/Rx data buffer */ struct mutex buffer_mutex; /* SPI buffer mutex */ bool is_tx; /* Flag for sync b/w Tx and Rx */ @@ -453,20 +453,20 @@ cc2520_read_rxfifo(struct cc2520_private *priv, u8 *data, u8 len, u8 *lqi) return status; } -static int cc2520_start(struct ieee802154_dev *dev) +static int cc2520_start(struct ieee802154_hw *hw) { - return cc2520_cmd_strobe(dev->priv, CC2520_CMD_SRXON); + return cc2520_cmd_strobe(hw->priv, CC2520_CMD_SRXON); } -static void cc2520_stop(struct ieee802154_dev *dev) +static void cc2520_stop(struct ieee802154_hw *hw) { - cc2520_cmd_strobe(dev->priv, CC2520_CMD_SRFOFF); + cc2520_cmd_strobe(hw->priv, CC2520_CMD_SRFOFF); } static int -cc2520_tx(struct ieee802154_dev *dev, struct sk_buff *skb) +cc2520_tx(struct ieee802154_hw *hw, struct sk_buff *skb) { - struct cc2520_private *priv = dev->priv; + struct cc2520_private *priv = hw->priv; unsigned long flags; int rc; u8 status = 0; @@ -524,7 +524,7 @@ static int cc2520_rx(struct cc2520_private *priv) if (len < 2 || len > IEEE802154_MTU) return -EINVAL; - skb = alloc_skb(len, GFP_KERNEL); + skb = dev_alloc_skb(len); if (!skb) return -ENOMEM; @@ -536,7 +536,7 @@ static int cc2520_rx(struct cc2520_private *priv) skb_trim(skb, skb->len - 2); - ieee802154_rx_irqsafe(priv->dev, skb, lqi); + ieee802154_rx_irqsafe(priv->hw, skb, lqi); dev_vdbg(&priv->spi->dev, "RXFIFO: %x %x\n", len, lqi); @@ -544,9 +544,9 @@ static int cc2520_rx(struct cc2520_private *priv) } static int -cc2520_ed(struct ieee802154_dev *dev, u8 *level) +cc2520_ed(struct ieee802154_hw *hw, u8 *level) { - struct cc2520_private *priv = dev->priv; + struct cc2520_private *priv = hw->priv; u8 status = 0xff; u8 rssi; int ret; @@ -569,12 +569,11 @@ cc2520_ed(struct ieee802154_dev *dev, u8 *level) } static int -cc2520_set_channel(struct ieee802154_dev *dev, int page, int channel) +cc2520_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel) { - struct cc2520_private *priv = dev->priv; + struct cc2520_private *priv = hw->priv; int ret; - might_sleep(); dev_dbg(&priv->spi->dev, "trying to set channel\n"); BUG_ON(page != 0); @@ -588,12 +587,12 @@ cc2520_set_channel(struct ieee802154_dev *dev, int page, int channel) } static int -cc2520_filter(struct ieee802154_dev *dev, +cc2520_filter(struct ieee802154_hw *hw, struct ieee802154_hw_addr_filt *filt, unsigned long changed) { - struct cc2520_private *priv = dev->priv; + struct cc2520_private *priv = hw->priv; - if (changed & IEEE802515_AFILT_PANID_CHANGED) { + if (changed & IEEE802154_AFILT_PANID_CHANGED) { u16 panid = le16_to_cpu(filt->pan_id); dev_vdbg(&priv->spi->dev, @@ -602,7 +601,7 @@ cc2520_filter(struct ieee802154_dev *dev, sizeof(panid), (u8 *)&panid); } - if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) { + if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) { dev_vdbg(&priv->spi->dev, "cc2520_filter called for IEEE addr\n"); cc2520_write_ram(priv, CC2520RAM_IEEEADDR, @@ -610,7 +609,7 @@ cc2520_filter(struct ieee802154_dev *dev, (u8 *)&filt->ieee_addr); } 
- if (changed & IEEE802515_AFILT_SADDR_CHANGED) { + if (changed & IEEE802154_AFILT_SADDR_CHANGED) { u16 addr = le16_to_cpu(filt->short_addr); dev_vdbg(&priv->spi->dev, @@ -619,7 +618,7 @@ cc2520_filter(struct ieee802154_dev *dev, sizeof(addr), (u8 *)&addr); } - if (changed & IEEE802515_AFILT_PANC_CHANGED) { + if (changed & IEEE802154_AFILT_PANC_CHANGED) { dev_vdbg(&priv->spi->dev, "cc2520_filter called for panc change\n"); if (filt->pan_coord) @@ -631,11 +630,11 @@ cc2520_filter(struct ieee802154_dev *dev, return 0; } -static struct ieee802154_ops cc2520_ops = { +static const struct ieee802154_ops cc2520_ops = { .owner = THIS_MODULE, .start = cc2520_start, .stop = cc2520_stop, - .xmit = cc2520_tx, + .xmit_sync = cc2520_tx, .ed = cc2520_ed, .set_channel = cc2520_set_channel, .set_hw_addr_filt = cc2520_filter, @@ -645,27 +644,29 @@ static int cc2520_register(struct cc2520_private *priv) { int ret = -ENOMEM; - priv->dev = ieee802154_alloc_device(sizeof(*priv), &cc2520_ops); - if (!priv->dev) + priv->hw = ieee802154_alloc_hw(sizeof(*priv), &cc2520_ops); + if (!priv->hw) goto err_ret; - priv->dev->priv = priv; - priv->dev->parent = &priv->spi->dev; - priv->dev->extra_tx_headroom = 0; + priv->hw->priv = priv; + priv->hw->parent = &priv->spi->dev; + priv->hw->extra_tx_headroom = 0; + priv->hw->vif_data_size = sizeof(*priv); /* We do support only 2.4 Ghz */ - priv->dev->phy->channels_supported[0] = 0x7FFF800; - priv->dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK; + priv->hw->phy->channels_supported[0] = 0x7FFF800; + priv->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK | + IEEE802154_HW_AFILT; dev_vdbg(&priv->spi->dev, "registered cc2520\n"); - ret = ieee802154_register_device(priv->dev); + ret = ieee802154_register_hw(priv->hw); if (ret) goto err_free_device; return 0; err_free_device: - ieee802154_free_device(priv->dev); + ieee802154_free_hw(priv->hw); err_ret: return ret; } @@ -857,7 +858,7 @@ static int cc2520_probe(struct spi_device *spi) pinctrl = devm_pinctrl_get_select_default(&spi->dev); if (IS_ERR(pinctrl)) dev_warn(&spi->dev, - "pinctrl pins are not configured"); + "pinctrl pins are not configured\n"); pdata = cc2520_get_platform_data(spi); if (!pdata) { @@ -1002,8 +1003,8 @@ static int cc2520_remove(struct spi_device *spi) mutex_destroy(&priv->buffer_mutex); flush_work(&priv->fifop_irqwork); - ieee802154_unregister_device(priv->dev); - ieee802154_free_device(priv->dev); + ieee802154_unregister_hw(priv->hw); + ieee802154_free_hw(priv->hw); return 0; } diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c deleted file mode 100644 index 6cbc56ad9ff49256fa218e87c031c5c9f371d17a..0000000000000000000000000000000000000000 --- a/drivers/net/ieee802154/fakehard.c +++ /dev/null @@ -1,430 +0,0 @@ -/* - * Sample driver for HardMAC IEEE 802.15.4 devices - * - * Copyright (C) 2009 Siemens AG - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Written by: - * Dmitry Eremin-Solenikov - */ -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -struct fakehard_priv { - struct wpan_phy *phy; -}; - -static struct wpan_phy *fake_to_phy(const struct net_device *dev) -{ - struct fakehard_priv *priv = netdev_priv(dev); - return priv->phy; -} - -/** - * fake_get_phy - Return a phy corresponding to this device. - * @dev: The network device for which to return the wan-phy object - * - * This function returns a wpan-phy object corresponding to the passed - * network device. Reference counter for wpan-phy object is incremented, - * so when the wpan-phy isn't necessary, you should drop the reference - * via @wpan_phy_put() call. - */ -static struct wpan_phy *fake_get_phy(const struct net_device *dev) -{ - struct wpan_phy *phy = fake_to_phy(dev); - return to_phy(get_device(&phy->dev)); -} - -/** - * fake_get_pan_id - Retrieve the PAN ID of the device. - * @dev: The network device to retrieve the PAN of. - * - * Return the ID of the PAN from the PIB. - */ -static __le16 fake_get_pan_id(const struct net_device *dev) -{ - BUG_ON(dev->type != ARPHRD_IEEE802154); - - return cpu_to_le16(0xeba1); -} - -/** - * fake_get_short_addr - Retrieve the short address of the device. - * @dev: The network device to retrieve the short address of. - * - * Returns the IEEE 802.15.4 short-form address cached for this - * device. If the device has not yet had a short address assigned - * then this should return 0xFFFF to indicate a lack of association. - */ -static __le16 fake_get_short_addr(const struct net_device *dev) -{ - BUG_ON(dev->type != ARPHRD_IEEE802154); - - return cpu_to_le16(0x1); -} - -/** - * fake_get_dsn - Retrieve the DSN of the device. - * @dev: The network device to retrieve the DSN for. - * - * Returns the IEEE 802.15.4 DSN for the network device. - * The DSN is the sequence number which will be added to each - * packet or MAC command frame by the MAC during transmission. - * - * DSN means 'Data Sequence Number'. - * - * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006 - * document. - */ -static u8 fake_get_dsn(const struct net_device *dev) -{ - BUG_ON(dev->type != ARPHRD_IEEE802154); - - return 0x00; /* DSN are implemented in HW, so return just 0 */ -} - -/** - * fake_assoc_req - Make an association request to the HW. - * @dev: The network device which we are associating to a network. - * @addr: The coordinator with which we wish to associate. - * @channel: The channel on which to associate. - * @cap: The capability information field to use in the association. - * - * Start an association with a coordinator. The coordinator's address - * and PAN ID can be found in @addr. - * - * Note: This is in section 7.3.1 and 7.5.3.1 of the IEEE - * 802.15.4-2006 document. - */ -static int fake_assoc_req(struct net_device *dev, - struct ieee802154_addr *addr, u8 channel, u8 page, u8 cap) -{ - struct wpan_phy *phy = fake_to_phy(dev); - - mutex_lock(&phy->pib_lock); - phy->current_channel = channel; - phy->current_page = page; - mutex_unlock(&phy->pib_lock); - - /* We simply emulate it here */ - return ieee802154_nl_assoc_confirm(dev, fake_get_short_addr(dev), - IEEE802154_SUCCESS); -} - -/** - * fake_assoc_resp - Send an association response to a device. - * @dev: The network device on which to send the response. - * @addr: The address of the device to respond to. - * @short_addr: The assigned short address for the device (if any). - * @status: The result of the association request. 
- * - * Queue the association response of the coordinator to another - * device's attempt to associate with the network which we - * coordinate. This is then added to the indirect-send queue to be - * transmitted to the end device when it polls for data. - * - * Note: This is in section 7.3.2 and 7.5.3.1 of the IEEE - * 802.15.4-2006 document. - */ -static int fake_assoc_resp(struct net_device *dev, - struct ieee802154_addr *addr, __le16 short_addr, u8 status) -{ - return 0; -} - -/** - * fake_disassoc_req - Disassociate a device from a network. - * @dev: The network device on which we're disassociating a device. - * @addr: The device to disassociate from the network. - * @reason: The reason to give to the device for being disassociated. - * - * This sends a disassociation notification to the device being - * disassociated from the network. - * - * Note: This is in section 7.5.3.2 of the IEEE 802.15.4-2006 - * document, with the reason described in 7.3.3.2. - */ -static int fake_disassoc_req(struct net_device *dev, - struct ieee802154_addr *addr, u8 reason) -{ - return ieee802154_nl_disassoc_confirm(dev, IEEE802154_SUCCESS); -} - -/** - * fake_start_req - Start an IEEE 802.15.4 PAN. - * @dev: The network device on which to start the PAN. - * @addr: The coordinator address to use when starting the PAN. - * @channel: The channel on which to start the PAN. - * @bcn_ord: Beacon order. - * @sf_ord: Superframe order. - * @pan_coord: Whether or not we are the PAN coordinator or just - * requesting a realignment perhaps? - * @blx: Battery Life Extension feature bitfield. - * @coord_realign: Something to realign something else. - * - * If pan_coord is non-zero then this starts a network with the - * provided parameters, otherwise it attempts a coordinator - * realignment of the stated network instead. - * - * Note: This is in section 7.5.2.3 of the IEEE 802.15.4-2006 - * document, with 7.3.8 describing coordinator realignment. - */ -static int fake_start_req(struct net_device *dev, - struct ieee802154_addr *addr, u8 channel, u8 page, - u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx, - u8 coord_realign) -{ - struct wpan_phy *phy = fake_to_phy(dev); - - mutex_lock(&phy->pib_lock); - phy->current_channel = channel; - phy->current_page = page; - mutex_unlock(&phy->pib_lock); - - /* We don't emulate beacons here at all, so START should fail */ - ieee802154_nl_start_confirm(dev, IEEE802154_INVALID_PARAMETER); - return 0; -} - -/** - * fake_scan_req - Start a channel scan. - * @dev: The network device on which to perform a channel scan. - * @type: The type of scan to perform. - * @channels: The channel bitmask to scan. - * @duration: How long to spend on each channel. - * - * This starts either a passive (energy) scan or an active (PAN) scan - * on the channels indicated in the @channels bitmask. The duration of - * the scan is measured in terms of superframe duration. Specifically, - * the scan will spend aBaseSuperFrameDuration * ((2^n) + 1) on each - * channel. - * - * Note: This is in section 7.5.2.1 of the IEEE 802.15.4-2006 document. - */ -static int fake_scan_req(struct net_device *dev, u8 type, u32 channels, - u8 page, u8 duration) -{ - u8 edl[27] = {}; - return ieee802154_nl_scan_confirm(dev, IEEE802154_SUCCESS, type, - channels, page, - type == IEEE802154_MAC_SCAN_ED ? 
edl : NULL); -} - -static struct ieee802154_mlme_ops fake_mlme = { - .assoc_req = fake_assoc_req, - .assoc_resp = fake_assoc_resp, - .disassoc_req = fake_disassoc_req, - .start_req = fake_start_req, - .scan_req = fake_scan_req, - - .get_phy = fake_get_phy, - - .get_pan_id = fake_get_pan_id, - .get_short_addr = fake_get_short_addr, - .get_dsn = fake_get_dsn, -}; - -static int ieee802154_fake_open(struct net_device *dev) -{ - netif_start_queue(dev); - return 0; -} - -static int ieee802154_fake_close(struct net_device *dev) -{ - netif_stop_queue(dev); - return 0; -} - -static netdev_tx_t ieee802154_fake_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; - - /* FIXME: do hardware work here ... */ - - dev_kfree_skb(skb); - return NETDEV_TX_OK; -} - - -static int ieee802154_fake_ioctl(struct net_device *dev, struct ifreq *ifr, - int cmd) -{ - struct sockaddr_ieee802154 *sa = - (struct sockaddr_ieee802154 *)&ifr->ifr_addr; - u16 pan_id, short_addr; - - switch (cmd) { - case SIOCGIFADDR: - /* FIXME: fixed here, get from device IRL */ - pan_id = le16_to_cpu(fake_get_pan_id(dev)); - short_addr = le16_to_cpu(fake_get_short_addr(dev)); - if (pan_id == IEEE802154_PANID_BROADCAST || - short_addr == IEEE802154_ADDR_BROADCAST) - return -EADDRNOTAVAIL; - - sa->family = AF_IEEE802154; - sa->addr.addr_type = IEEE802154_ADDR_SHORT; - sa->addr.pan_id = pan_id; - sa->addr.short_addr = short_addr; - return 0; - } - return -ENOIOCTLCMD; -} - -static int ieee802154_fake_mac_addr(struct net_device *dev, void *p) -{ - return -EBUSY; /* HW address is built into the device */ -} - -static const struct net_device_ops fake_ops = { - .ndo_open = ieee802154_fake_open, - .ndo_stop = ieee802154_fake_close, - .ndo_start_xmit = ieee802154_fake_xmit, - .ndo_do_ioctl = ieee802154_fake_ioctl, - .ndo_set_mac_address = ieee802154_fake_mac_addr, -}; - -static void ieee802154_fake_destruct(struct net_device *dev) -{ - struct wpan_phy *phy = fake_to_phy(dev); - - wpan_phy_unregister(phy); - free_netdev(dev); - wpan_phy_free(phy); -} - -static void ieee802154_fake_setup(struct net_device *dev) -{ - dev->addr_len = IEEE802154_ADDR_LEN; - memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN); - dev->features = NETIF_F_HW_CSUM; - dev->needed_tailroom = 2; /* FCS */ - dev->mtu = 127; - dev->tx_queue_len = 10; - dev->type = ARPHRD_IEEE802154; - dev->flags = IFF_NOARP | IFF_BROADCAST; - dev->watchdog_timeo = 0; - dev->destructor = ieee802154_fake_destruct; -} - - -static int ieee802154fake_probe(struct platform_device *pdev) -{ - struct net_device *dev; - struct fakehard_priv *priv; - struct wpan_phy *phy = wpan_phy_alloc(0); - int err; - - if (!phy) - return -ENOMEM; - - dev = alloc_netdev(sizeof(struct fakehard_priv), "hardwpan%d", - NET_NAME_UNKNOWN, ieee802154_fake_setup); - if (!dev) { - wpan_phy_free(phy); - return -ENOMEM; - } - - memcpy(dev->dev_addr, "\xba\xbe\xca\xfe\xde\xad\xbe\xef", - dev->addr_len); - - /* - * For now we'd like to emulate 2.4 GHz-only device, - * both O-QPSK and CSS - */ - /* 2.4 GHz O-QPSK 802.15.4-2003 */ - phy->channels_supported[0] |= 0x7FFF800; - /* 2.4 GHz CSS 802.15.4a-2007 */ - phy->channels_supported[3] |= 0x3fff; - - phy->transmit_power = 0xbf; - - dev->netdev_ops = &fake_ops; - dev->ml_priv = &fake_mlme; - - priv = netdev_priv(dev); - priv->phy = phy; - - wpan_phy_set_dev(phy, &pdev->dev); - SET_NETDEV_DEV(dev, &phy->dev); - - platform_set_drvdata(pdev, dev); - - err = wpan_phy_register(phy); - if (err) - goto err_phy_reg; - - err = 
register_netdev(dev); - if (err) - goto err_netdev_reg; - - dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n"); - return 0; - -err_netdev_reg: - wpan_phy_unregister(phy); -err_phy_reg: - free_netdev(dev); - wpan_phy_free(phy); - return err; -} - -static int ieee802154fake_remove(struct platform_device *pdev) -{ - struct net_device *dev = platform_get_drvdata(pdev); - unregister_netdev(dev); - return 0; -} - -static struct platform_device *ieee802154fake_dev; - -static struct platform_driver ieee802154fake_driver = { - .probe = ieee802154fake_probe, - .remove = ieee802154fake_remove, - .driver = { - .name = "ieee802154hardmac", - .owner = THIS_MODULE, - }, -}; - -static __init int fake_init(void) -{ - ieee802154fake_dev = platform_device_register_simple( - "ieee802154hardmac", -1, NULL, 0); - return platform_driver_register(&ieee802154fake_driver); -} - -static __exit void fake_exit(void) -{ - platform_driver_unregister(&ieee802154fake_driver); - platform_device_unregister(ieee802154fake_dev); -} - -module_init(fake_init); -module_exit(fake_exit); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index 27d83207d24ce0de722144c74e98ce65601fcfc0..96947d724189d88862d0af034c8d486ec4bbb428 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * Written by: * Sergey Lapin * Dmitry Eremin-Solenikov @@ -29,12 +25,12 @@ #include #include #include -#include +#include static int numlbs = 1; struct fakelb_dev_priv { - struct ieee802154_dev *dev; + struct ieee802154_hw *hw; struct list_head list; struct fakelb_priv *fake; @@ -49,9 +45,8 @@ struct fakelb_priv { }; static int -fakelb_hw_ed(struct ieee802154_dev *dev, u8 *level) +fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level) { - might_sleep(); BUG_ON(!level); *level = 0xbe; @@ -59,14 +54,10 @@ fakelb_hw_ed(struct ieee802154_dev *dev, u8 *level) } static int -fakelb_hw_channel(struct ieee802154_dev *dev, int page, int channel) +fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel) { pr_debug("set channel to %d\n", channel); - might_sleep(); - dev->phy->current_page = page; - dev->phy->current_channel = channel; - return 0; } @@ -78,19 +69,17 @@ fakelb_hw_deliver(struct fakelb_dev_priv *priv, struct sk_buff *skb) spin_lock(&priv->lock); if (priv->working) { newskb = pskb_copy(skb, GFP_ATOMIC); - ieee802154_rx_irqsafe(priv->dev, newskb, 0xcc); + ieee802154_rx_irqsafe(priv->hw, newskb, 0xcc); } spin_unlock(&priv->lock); } static int -fakelb_hw_xmit(struct ieee802154_dev *dev, struct sk_buff *skb) +fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) { - struct fakelb_dev_priv *priv = dev->priv; + struct fakelb_dev_priv *priv = hw->priv; struct fakelb_priv *fake = priv->fake; - might_sleep(); - read_lock_bh(&fake->lock); if (priv->list.next == priv->list.prev) { /* we are the only one device */ @@ -99,8 +88,8 @@ fakelb_hw_xmit(struct ieee802154_dev *dev, struct sk_buff *skb) struct fakelb_dev_priv *dp; list_for_each_entry(dp, &priv->fake->list, list) { if (dp != priv && - (dp->dev->phy->current_channel == - priv->dev->phy->current_channel)) + (dp->hw->phy->current_channel == + 
priv->hw->phy->current_channel)) fakelb_hw_deliver(dp, skb); } } @@ -110,8 +99,8 @@ fakelb_hw_xmit(struct ieee802154_dev *dev, struct sk_buff *skb) } static int -fakelb_hw_start(struct ieee802154_dev *dev) { - struct fakelb_dev_priv *priv = dev->priv; +fakelb_hw_start(struct ieee802154_hw *hw) { + struct fakelb_dev_priv *priv = hw->priv; int ret = 0; spin_lock(&priv->lock); @@ -125,17 +114,17 @@ fakelb_hw_start(struct ieee802154_dev *dev) { } static void -fakelb_hw_stop(struct ieee802154_dev *dev) { - struct fakelb_dev_priv *priv = dev->priv; +fakelb_hw_stop(struct ieee802154_hw *hw) { + struct fakelb_dev_priv *priv = hw->priv; spin_lock(&priv->lock); priv->working = 0; spin_unlock(&priv->lock); } -static struct ieee802154_ops fakelb_ops = { +static const struct ieee802154_ops fakelb_ops = { .owner = THIS_MODULE, - .xmit = fakelb_hw_xmit, + .xmit_sync = fakelb_hw_xmit, .ed = fakelb_hw_ed, .set_channel = fakelb_hw_channel, .start = fakelb_hw_start, @@ -150,54 +139,54 @@ static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake) { struct fakelb_dev_priv *priv; int err; - struct ieee802154_dev *ieee; + struct ieee802154_hw *hw; - ieee = ieee802154_alloc_device(sizeof(*priv), &fakelb_ops); - if (!ieee) + hw = ieee802154_alloc_hw(sizeof(*priv), &fakelb_ops); + if (!hw) return -ENOMEM; - priv = ieee->priv; - priv->dev = ieee; + priv = hw->priv; + priv->hw = hw; /* 868 MHz BPSK 802.15.4-2003 */ - ieee->phy->channels_supported[0] |= 1; + hw->phy->channels_supported[0] |= 1; /* 915 MHz BPSK 802.15.4-2003 */ - ieee->phy->channels_supported[0] |= 0x7fe; + hw->phy->channels_supported[0] |= 0x7fe; /* 2.4 GHz O-QPSK 802.15.4-2003 */ - ieee->phy->channels_supported[0] |= 0x7FFF800; + hw->phy->channels_supported[0] |= 0x7FFF800; /* 868 MHz ASK 802.15.4-2006 */ - ieee->phy->channels_supported[1] |= 1; + hw->phy->channels_supported[1] |= 1; /* 915 MHz ASK 802.15.4-2006 */ - ieee->phy->channels_supported[1] |= 0x7fe; + hw->phy->channels_supported[1] |= 0x7fe; /* 868 MHz O-QPSK 802.15.4-2006 */ - ieee->phy->channels_supported[2] |= 1; + hw->phy->channels_supported[2] |= 1; /* 915 MHz O-QPSK 802.15.4-2006 */ - ieee->phy->channels_supported[2] |= 0x7fe; + hw->phy->channels_supported[2] |= 0x7fe; /* 2.4 GHz CSS 802.15.4a-2007 */ - ieee->phy->channels_supported[3] |= 0x3fff; + hw->phy->channels_supported[3] |= 0x3fff; /* UWB Sub-gigahertz 802.15.4a-2007 */ - ieee->phy->channels_supported[4] |= 1; + hw->phy->channels_supported[4] |= 1; /* UWB Low band 802.15.4a-2007 */ - ieee->phy->channels_supported[4] |= 0x1e; + hw->phy->channels_supported[4] |= 0x1e; /* UWB High band 802.15.4a-2007 */ - ieee->phy->channels_supported[4] |= 0xffe0; + hw->phy->channels_supported[4] |= 0xffe0; /* 750 MHz O-QPSK 802.15.4c-2009 */ - ieee->phy->channels_supported[5] |= 0xf; + hw->phy->channels_supported[5] |= 0xf; /* 750 MHz MPSK 802.15.4c-2009 */ - ieee->phy->channels_supported[5] |= 0xf0; + hw->phy->channels_supported[5] |= 0xf0; /* 950 MHz BPSK 802.15.4d-2009 */ - ieee->phy->channels_supported[6] |= 0x3ff; + hw->phy->channels_supported[6] |= 0x3ff; /* 950 MHz GFSK 802.15.4d-2009 */ - ieee->phy->channels_supported[6] |= 0x3ffc00; + hw->phy->channels_supported[6] |= 0x3ffc00; INIT_LIST_HEAD(&priv->list); priv->fake = fake; spin_lock_init(&priv->lock); - ieee->parent = dev; + hw->parent = dev; - err = ieee802154_register_device(ieee); + err = ieee802154_register_hw(hw); if (err) goto err_reg; @@ -208,7 +197,7 @@ static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake) return 0; err_reg: - 
ieee802154_free_device(priv->dev); + ieee802154_free_hw(priv->hw); return err; } @@ -218,8 +207,8 @@ static void fakelb_del(struct fakelb_dev_priv *priv) list_del(&priv->list); write_unlock_bh(&priv->fake->lock); - ieee802154_unregister_device(priv->dev); - ieee802154_free_device(priv->dev); + ieee802154_unregister_hw(priv->hw); + ieee802154_free_hw(priv->hw); } static int fakelb_probe(struct platform_device *pdev) diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c index 07e0b887c350f07bb125b88479626187f814e828..a200fa16beae406757095f0f7090999b7a596b9c 100644 --- a/drivers/net/ieee802154/mrf24j40.c +++ b/drivers/net/ieee802154/mrf24j40.c @@ -13,18 +13,14 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include #include #include -#include +#include +#include #include -#include /* MRF24J40 Short Address Registers */ #define REG_RXMCR 0x00 /* Receive MAC control */ @@ -43,6 +39,8 @@ #define REG_TXSTBL 0x2E /* TX Stabilization */ #define REG_INTSTAT 0x31 /* Interrupt Status */ #define REG_INTCON 0x32 /* Interrupt Control */ +#define REG_GPIO 0x33 /* GPIO */ +#define REG_TRISGPIO 0x34 /* GPIO direction */ #define REG_RFCTL 0x36 /* RF Control Mode Register */ #define REG_BBREG1 0x39 /* Baseband Registers */ #define REG_BBREG2 0x3A /* */ @@ -63,6 +61,7 @@ #define REG_SLPCON1 0x220 #define REG_WAKETIMEL 0x222 /* Wake-up Time Match Value Low */ #define REG_WAKETIMEH 0x223 /* Wake-up Time Match Value High */ +#define REG_TESTMODE 0x22F /* Test mode */ #define REG_RX_FIFO 0x300 /* Receive FIFO */ /* Device configuration: Only channels 11-26 on page 0 are supported. 
*/ @@ -75,10 +74,12 @@ #define RX_FIFO_SIZE 144 /* From datasheet */ #define SET_CHANNEL_DELAY_US 192 /* From datasheet */ +enum mrf24j40_modules { MRF24J40, MRF24J40MA, MRF24J40MC }; + /* Device Private Data */ struct mrf24j40 { struct spi_device *spi; - struct ieee802154_dev *dev; + struct ieee802154_hw *hw; struct mutex buffer_mutex; /* only used to protect buf */ struct completion tx_complete; @@ -331,9 +332,9 @@ out: return ret; } -static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb) +static int mrf24j40_tx(struct ieee802154_hw *hw, struct sk_buff *skb) { - struct mrf24j40 *devrec = dev->priv; + struct mrf24j40 *devrec = hw->priv; u8 val; int ret = 0; @@ -382,7 +383,7 @@ err: return ret; } -static int mrf24j40_ed(struct ieee802154_dev *dev, u8 *level) +static int mrf24j40_ed(struct ieee802154_hw *hw, u8 *level) { /* TODO: */ pr_warn("mrf24j40: ed not implemented\n"); @@ -390,9 +391,9 @@ static int mrf24j40_ed(struct ieee802154_dev *dev, u8 *level) return 0; } -static int mrf24j40_start(struct ieee802154_dev *dev) +static int mrf24j40_start(struct ieee802154_hw *hw) { - struct mrf24j40 *devrec = dev->priv; + struct mrf24j40 *devrec = hw->priv; u8 val; int ret; @@ -407,9 +408,9 @@ static int mrf24j40_start(struct ieee802154_dev *dev) return 0; } -static void mrf24j40_stop(struct ieee802154_dev *dev) +static void mrf24j40_stop(struct ieee802154_hw *hw) { - struct mrf24j40 *devrec = dev->priv; + struct mrf24j40 *devrec = hw->priv; u8 val; int ret; @@ -422,10 +423,9 @@ static void mrf24j40_stop(struct ieee802154_dev *dev) write_short_reg(devrec, REG_INTCON, val); } -static int mrf24j40_set_channel(struct ieee802154_dev *dev, - int page, int channel) +static int mrf24j40_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel) { - struct mrf24j40 *devrec = dev->priv; + struct mrf24j40 *devrec = hw->priv; u8 val; int ret; @@ -453,15 +453,15 @@ static int mrf24j40_set_channel(struct ieee802154_dev *dev, return 0; } -static int mrf24j40_filter(struct ieee802154_dev *dev, +static int mrf24j40_filter(struct ieee802154_hw *hw, struct ieee802154_hw_addr_filt *filt, unsigned long changed) { - struct mrf24j40 *devrec = dev->priv; + struct mrf24j40 *devrec = hw->priv; dev_dbg(printdev(devrec), "filter\n"); - if (changed & IEEE802515_AFILT_SADDR_CHANGED) { + if (changed & IEEE802154_AFILT_SADDR_CHANGED) { /* Short Addr */ u8 addrh, addrl; @@ -474,7 +474,7 @@ static int mrf24j40_filter(struct ieee802154_dev *dev, "Set short addr to %04hx\n", filt->short_addr); } - if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) { + if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) { /* Device Address */ u8 i, addr[8]; @@ -490,7 +490,7 @@ static int mrf24j40_filter(struct ieee802154_dev *dev, #endif } - if (changed & IEEE802515_AFILT_PANID_CHANGED) { + if (changed & IEEE802154_AFILT_PANID_CHANGED) { /* PAN ID */ u8 panidl, panidh; @@ -502,7 +502,7 @@ static int mrf24j40_filter(struct ieee802154_dev *dev, dev_dbg(printdev(devrec), "Set PANID to %04hx\n", filt->pan_id); } - if (changed & IEEE802515_AFILT_PANC_CHANGED) { + if (changed & IEEE802154_AFILT_PANC_CHANGED) { /* Pan Coordinator */ u8 val; int ret; @@ -543,7 +543,7 @@ static int mrf24j40_handle_rx(struct mrf24j40 *devrec) val |= 4; /* SET RXDECINV */ write_short_reg(devrec, REG_BBREG1, val); - skb = alloc_skb(len, GFP_KERNEL); + skb = dev_alloc_skb(len); if (!skb) { ret = -ENOMEM; goto out; @@ -563,7 +563,7 @@ static int mrf24j40_handle_rx(struct mrf24j40 *devrec) /* TODO: Other drivers call ieee20154_rx_irqsafe() here (eg: cc2040, * also from 
a workqueue). I think irqsafe is not necessary here. * Can someone confirm? */ - ieee802154_rx_irqsafe(devrec->dev, skb, lqi); + ieee802154_rx_irqsafe(devrec->hw, skb, lqi); dev_dbg(printdev(devrec), "RX Handled\n"); @@ -578,9 +578,9 @@ out: return ret; } -static struct ieee802154_ops mrf24j40_ops = { +static const struct ieee802154_ops mrf24j40_ops = { .owner = THIS_MODULE, - .xmit = mrf24j40_tx, + .xmit_sync = mrf24j40_tx, .ed = mrf24j40_ed, .start = mrf24j40_start, .stop = mrf24j40_stop, @@ -691,6 +691,28 @@ static int mrf24j40_hw_init(struct mrf24j40 *devrec) if (ret) goto err_ret; + if (spi_get_device_id(devrec->spi)->driver_data == MRF24J40MC) { + /* Enable external amplifier. + * From MRF24J40MC datasheet section 1.3: Operation. + */ + read_long_reg(devrec, REG_TESTMODE, &val); + val |= 0x7; /* Configure GPIO 0-2 to control amplifier */ + write_long_reg(devrec, REG_TESTMODE, val); + + read_short_reg(devrec, REG_TRISGPIO, &val); + val |= 0x8; /* Set GPIO3 as output. */ + write_short_reg(devrec, REG_TRISGPIO, val); + + read_short_reg(devrec, REG_GPIO, &val); + val |= 0x8; /* Set GPIO3 HIGH to enable U5 voltage regulator */ + write_short_reg(devrec, REG_GPIO, val); + + /* Reduce TX pwr to meet FCC requirements. + * From MRF24J40MC datasheet section 3.1.1 + */ + write_long_reg(devrec, REG_RFCON3, 0x28); + } + return 0; err_ret: @@ -722,17 +744,18 @@ static int mrf24j40_probe(struct spi_device *spi) /* Register with the 802154 subsystem */ - devrec->dev = ieee802154_alloc_device(0, &mrf24j40_ops); - if (!devrec->dev) + devrec->hw = ieee802154_alloc_hw(0, &mrf24j40_ops); + if (!devrec->hw) goto err_ret; - devrec->dev->priv = devrec; - devrec->dev->parent = &devrec->spi->dev; - devrec->dev->phy->channels_supported[0] = CHANNEL_MASK; - devrec->dev->flags = IEEE802154_HW_OMIT_CKSUM|IEEE802154_HW_AACK; + devrec->hw->priv = devrec; + devrec->hw->parent = &devrec->spi->dev; + devrec->hw->phy->channels_supported[0] = CHANNEL_MASK; + devrec->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK | + IEEE802154_HW_AFILT; dev_dbg(printdev(devrec), "registered mrf24j40\n"); - ret = ieee802154_register_device(devrec->dev); + ret = ieee802154_register_hw(devrec->hw); if (ret) goto err_register_device; @@ -757,9 +780,9 @@ static int mrf24j40_probe(struct spi_device *spi) err_irq: err_hw_init: - ieee802154_unregister_device(devrec->dev); + ieee802154_unregister_hw(devrec->hw); err_register_device: - ieee802154_free_device(devrec->dev); + ieee802154_free_hw(devrec->hw); err_ret: return ret; } @@ -770,8 +793,8 @@ static int mrf24j40_remove(struct spi_device *spi) dev_dbg(printdev(devrec), "remove\n"); - ieee802154_unregister_device(devrec->dev); - ieee802154_free_device(devrec->dev); + ieee802154_unregister_hw(devrec->hw); + ieee802154_free_hw(devrec->hw); /* TODO: Will ieee802154_free_device() wait until ->xmit() is * complete? 
*/ @@ -779,8 +802,9 @@ static int mrf24j40_remove(struct spi_device *spi) } static const struct spi_device_id mrf24j40_ids[] = { - { "mrf24j40", 0 }, - { "mrf24j40ma", 0 }, + { "mrf24j40", MRF24J40 }, + { "mrf24j40ma", MRF24J40MA }, + { "mrf24j40mc", MRF24J40MC }, { }, }; MODULE_DEVICE_TABLE(spi, mrf24j40_ids); diff --git a/drivers/net/ipvlan/Makefile b/drivers/net/ipvlan/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..df79910192d6c92b489c6c71f2f5fbdc795a7782 --- /dev/null +++ b/drivers/net/ipvlan/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the Ethernet Ipvlan driver +# + +obj-$(CONFIG_IPVLAN) += ipvlan.o + +ipvlan-objs := ipvlan_core.o ipvlan_main.o diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h new file mode 100644 index 0000000000000000000000000000000000000000..2729f64b3e7e4b5a99887ec787b61d76fd1f54cd --- /dev/null +++ b/drivers/net/ipvlan/ipvlan.h @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2014 Mahesh Bandewar + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + */ +#ifndef __IPVLAN_H +#define __IPVLAN_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define IPVLAN_DRV "ipvlan" +#define IPV_DRV_VER "0.1" + +#define IPVLAN_HASH_SIZE (1 << BITS_PER_BYTE) +#define IPVLAN_HASH_MASK (IPVLAN_HASH_SIZE - 1) + +#define IPVLAN_MAC_FILTER_BITS 8 +#define IPVLAN_MAC_FILTER_SIZE (1 << IPVLAN_MAC_FILTER_BITS) +#define IPVLAN_MAC_FILTER_MASK (IPVLAN_MAC_FILTER_SIZE - 1) + +typedef enum { + IPVL_IPV6 = 0, + IPVL_ICMPV6, + IPVL_IPV4, + IPVL_ARP, +} ipvl_hdr_type; + +struct ipvl_pcpu_stats { + u64 rx_pkts; + u64 rx_bytes; + u64 rx_mcast; + u64 tx_pkts; + u64 tx_bytes; + struct u64_stats_sync syncp; + u32 rx_errs; + u32 tx_drps; +}; + +struct ipvl_port; + +struct ipvl_dev { + struct net_device *dev; + struct list_head pnode; + struct ipvl_port *port; + struct net_device *phy_dev; + struct list_head addrs; + int ipv4cnt; + int ipv6cnt; + struct ipvl_pcpu_stats *pcpu_stats; + DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE); + netdev_features_t sfeatures; + u32 msg_enable; + u16 mtu_adj; +}; + +struct ipvl_addr { + struct ipvl_dev *master; /* Back pointer to master */ + union { + struct in6_addr ip6; /* IPv6 address on logical interface */ + struct in_addr ip4; /* IPv4 address on logical interface */ + } ipu; +#define ip6addr ipu.ip6 +#define ip4addr ipu.ip4 + struct hlist_node hlnode; /* Hash-table linkage */ + struct list_head anode; /* logical-interface linkage */ + struct rcu_head rcu; + ipvl_hdr_type atype; +}; + +struct ipvl_port { + struct net_device *dev; + struct hlist_head hlhead[IPVLAN_HASH_SIZE]; + struct list_head ipvlans; + struct rcu_head rcu; + int count; + u16 mode; +}; + +static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d) +{ + return rcu_dereference(d->rx_handler_data); +} + +static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d) +{ + return rtnl_dereference(d->rx_handler_data); +} + +void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev); +void ipvlan_set_port_mode(struct ipvl_port *port, u32 nval); +void ipvlan_init_secret(void); +unsigned int ipvlan_mac_hash(const unsigned char *addr); +rx_handler_result_t 
ipvlan_handle_frame(struct sk_buff **pskb); +int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev); +void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr); +bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6); +struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, + const void *iaddr, bool is_v6); +void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync); +#endif /* __IPVLAN_H */ diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c new file mode 100644 index 0000000000000000000000000000000000000000..a14d87783245a94986232fbbe20b64a57194359d --- /dev/null +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -0,0 +1,607 @@ +/* Copyright (c) 2014 Mahesh Bandewar + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + */ + +#include "ipvlan.h" + +static u32 ipvlan_jhash_secret; + +void ipvlan_init_secret(void) +{ + net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret)); +} + +static void ipvlan_count_rx(const struct ipvl_dev *ipvlan, + unsigned int len, bool success, bool mcast) +{ + if (!ipvlan) + return; + + if (likely(success)) { + struct ipvl_pcpu_stats *pcptr; + + pcptr = this_cpu_ptr(ipvlan->pcpu_stats); + u64_stats_update_begin(&pcptr->syncp); + pcptr->rx_pkts++; + pcptr->rx_bytes += len; + if (mcast) + pcptr->rx_mcast++; + u64_stats_update_end(&pcptr->syncp); + } else { + this_cpu_inc(ipvlan->pcpu_stats->rx_errs); + } +} + +static u8 ipvlan_get_v6_hash(const void *iaddr) +{ + const struct in6_addr *ip6_addr = iaddr; + + return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) & + IPVLAN_HASH_MASK; +} + +static u8 ipvlan_get_v4_hash(const void *iaddr) +{ + const struct in_addr *ip4_addr = iaddr; + + return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) & + IPVLAN_HASH_MASK; +} + +struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, + const void *iaddr, bool is_v6) +{ + struct ipvl_addr *addr; + u8 hash; + + hash = is_v6 ? ipvlan_get_v6_hash(iaddr) : + ipvlan_get_v4_hash(iaddr); + hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode) { + if (is_v6 && addr->atype == IPVL_IPV6 && + ipv6_addr_equal(&addr->ip6addr, iaddr)) + return addr; + else if (!is_v6 && addr->atype == IPVL_IPV4 && + addr->ip4addr.s_addr == + ((struct in_addr *)iaddr)->s_addr) + return addr; + } + return NULL; +} + +void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr) +{ + struct ipvl_port *port = ipvlan->port; + u8 hash; + + hash = (addr->atype == IPVL_IPV6) ? 
+ ipvlan_get_v6_hash(&addr->ip6addr) : + ipvlan_get_v4_hash(&addr->ip4addr); + hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); +} + +void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync) +{ + hlist_del_rcu(&addr->hlnode); + if (sync) + synchronize_rcu(); +} + +bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6) +{ + struct ipvl_port *port = ipvlan->port; + struct ipvl_addr *addr; + + list_for_each_entry(addr, &ipvlan->addrs, anode) { + if ((is_v6 && addr->atype == IPVL_IPV6 && + ipv6_addr_equal(&addr->ip6addr, iaddr)) || + (!is_v6 && addr->atype == IPVL_IPV4 && + addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr)) + return true; + } + + if (ipvlan_ht_addr_lookup(port, iaddr, is_v6)) + return true; + + return false; +} + +static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type) +{ + void *lyr3h = NULL; + + switch (skb->protocol) { + case htons(ETH_P_ARP): { + struct arphdr *arph; + + if (unlikely(!pskb_may_pull(skb, sizeof(*arph)))) + return NULL; + + arph = arp_hdr(skb); + *type = IPVL_ARP; + lyr3h = arph; + break; + } + case htons(ETH_P_IP): { + u32 pktlen; + struct iphdr *ip4h; + + if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h)))) + return NULL; + + ip4h = ip_hdr(skb); + pktlen = ntohs(ip4h->tot_len); + if (ip4h->ihl < 5 || ip4h->version != 4) + return NULL; + if (skb->len < pktlen || pktlen < (ip4h->ihl * 4)) + return NULL; + + *type = IPVL_IPV4; + lyr3h = ip4h; + break; + } + case htons(ETH_P_IPV6): { + struct ipv6hdr *ip6h; + + if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h)))) + return NULL; + + ip6h = ipv6_hdr(skb); + if (ip6h->version != 6) + return NULL; + + *type = IPVL_IPV6; + lyr3h = ip6h; + /* Only Neighbour Solicitation pkts need different treatment */ + if (ipv6_addr_any(&ip6h->saddr) && + ip6h->nexthdr == NEXTHDR_ICMP) { + *type = IPVL_ICMPV6; + lyr3h = ip6h + 1; + } + break; + } + default: + return NULL; + } + + return lyr3h; +} + +unsigned int ipvlan_mac_hash(const unsigned char *addr) +{ + u32 hash = jhash_1word(__get_unaligned_cpu32(addr+2), + ipvlan_jhash_secret); + + return hash & IPVLAN_MAC_FILTER_MASK; +} + +static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb, + const struct ipvl_dev *in_dev, bool local) +{ + struct ethhdr *eth = eth_hdr(skb); + struct ipvl_dev *ipvlan; + struct sk_buff *nskb; + unsigned int len; + unsigned int mac_hash; + int ret; + + if (skb->protocol == htons(ETH_P_PAUSE)) + return; + + list_for_each_entry(ipvlan, &port->ipvlans, pnode) { + if (local && (ipvlan == in_dev)) + continue; + + mac_hash = ipvlan_mac_hash(eth->h_dest); + if (!test_bit(mac_hash, ipvlan->mac_filters)) + continue; + + ret = NET_RX_DROP; + len = skb->len + ETH_HLEN; + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + goto mcast_acct; + + if (ether_addr_equal(eth->h_dest, ipvlan->phy_dev->broadcast)) + nskb->pkt_type = PACKET_BROADCAST; + else + nskb->pkt_type = PACKET_MULTICAST; + + nskb->dev = ipvlan->dev; + if (local) + ret = dev_forward_skb(ipvlan->dev, nskb); + else + ret = netif_rx(nskb); +mcast_acct: + ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true); + } + + /* Locally generated? ...Forward a copy to the main-device as + * well. On the RX side we'll ignore it (won't give it to any + * of the virtual devices).
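+ * Each slave already received its own clone in the loop above, so this avoids duplicate delivery.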
+ */ + if (local) { + nskb = skb_clone(skb, GFP_ATOMIC); + if (nskb) { + if (ether_addr_equal(eth->h_dest, port->dev->broadcast)) + nskb->pkt_type = PACKET_BROADCAST; + else + nskb->pkt_type = PACKET_MULTICAST; + + dev_forward_skb(port->dev, nskb); + } + } +} + +static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb, + bool local) +{ + struct ipvl_dev *ipvlan = addr->master; + struct net_device *dev = ipvlan->dev; + unsigned int len; + rx_handler_result_t ret = RX_HANDLER_CONSUMED; + bool success = false; + + len = skb->len + ETH_HLEN; + if (unlikely(!(dev->flags & IFF_UP))) { + kfree_skb(skb); + goto out; + } + + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + goto out; + + skb->dev = dev; + skb->pkt_type = PACKET_HOST; + + if (local) { + if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS) + success = true; + } else { + ret = RX_HANDLER_ANOTHER; + success = true; + } + +out: + ipvlan_count_rx(ipvlan, len, success, false); + return ret; +} + +static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, + void *lyr3h, int addr_type, + bool use_dest) +{ + struct ipvl_addr *addr = NULL; + + if (addr_type == IPVL_IPV6) { + struct ipv6hdr *ip6h; + struct in6_addr *i6addr; + + ip6h = (struct ipv6hdr *)lyr3h; + i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr; + addr = ipvlan_ht_addr_lookup(port, i6addr, true); + } else if (addr_type == IPVL_ICMPV6) { + struct nd_msg *ndmh; + struct in6_addr *i6addr; + + /* Make sure that the NeighborSolicitation ICMPv6 packets + * are handled to avoid DAD issue. + */ + ndmh = (struct nd_msg *)lyr3h; + if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) { + i6addr = &ndmh->target; + addr = ipvlan_ht_addr_lookup(port, i6addr, true); + } + } else if (addr_type == IPVL_IPV4) { + struct iphdr *ip4h; + __be32 *i4addr; + + ip4h = (struct iphdr *)lyr3h; + i4addr = use_dest ? 
&ip4h->daddr : &ip4h->saddr; + addr = ipvlan_ht_addr_lookup(port, i4addr, false); + } else if (addr_type == IPVL_ARP) { + struct arphdr *arph; + unsigned char *arp_ptr; + __be32 dip; + + arph = (struct arphdr *)lyr3h; + arp_ptr = (unsigned char *)(arph + 1); + if (use_dest) + arp_ptr += (2 * port->dev->addr_len) + 4; + else + arp_ptr += port->dev->addr_len; + + memcpy(&dip, arp_ptr, 4); + addr = ipvlan_ht_addr_lookup(port, &dip, false); + } + + return addr; +} + +static int ipvlan_process_v4_outbound(struct sk_buff *skb) +{ + const struct iphdr *ip4h = ip_hdr(skb); + struct net_device *dev = skb->dev; + struct rtable *rt; + int err, ret = NET_XMIT_DROP; + struct flowi4 fl4 = { + .flowi4_oif = dev->iflink, + .flowi4_tos = RT_TOS(ip4h->tos), + .flowi4_flags = FLOWI_FLAG_ANYSRC, + .daddr = ip4h->daddr, + .saddr = ip4h->saddr, + }; + + rt = ip_route_output_flow(dev_net(dev), &fl4, NULL); + if (IS_ERR(rt)) + goto err; + + if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) { + ip_rt_put(rt); + goto err; + } + skb_dst_drop(skb); + skb_dst_set(skb, &rt->dst); + err = ip_local_out(skb); + if (unlikely(net_xmit_eval(err))) + dev->stats.tx_errors++; + else + ret = NET_XMIT_SUCCESS; + goto out; +err: + dev->stats.tx_errors++; + kfree_skb(skb); +out: + return ret; +} + +static int ipvlan_process_v6_outbound(struct sk_buff *skb) +{ + const struct ipv6hdr *ip6h = ipv6_hdr(skb); + struct net_device *dev = skb->dev; + struct dst_entry *dst; + int err, ret = NET_XMIT_DROP; + struct flowi6 fl6 = { + .flowi6_iif = skb->dev->ifindex, + .daddr = ip6h->daddr, + .saddr = ip6h->saddr, + .flowi6_flags = FLOWI_FLAG_ANYSRC, + .flowlabel = ip6_flowinfo(ip6h), + .flowi6_mark = skb->mark, + .flowi6_proto = ip6h->nexthdr, + }; + + dst = ip6_route_output(dev_net(dev), NULL, &fl6); + if (IS_ERR(dst)) + goto err; + + skb_dst_drop(skb); + skb_dst_set(skb, dst); + err = ip6_local_out(skb); + if (unlikely(net_xmit_eval(err))) + dev->stats.tx_errors++; + else + ret = NET_XMIT_SUCCESS; + goto out; +err: + dev->stats.tx_errors++; + kfree_skb(skb); +out: + return ret; +} + +static int ipvlan_process_outbound(struct sk_buff *skb, + const struct ipvl_dev *ipvlan) +{ + struct ethhdr *ethh = eth_hdr(skb); + int ret = NET_XMIT_DROP; + + /* In this mode we don't care about multicast and broadcast traffic */ + if (is_multicast_ether_addr(ethh->h_dest)) { + pr_warn_ratelimited("Dropped {multi|broad}cast of type= [%x]\n", + ntohs(skb->protocol)); + kfree_skb(skb); + goto out; + } + + /* The ipvlan is a pseudo-L2 device, so the packets that we receive + * will have an L2 header, which needs to be discarded before they are + * processed further in the net-ns of the main-device.
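+ * skb_pull() below strips that header and skb_reset_network_header() re-points the network header at what is now the start of data.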
+ */ + if (skb_mac_header_was_set(skb)) { + skb_pull(skb, sizeof(*ethh)); + skb->mac_header = (typeof(skb->mac_header))~0U; + skb_reset_network_header(skb); + } + + if (skb->protocol == htons(ETH_P_IPV6)) + ret = ipvlan_process_v6_outbound(skb); + else if (skb->protocol == htons(ETH_P_IP)) + ret = ipvlan_process_v4_outbound(skb); + else { + pr_warn_ratelimited("Dropped outbound packet type=%x\n", + ntohs(skb->protocol)); + kfree_skb(skb); + } +out: + return ret; +} + +static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev) +{ + const struct ipvl_dev *ipvlan = netdev_priv(dev); + void *lyr3h; + struct ipvl_addr *addr; + int addr_type; + + lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); + if (!lyr3h) + goto out; + + addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true); + if (addr) + return ipvlan_rcv_frame(addr, skb, true); + +out: + skb->dev = ipvlan->phy_dev; + return ipvlan_process_outbound(skb, ipvlan); +} + +static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) +{ + const struct ipvl_dev *ipvlan = netdev_priv(dev); + struct ethhdr *eth = eth_hdr(skb); + struct ipvl_addr *addr; + void *lyr3h; + int addr_type; + + if (ether_addr_equal(eth->h_dest, eth->h_source)) { + lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); + if (lyr3h) { + addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true); + if (addr) + return ipvlan_rcv_frame(addr, skb, true); + } + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + return NET_XMIT_DROP; + + /* Packet definitely does not belong to any of the + * virtual devices, but the dest is local. So forward + * the skb for the main-dev. At the RX side we just return + * RX_PASS for it to be processed further on the stack. + */ + return dev_forward_skb(ipvlan->phy_dev, skb); + + } else if (is_multicast_ether_addr(eth->h_dest)) { + u8 ip_summed = skb->ip_summed; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + ipvlan_multicast_frame(ipvlan->port, skb, ipvlan, true); + skb->ip_summed = ip_summed; + } + + skb->dev = ipvlan->phy_dev; + return dev_queue_xmit(skb); +} + +int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev); + + if (!port) + goto out; + + if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) + goto out; + + switch(port->mode) { + case IPVLAN_MODE_L2: + return ipvlan_xmit_mode_l2(skb, dev); + case IPVLAN_MODE_L3: + return ipvlan_xmit_mode_l3(skb, dev); + } + + /* Should not reach here */ + WARN_ONCE(true, "ipvlan_queue_xmit() called for mode = [%hx]\n", + port->mode); +out: + kfree_skb(skb); + return NET_XMIT_DROP; +} + +static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port) +{ + struct ethhdr *eth = eth_hdr(skb); + struct ipvl_addr *addr; + void *lyr3h; + int addr_type; + + if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) { + lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); + if (!lyr3h) + return true; + + addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false); + if (addr) + return false; + } + + return true; +} + +static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb, + struct ipvl_port *port) +{ + void *lyr3h; + int addr_type; + struct ipvl_addr *addr; + struct sk_buff *skb = *pskb; + rx_handler_result_t ret = RX_HANDLER_PASS; + + lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); + if (!lyr3h) + goto out; + + addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); + if (addr) + ret = ipvlan_rcv_frame(addr, skb, false); + +out: + return 
ret; +} + +static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb, + struct ipvl_port *port) +{ + struct sk_buff *skb = *pskb; + struct ethhdr *eth = eth_hdr(skb); + rx_handler_result_t ret = RX_HANDLER_PASS; + void *lyr3h; + int addr_type; + + if (is_multicast_ether_addr(eth->h_dest)) { + if (ipvlan_external_frame(skb, port)) + ipvlan_multicast_frame(port, skb, NULL, false); + } else { + struct ipvl_addr *addr; + + lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); + if (!lyr3h) + return ret; + + addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); + if (addr) + ret = ipvlan_rcv_frame(addr, skb, false); + } + + return ret; +} + +rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev); + + if (!port) + return RX_HANDLER_PASS; + + switch (port->mode) { + case IPVLAN_MODE_L2: + return ipvlan_handle_mode_l2(pskb, port); + case IPVLAN_MODE_L3: + return ipvlan_handle_mode_l3(pskb, port); + } + + /* Should not reach here */ + WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n", + port->mode); + kfree_skb(skb); + return NET_RX_DROP; +} diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c new file mode 100644 index 0000000000000000000000000000000000000000..4f4099d5603d0b64f2a15b66e86dbee90260cead --- /dev/null +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -0,0 +1,795 @@ +/* Copyright (c) 2014 Mahesh Bandewar + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + */ + +#include "ipvlan.h" + +void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev) +{ + ipvlan->dev->mtu = dev->mtu - ipvlan->mtu_adj; +} + +void ipvlan_set_port_mode(struct ipvl_port *port, u32 nval) +{ + struct ipvl_dev *ipvlan; + + if (port->mode != nval) { + list_for_each_entry(ipvlan, &port->ipvlans, pnode) { + if (nval == IPVLAN_MODE_L3) + ipvlan->dev->flags |= IFF_NOARP; + else + ipvlan->dev->flags &= ~IFF_NOARP; + } + port->mode = nval; + } +} + +static int ipvlan_port_create(struct net_device *dev) +{ + struct ipvl_port *port; + int err, idx; + + if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) { + netdev_err(dev, "Master is either lo or non-ether device\n"); + return -EINVAL; + } + + if (netif_is_macvlan_port(dev)) { + netdev_err(dev, "Master is a macvlan port.\n"); + return -EBUSY; + } + + port = kzalloc(sizeof(struct ipvl_port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + port->dev = dev; + port->mode = IPVLAN_MODE_L3; + INIT_LIST_HEAD(&port->ipvlans); + for (idx = 0; idx < IPVLAN_HASH_SIZE; idx++) + INIT_HLIST_HEAD(&port->hlhead[idx]); + + err = netdev_rx_handler_register(dev, ipvlan_handle_frame, port); + if (err) + goto err; + + dev->priv_flags |= IFF_IPVLAN_MASTER; + return 0; + +err: + kfree_rcu(port, rcu); + return err; +} + +static void ipvlan_port_destroy(struct net_device *dev) +{ + struct ipvl_port *port = ipvlan_port_get_rtnl(dev); + + dev->priv_flags &= ~IFF_IPVLAN_MASTER; + netdev_rx_handler_unregister(dev); + kfree_rcu(port, rcu); +} + +/* ipvlan network devices have devices nesting below it and are a special + * "super class" of normal network devices; split their locks off into a + * separate class since they always nest. 
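+ * Without the separate class, lockdep would report false-positive lock recursion when an ipvlan device transmits over its lower device.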
+ */ +static struct lock_class_key ipvlan_netdev_xmit_lock_key; +static struct lock_class_key ipvlan_netdev_addr_lock_key; + +#define IPVLAN_FEATURES \ + (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ + NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ + NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) + +#define IPVLAN_STATE_MASK \ + ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) + +static void ipvlan_set_lockdep_class_one(struct net_device *dev, + struct netdev_queue *txq, + void *_unused) +{ + lockdep_set_class(&txq->_xmit_lock, &ipvlan_netdev_xmit_lock_key); +} + +static void ipvlan_set_lockdep_class(struct net_device *dev) +{ + lockdep_set_class(&dev->addr_list_lock, &ipvlan_netdev_addr_lock_key); + netdev_for_each_tx_queue(dev, ipvlan_set_lockdep_class_one, NULL); +} + +static int ipvlan_init(struct net_device *dev) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + const struct net_device *phy_dev = ipvlan->phy_dev; + + dev->state = (dev->state & ~IPVLAN_STATE_MASK) | + (phy_dev->state & IPVLAN_STATE_MASK); + dev->features = phy_dev->features & IPVLAN_FEATURES; + dev->features |= NETIF_F_LLTX; + dev->gso_max_size = phy_dev->gso_max_size; + dev->iflink = phy_dev->ifindex; + dev->hard_header_len = phy_dev->hard_header_len; + + ipvlan_set_lockdep_class(dev); + + ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats); + if (!ipvlan->pcpu_stats) + return -ENOMEM; + + return 0; +} + +static void ipvlan_uninit(struct net_device *dev) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct ipvl_port *port = ipvlan->port; + + free_percpu(ipvlan->pcpu_stats); + + port->count -= 1; + if (!port->count) + ipvlan_port_destroy(port->dev); +} + +static int ipvlan_open(struct net_device *dev) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct net_device *phy_dev = ipvlan->phy_dev; + struct ipvl_addr *addr; + + if (ipvlan->port->mode == IPVLAN_MODE_L3) + dev->flags |= IFF_NOARP; + else + dev->flags &= ~IFF_NOARP; + + if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { + list_for_each_entry(addr, &ipvlan->addrs, anode) + ipvlan_ht_addr_add(ipvlan, addr); + } + return dev_uc_add(phy_dev, phy_dev->dev_addr); +} + +static int ipvlan_stop(struct net_device *dev) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct net_device *phy_dev = ipvlan->phy_dev; + struct ipvl_addr *addr; + + dev_uc_unsync(phy_dev, dev); + dev_mc_unsync(phy_dev, dev); + + dev_uc_del(phy_dev, phy_dev->dev_addr); + + if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { + list_for_each_entry(addr, &ipvlan->addrs, anode) + ipvlan_ht_addr_del(addr, !dev->dismantle); + } + return 0; +} + +static netdev_tx_t ipvlan_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + const struct ipvl_dev *ipvlan = netdev_priv(dev); + int skblen = skb->len; + int ret; + + ret = ipvlan_queue_xmit(skb, dev); + if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { + struct ipvl_pcpu_stats *pcptr; + + pcptr = this_cpu_ptr(ipvlan->pcpu_stats); + + u64_stats_update_begin(&pcptr->syncp); + pcptr->tx_pkts++; + pcptr->tx_bytes += skblen; + u64_stats_update_end(&pcptr->syncp); + } else { + this_cpu_inc(ipvlan->pcpu_stats->tx_drps); + } + return ret; +} + +static netdev_features_t ipvlan_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + + return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES); +} + +static void ipvlan_change_rx_flags(struct 
net_device *dev, int change) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct net_device *phy_dev = ipvlan->phy_dev; + + if (change & IFF_ALLMULTI) + dev_set_allmulti(phy_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); +} + +static void ipvlan_set_broadcast_mac_filter(struct ipvl_dev *ipvlan, bool set) +{ + struct net_device *dev = ipvlan->dev; + unsigned int hashbit = ipvlan_mac_hash(dev->broadcast); + + if (set && !test_bit(hashbit, ipvlan->mac_filters)) + __set_bit(hashbit, ipvlan->mac_filters); + else if (!set && test_bit(hashbit, ipvlan->mac_filters)) + __clear_bit(hashbit, ipvlan->mac_filters); +} + +static void ipvlan_set_multicast_mac_filter(struct net_device *dev) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + + if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { + bitmap_fill(ipvlan->mac_filters, IPVLAN_MAC_FILTER_SIZE); + } else { + struct netdev_hw_addr *ha; + DECLARE_BITMAP(mc_filters, IPVLAN_MAC_FILTER_SIZE); + + bitmap_zero(mc_filters, IPVLAN_MAC_FILTER_SIZE); + netdev_for_each_mc_addr(ha, dev) + __set_bit(ipvlan_mac_hash(ha->addr), mc_filters); + + bitmap_copy(ipvlan->mac_filters, mc_filters, + IPVLAN_MAC_FILTER_SIZE); + } + dev_uc_sync(ipvlan->phy_dev, dev); + dev_mc_sync(ipvlan->phy_dev, dev); +} + +static struct rtnl_link_stats64 *ipvlan_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *s) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + + if (ipvlan->pcpu_stats) { + struct ipvl_pcpu_stats *pcptr; + u64 rx_pkts, rx_bytes, rx_mcast, tx_pkts, tx_bytes; + u32 rx_errs = 0, tx_drps = 0; + u32 strt; + int idx; + + for_each_possible_cpu(idx) { + pcptr = per_cpu_ptr(ipvlan->pcpu_stats, idx); + do { + strt = u64_stats_fetch_begin_irq(&pcptr->syncp); + rx_pkts = pcptr->rx_pkts; + rx_bytes = pcptr->rx_bytes; + rx_mcast = pcptr->rx_mcast; + tx_pkts = pcptr->tx_pkts; + tx_bytes = pcptr->tx_bytes; + } while (u64_stats_fetch_retry_irq(&pcptr->syncp, + strt)); + + s->rx_packets += rx_pkts; + s->rx_bytes += rx_bytes; + s->multicast += rx_mcast; + s->tx_packets += tx_pkts; + s->tx_bytes += tx_bytes; + + /* u32 values are updated without syncp protection.
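+ * A u32 load is a single machine word on all supported arches, so the worst case is a momentarily stale rx_errs/tx_drps value.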
*/ + rx_errs += pcptr->rx_errs; + tx_drps += pcptr->tx_drps; + } + s->rx_errors = rx_errs; + s->rx_dropped = rx_errs; + s->tx_dropped = tx_drps; + } + return s; +} + +static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct net_device *phy_dev = ipvlan->phy_dev; + + return vlan_vid_add(phy_dev, proto, vid); +} + +static int ipvlan_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct net_device *phy_dev = ipvlan->phy_dev; + + vlan_vid_del(phy_dev, proto, vid); + return 0; +} + +static const struct net_device_ops ipvlan_netdev_ops = { + .ndo_init = ipvlan_init, + .ndo_uninit = ipvlan_uninit, + .ndo_open = ipvlan_open, + .ndo_stop = ipvlan_stop, + .ndo_start_xmit = ipvlan_start_xmit, + .ndo_fix_features = ipvlan_fix_features, + .ndo_change_rx_flags = ipvlan_change_rx_flags, + .ndo_set_rx_mode = ipvlan_set_multicast_mac_filter, + .ndo_get_stats64 = ipvlan_get_stats64, + .ndo_vlan_rx_add_vid = ipvlan_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ipvlan_vlan_rx_kill_vid, +}; + +static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, const void *daddr, + const void *saddr, unsigned len) +{ + const struct ipvl_dev *ipvlan = netdev_priv(dev); + struct net_device *phy_dev = ipvlan->phy_dev; + + /* TODO Probably use a different field than dev_addr so that the + * mac-address on the virtual device is portable and can be carried + * while the packets use the mac-addr on the physical device. + */ + return dev_hard_header(skb, phy_dev, type, daddr, + saddr ? : dev->dev_addr, len); +} + +static const struct header_ops ipvlan_header_ops = { + .create = ipvlan_hard_header, + .rebuild = eth_rebuild_header, + .parse = eth_header_parse, + .cache = eth_header_cache, + .cache_update = eth_header_cache_update, +}; + +static int ipvlan_ethtool_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + const struct ipvl_dev *ipvlan = netdev_priv(dev); + + return __ethtool_get_settings(ipvlan->phy_dev, cmd); +} + +static void ipvlan_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, IPVLAN_DRV, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, IPV_DRV_VER, sizeof(drvinfo->version)); +} + +static u32 ipvlan_ethtool_get_msglevel(struct net_device *dev) +{ + const struct ipvl_dev *ipvlan = netdev_priv(dev); + + return ipvlan->msg_enable; +} + +static void ipvlan_ethtool_set_msglevel(struct net_device *dev, u32 value) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + + ipvlan->msg_enable = value; +} + +static const struct ethtool_ops ipvlan_ethtool_ops = { + .get_link = ethtool_op_get_link, + .get_settings = ipvlan_ethtool_get_settings, + .get_drvinfo = ipvlan_ethtool_get_drvinfo, + .get_msglevel = ipvlan_ethtool_get_msglevel, + .set_msglevel = ipvlan_ethtool_set_msglevel, +}; + +static int ipvlan_nl_changelink(struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev); + + if (data && data[IFLA_IPVLAN_MODE]) { + u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]); + + ipvlan_set_port_mode(port, nmode); + } + return 0; +} + +static size_t ipvlan_nl_getsize(const struct net_device *dev) +{ + return (0 + + nla_total_size(2) /* IFLA_IPVLAN_MODE */ + ); +} + +static int ipvlan_nl_validate(struct nlattr *tb[], struct nlattr *data[]) +{ + if (data 
&& data[IFLA_IPVLAN_MODE]) { + u16 mode = nla_get_u16(data[IFLA_IPVLAN_MODE]); + + if (mode < IPVLAN_MODE_L2 || mode >= IPVLAN_MODE_MAX) + return -EINVAL; + } + return 0; +} + +static int ipvlan_nl_fillinfo(struct sk_buff *skb, + const struct net_device *dev) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev); + int ret = -EINVAL; + + if (!port) + goto err; + + ret = -EMSGSIZE; + if (nla_put_u16(skb, IFLA_IPVLAN_MODE, port->mode)) + goto err; + + return 0; + +err: + return ret; +} + +static int ipvlan_link_new(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct ipvl_port *port; + struct net_device *phy_dev; + int err; + + if (!tb[IFLA_LINK]) + return -EINVAL; + + phy_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); + if (!phy_dev) + return -ENODEV; + + if (netif_is_ipvlan(phy_dev)) { + struct ipvl_dev *tmp = netdev_priv(phy_dev); + + phy_dev = tmp->phy_dev; + } else if (!netif_is_ipvlan_port(phy_dev)) { + err = ipvlan_port_create(phy_dev); + if (err < 0) + return err; + } + + port = ipvlan_port_get_rtnl(phy_dev); + if (data && data[IFLA_IPVLAN_MODE]) + port->mode = nla_get_u16(data[IFLA_IPVLAN_MODE]); + + ipvlan->phy_dev = phy_dev; + ipvlan->dev = dev; + ipvlan->port = port; + ipvlan->sfeatures = IPVLAN_FEATURES; + INIT_LIST_HEAD(&ipvlan->addrs); + ipvlan->ipv4cnt = 0; + ipvlan->ipv6cnt = 0; + + /* TODO Probably put random address here to be presented to the + * world but keep using the physical-dev address for the outgoing + * packets. + */ + memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN); + + dev->priv_flags |= IFF_IPVLAN_SLAVE; + + port->count += 1; + err = register_netdevice(dev); + if (err < 0) + goto ipvlan_destroy_port; + + err = netdev_upper_dev_link(phy_dev, dev); + if (err) + goto ipvlan_destroy_port; + + list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans); + netif_stacked_transfer_operstate(phy_dev, dev); + return 0; + +ipvlan_destroy_port: + port->count -= 1; + if (!port->count) + ipvlan_port_destroy(phy_dev); + + return err; +} + +static void ipvlan_link_delete(struct net_device *dev, struct list_head *head) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct ipvl_addr *addr, *next; + + if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { + list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { + ipvlan_ht_addr_del(addr, !dev->dismantle); + list_del_rcu(&addr->anode); + } + } + list_del_rcu(&ipvlan->pnode); + unregister_netdevice_queue(dev, head); + netdev_upper_dev_unlink(ipvlan->phy_dev, dev); +} + +static void ipvlan_link_setup(struct net_device *dev) +{ + ether_setup(dev); + + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); + dev->priv_flags |= IFF_UNICAST_FLT; + dev->netdev_ops = &ipvlan_netdev_ops; + dev->destructor = free_netdev; + dev->header_ops = &ipvlan_header_ops; + dev->ethtool_ops = &ipvlan_ethtool_ops; + dev->tx_queue_len = 0; +} + +static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] = +{ + [IFLA_IPVLAN_MODE] = { .type = NLA_U16 }, +}; + +static struct rtnl_link_ops ipvlan_link_ops = { + .kind = "ipvlan", + .priv_size = sizeof(struct ipvl_dev), + + .get_size = ipvlan_nl_getsize, + .policy = ipvlan_nl_policy, + .validate = ipvlan_nl_validate, + .fill_info = ipvlan_nl_fillinfo, + .changelink = ipvlan_nl_changelink, + .maxtype = IFLA_IPVLAN_MAX, + + .setup = ipvlan_link_setup, + .newlink = ipvlan_link_new, + .dellink = ipvlan_link_delete, +}; + 
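+/* These rtnl_link_ops are driven from user space over rtnetlink. With
+ * iproute2, for example, a slave device is created on top of a master
+ * (eth0 below is only an assumed name):
+ *
+ *	ip link add link eth0 name ipvl0 type ipvlan mode l2
+ *
+ * The request ends up in ipvlan_link_new() with IFLA_LINK holding the
+ * master's ifindex and IFLA_IPVLAN_MODE carrying one of the
+ * IPVLAN_MODE_* values range-checked in ipvlan_nl_validate().
+ */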
+static int ipvlan_link_register(struct rtnl_link_ops *ops) +{ + return rtnl_link_register(ops); +} + +static int ipvlan_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct ipvl_dev *ipvlan, *next; + struct ipvl_port *port; + LIST_HEAD(lst_kill); + + if (!netif_is_ipvlan_port(dev)) + return NOTIFY_DONE; + + port = ipvlan_port_get_rtnl(dev); + + switch (event) { + case NETDEV_CHANGE: + list_for_each_entry(ipvlan, &port->ipvlans, pnode) + netif_stacked_transfer_operstate(ipvlan->phy_dev, + ipvlan->dev); + break; + + case NETDEV_UNREGISTER: + if (dev->reg_state != NETREG_UNREGISTERING) + break; + + list_for_each_entry_safe(ipvlan, next, &port->ipvlans, + pnode) + ipvlan->dev->rtnl_link_ops->dellink(ipvlan->dev, + &lst_kill); + unregister_netdevice_many(&lst_kill); + break; + + case NETDEV_FEAT_CHANGE: + list_for_each_entry(ipvlan, &port->ipvlans, pnode) { + ipvlan->dev->features = dev->features & IPVLAN_FEATURES; + ipvlan->dev->gso_max_size = dev->gso_max_size; + netdev_features_change(ipvlan->dev); + } + break; + + case NETDEV_CHANGEMTU: + list_for_each_entry(ipvlan, &port->ipvlans, pnode) + ipvlan_adjust_mtu(ipvlan, dev); + break; + + case NETDEV_PRE_TYPE_CHANGE: + /* Forbid underlying device to change its type. */ + return NOTIFY_BAD; + } + return NOTIFY_DONE; +} + +static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) +{ + struct ipvl_addr *addr; + + if (ipvlan_addr_busy(ipvlan, ip6_addr, true)) { + netif_err(ipvlan, ifup, ipvlan->dev, + "Failed to add IPv6=%pI6c addr for %s intf\n", + ip6_addr, ipvlan->dev->name); + return -EINVAL; + } + addr = kzalloc(sizeof(struct ipvl_addr), GFP_ATOMIC); + if (!addr) + return -ENOMEM; + + addr->master = ipvlan; + memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); + addr->atype = IPVL_IPV6; + list_add_tail_rcu(&addr->anode, &ipvlan->addrs); + ipvlan->ipv6cnt++; + ipvlan_ht_addr_add(ipvlan, addr); + + return 0; +} + +static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) +{ + struct ipvl_addr *addr; + + addr = ipvlan_ht_addr_lookup(ipvlan->port, ip6_addr, true); + if (!addr) + return; + + ipvlan_ht_addr_del(addr, true); + list_del_rcu(&addr->anode); + ipvlan->ipv6cnt--; + WARN_ON(ipvlan->ipv6cnt < 0); + kfree_rcu(addr, rcu); + + return; +} + +static int ipvlan_addr6_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct inet6_ifaddr *if6 = (struct inet6_ifaddr *)ptr; + struct net_device *dev = (struct net_device *)if6->idev->dev; + struct ipvl_dev *ipvlan = netdev_priv(dev); + + if (!netif_is_ipvlan(dev)) + return NOTIFY_DONE; + + if (!ipvlan || !ipvlan->port) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_UP: + if (ipvlan_add_addr6(ipvlan, &if6->addr)) + return NOTIFY_BAD; + break; + + case NETDEV_DOWN: + ipvlan_del_addr6(ipvlan, &if6->addr); + break; + } + + return NOTIFY_OK; +} + +static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) +{ + struct ipvl_addr *addr; + + if (ipvlan_addr_busy(ipvlan, ip4_addr, false)) { + netif_err(ipvlan, ifup, ipvlan->dev, + "Failed to add IPv4=%pI4 on %s intf.\n", + ip4_addr, ipvlan->dev->name); + return -EINVAL; + } + addr = kzalloc(sizeof(struct ipvl_addr), GFP_KERNEL); + if (!addr) + return -ENOMEM; + + addr->master = ipvlan; + memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); + addr->atype = IPVL_IPV4; + list_add_tail_rcu(&addr->anode, &ipvlan->addrs); + ipvlan->ipv4cnt++; + 
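+ /* Publish the new address to the receive path: hash it into the
+ * port-wide lookup table so ingress packets can be demuxed to this
+ * slave, and open the broadcast MAC filter, which IPv4 needs so
+ * that ARP on the slave keeps working.
+ */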
ipvlan_ht_addr_add(ipvlan, addr); + ipvlan_set_broadcast_mac_filter(ipvlan, true); + + return 0; +} + +static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) +{ + struct ipvl_addr *addr; + + addr = ipvlan_ht_addr_lookup(ipvlan->port, ip4_addr, false); + if (!addr) + return; + + ipvlan_ht_addr_del(addr, true); + list_del_rcu(&addr->anode); + ipvlan->ipv4cnt--; + WARN_ON(ipvlan->ipv4cnt < 0); + if (!ipvlan->ipv4cnt) + ipvlan_set_broadcast_mac_filter(ipvlan, false); + kfree_rcu(addr, rcu); + + return; +} + +static int ipvlan_addr4_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct in_ifaddr *if4 = (struct in_ifaddr *)ptr; + struct net_device *dev = (struct net_device *)if4->ifa_dev->dev; + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct in_addr ip4_addr; + + if (!netif_is_ipvlan(dev)) + return NOTIFY_DONE; + + if (!ipvlan || !ipvlan->port) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_UP: + ip4_addr.s_addr = if4->ifa_address; + if (ipvlan_add_addr4(ipvlan, &ip4_addr)) + return NOTIFY_BAD; + break; + + case NETDEV_DOWN: + ip4_addr.s_addr = if4->ifa_address; + ipvlan_del_addr4(ipvlan, &ip4_addr); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block ipvlan_addr4_notifier_block __read_mostly = { + .notifier_call = ipvlan_addr4_event, +}; + +static struct notifier_block ipvlan_notifier_block __read_mostly = { + .notifier_call = ipvlan_device_event, +}; + +static struct notifier_block ipvlan_addr6_notifier_block __read_mostly = { + .notifier_call = ipvlan_addr6_event, +}; + +static int __init ipvlan_init_module(void) +{ + int err; + + ipvlan_init_secret(); + register_netdevice_notifier(&ipvlan_notifier_block); + register_inet6addr_notifier(&ipvlan_addr6_notifier_block); + register_inetaddr_notifier(&ipvlan_addr4_notifier_block); + + err = ipvlan_link_register(&ipvlan_link_ops); + if (err < 0) + goto error; + + return 0; +error: + unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block); + unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block); + unregister_netdevice_notifier(&ipvlan_notifier_block); + return err; +} + +static void __exit ipvlan_cleanup_module(void) +{ + rtnl_link_unregister(&ipvlan_link_ops); + unregister_netdevice_notifier(&ipvlan_notifier_block); + unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block); + unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block); +} + +module_init(ipvlan_init_module); +module_exit(ipvlan_cleanup_module); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mahesh Bandewar <maheshb@google.com>"); +MODULE_DESCRIPTION("Driver for L3 (IPv6/IPv4) based VLANs"); +MODULE_ALIAS_RTNL_LINK("ipvlan"); diff --git a/drivers/net/irda/act200l-sir.c b/drivers/net/irda/act200l-sir.c index 8ff084f1d23689590b0a6bcf7df8e16538b073ec..e8917511e1aae7c53c0d4dc04dddde4cb6ffb038 100644 --- a/drivers/net/irda/act200l-sir.c +++ b/drivers/net/irda/act200l-sir.c @@ -107,8 +107,6 @@ static int act200l_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; - IRDA_DEBUG(2, "%s()\n", __func__ ); - /* Power on the dongle */ sirdev_set_dtr_rts(dev, TRUE, TRUE); @@ -124,8 +122,6 @@ static int act200l_open(struct sir_dev *dev) static int act200l_close(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__ ); - /* Power off the dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); @@ -143,8 +139,6 @@ static int act200l_change_speed(struct sir_dev *dev, unsigned speed) u8 control[3]; int ret = 0; - IRDA_DEBUG(2, "%s()\n", __func__ ); - /* Clear DTR and set RTS to enter command mode */ sirdev_set_dtr_rts(dev,
FALSE, TRUE); @@ -212,8 +206,6 @@ static int act200l_reset(struct sir_dev *dev) }; int ret = 0; - IRDA_DEBUG(2, "%s()\n", __func__ ); - switch (state) { case SIRDEV_STATE_DONGLE_RESET: /* Reset the dongle : set RTS low for 25 ms */ @@ -240,7 +232,8 @@ static int act200l_reset(struct sir_dev *dev) dev->speed = 9600; break; default: - IRDA_ERROR("%s(), unknown state %d\n", __func__, state); + net_err_ratelimited("%s(), unknown state %d\n", + __func__, state); ret = -1; break; } diff --git a/drivers/net/irda/actisys-sir.c b/drivers/net/irda/actisys-sir.c index 50b2141a6103c428c195becaf7f32767081caaa9..e224b8b99517cb68792b8eedea0a4bda5b3f80f4 100644 --- a/drivers/net/irda/actisys-sir.c +++ b/drivers/net/irda/actisys-sir.c @@ -165,8 +165,7 @@ static int actisys_change_speed(struct sir_dev *dev, unsigned speed) int ret = 0; int i = 0; - IRDA_DEBUG(4, "%s(), speed=%d (was %d)\n", __func__, - speed, dev->speed); + pr_debug("%s(), speed=%d (was %d)\n", __func__, speed, dev->speed); /* dongle was already resetted from irda_request state machine, * we are in known state (dongle default) diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c index befa45f809c32976470d2a9f072a5580a578f791..588680a72fa13319f4fa5d45e92ee3b2eacb1617 100644 --- a/drivers/net/irda/ali-ircc.c +++ b/drivers/net/irda/ali-ircc.c @@ -154,12 +154,10 @@ static int __init ali_ircc_init(void) int reg, revision; int i = 0; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); - ret = platform_driver_register(&ali_ircc_driver); if (ret) { - IRDA_ERROR("%s, Can't register driver!\n", - ALI_IRCC_DRIVER_NAME); + net_err_ratelimited("%s, Can't register driver!\n", + ALI_IRCC_DRIVER_NAME); return ret; } @@ -168,7 +166,7 @@ static int __init ali_ircc_init(void) /* Probe for all the ALi chipsets we know about */ for (chip= chips; chip->name; chip++, i++) { - IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __func__, chip->name); + pr_debug("%s(), Probing for %s ...\n", __func__, chip->name); /* Try all config registers for this chip */ for (cfg=0; cfg<2; cfg++) @@ -198,12 +196,13 @@ static int __init ali_ircc_init(void) if (reg == chip->cid_value) { - IRDA_DEBUG(2, "%s(), Chip found at 0x%03x\n", __func__, cfg_base); + pr_debug("%s(), Chip found at 0x%03x\n", + __func__, cfg_base); outb(0x1F, cfg_base); revision = inb(cfg_base+1); - IRDA_DEBUG(2, "%s(), Found %s chip, revision=%d\n", __func__, - chip->name, revision); + pr_debug("%s(), Found %s chip, revision=%d\n", + __func__, chip->name, revision); /* * If the user supplies the base address, then @@ -225,15 +224,14 @@ static int __init ali_ircc_init(void) } else { - IRDA_DEBUG(2, "%s(), No %s chip at 0x%03x\n", __func__, chip->name, cfg_base); + pr_debug("%s(), No %s chip at 0x%03x\n", + __func__, chip->name, cfg_base); } /* Exit configuration */ outb(0xbb, cfg_base); } } - IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__); - if (ret) platform_driver_unregister(&ali_ircc_driver); @@ -250,8 +248,6 @@ static void __exit ali_ircc_cleanup(void) { int i; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); - for (i=0; i < ARRAY_SIZE(dev_self); i++) { if (dev_self[i]) ali_ircc_close(dev_self[i]); @@ -259,7 +255,6 @@ static void __exit ali_ircc_cleanup(void) platform_driver_unregister(&ali_ircc_driver); - IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__); } static const struct net_device_ops ali_ircc_sir_ops = { @@ -289,11 +284,9 @@ static int ali_ircc_open(int i, chipio_t *info) int 
dongle_id; int err; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); - if (i >= ARRAY_SIZE(dev_self)) { - IRDA_ERROR("%s(), maximum number of supported chips reached!\n", - __func__); + net_err_ratelimited("%s(), maximum number of supported chips reached!\n", + __func__); return -ENOMEM; } @@ -303,8 +296,8 @@ static int ali_ircc_open(int i, chipio_t *info) dev = alloc_irdadev(sizeof(*self)); if (dev == NULL) { - IRDA_ERROR("%s(), can't allocate memory for control block!\n", - __func__); + net_err_ratelimited("%s(), can't allocate memory for control block!\n", + __func__); return -ENOMEM; } @@ -328,8 +321,8 @@ static int ali_ircc_open(int i, chipio_t *info) /* Reserve the ioports that we need */ if (!request_region(self->io.fir_base, self->io.fir_ext, ALI_IRCC_DRIVER_NAME)) { - IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", __func__, - self->io.fir_base); + net_warn_ratelimited("%s(), can't get iobase of 0x%03x\n", + __func__, self->io.fir_base); err = -ENODEV; goto err_out1; } @@ -380,19 +373,20 @@ static int ali_ircc_open(int i, chipio_t *info) err = register_netdev(dev); if (err) { - IRDA_ERROR("%s(), register_netdev() failed!\n", __func__); + net_err_ratelimited("%s(), register_netdev() failed!\n", + __func__); goto err_out4; } - IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name); + net_info_ratelimited("IrDA: Registered device %s\n", dev->name); /* Check dongle id */ dongle_id = ali_ircc_read_dongle_id(i, info); - IRDA_MESSAGE("%s(), %s, Found dongle: %s\n", __func__, - ALI_IRCC_DRIVER_NAME, dongle_types[dongle_id]); + net_info_ratelimited("%s(), %s, Found dongle: %s\n", + __func__, ALI_IRCC_DRIVER_NAME, + dongle_types[dongle_id]); self->io.dongle_id = dongle_id; - IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__); return 0; @@ -421,8 +415,6 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self) { int iobase; - IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__); - IRDA_ASSERT(self != NULL, return -1;); iobase = self->io.fir_base; @@ -431,7 +423,7 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self) unregister_netdev(self->netdev); /* Release the PORT that this driver is using */ - IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", __func__, self->io.fir_base); + pr_debug("%s(), Releasing Region %03x\n", __func__, self->io.fir_base); release_region(self->io.fir_base, self->io.fir_ext); if (self->tx_buff.head) @@ -445,7 +437,6 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self) dev_self[self->index] = NULL; free_netdev(self->netdev); - IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__); return 0; } @@ -488,7 +479,6 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info) int cfg_base = info->cfg_base; int hi, low, reg; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); /* Enter Configuration */ outb(chip->entr1, cfg_base); @@ -507,13 +497,13 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info) info->sir_base = info->fir_base; - IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __func__, info->fir_base); + pr_debug("%s(), probing fir_base=0x%03x\n", __func__, info->fir_base); /* Read IRQ control register */ outb(0x70, cfg_base); reg = inb(cfg_base+1); info->irq = reg & 0x0f; - IRDA_DEBUG(2, "%s(), probing irq=%d\n", __func__, info->irq); + pr_debug("%s(), probing irq=%d\n", __func__, info->irq); /* Read DMA channel */ outb(0x74, cfg_base); @@ -521,26 +511,26 @@ static int ali_ircc_probe_53(ali_chip_t *chip, 
chipio_t *info) info->dma = reg & 0x07; if(info->dma == 0x04) - IRDA_WARNING("%s(), No DMA channel assigned !\n", __func__); + net_warn_ratelimited("%s(), No DMA channel assigned !\n", + __func__); else - IRDA_DEBUG(2, "%s(), probing dma=%d\n", __func__, info->dma); + pr_debug("%s(), probing dma=%d\n", __func__, info->dma); /* Read Enabled Status */ outb(0x30, cfg_base); reg = inb(cfg_base+1); info->enabled = (reg & 0x80) && (reg & 0x01); - IRDA_DEBUG(2, "%s(), probing enabled=%d\n", __func__, info->enabled); + pr_debug("%s(), probing enabled=%d\n", __func__, info->enabled); /* Read Power Status */ outb(0x22, cfg_base); reg = inb(cfg_base+1); info->suspended = (reg & 0x20); - IRDA_DEBUG(2, "%s(), probing suspended=%d\n", __func__, info->suspended); + pr_debug("%s(), probing suspended=%d\n", __func__, info->suspended); /* Exit configuration */ outb(0xbb, cfg_base); - IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__); return 0; } @@ -558,7 +548,6 @@ static int ali_ircc_setup(chipio_t *info) int version; int iobase = info->fir_base; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); /* Locking comments : * Most operations here need to be protected. We are called before @@ -578,8 +567,8 @@ static int ali_ircc_setup(chipio_t *info) /* Should be 0x00 in the M1535/M1535D */ if(version != 0x00) { - IRDA_ERROR("%s, Wrong chip version %02x\n", - ALI_IRCC_DRIVER_NAME, version); + net_err_ratelimited("%s, Wrong chip version %02x\n", + ALI_IRCC_DRIVER_NAME, version); return -1; } @@ -612,14 +601,13 @@ static int ali_ircc_setup(chipio_t *info) /* Switch to SIR space */ FIR2SIR(iobase); - IRDA_MESSAGE("%s, driver loaded (Benjamin Kong)\n", - ALI_IRCC_DRIVER_NAME); + net_info_ratelimited("%s, driver loaded (Benjamin Kong)\n", + ALI_IRCC_DRIVER_NAME); /* Enable receive interrupts */ // outb(UART_IER_RDI, iobase+UART_IER); //benjamin 2000/11/23 01:25PM // Turn on the interrupts in ali_ircc_net_open - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__); return 0; } @@ -636,7 +624,6 @@ static int ali_ircc_read_dongle_id (int i, chipio_t *info) int dongle_id, reg; int cfg_base = info->cfg_base; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); /* Enter Configuration */ outb(chips[i].entr1, cfg_base); @@ -650,13 +637,12 @@ static int ali_ircc_read_dongle_id (int i, chipio_t *info) outb(0xf0, cfg_base); reg = inb(cfg_base+1); dongle_id = ((reg>>6)&0x02) | ((reg>>5)&0x01); - IRDA_DEBUG(2, "%s(), probing dongle_id=%d, dongle_types=%s\n", __func__, - dongle_id, dongle_types[dongle_id]); + pr_debug("%s(), probing dongle_id=%d, dongle_types=%s\n", + __func__, dongle_id, dongle_types[dongle_id]); /* Exit configuration */ outb(0xbb, cfg_base); - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__); return dongle_id; } @@ -673,7 +659,6 @@ static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id) struct ali_ircc_cb *self; int ret; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); self = netdev_priv(dev); @@ -687,7 +672,6 @@ static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id) spin_unlock(&self->lock); - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__); return ret; } /* @@ -701,7 +685,6 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self) __u8 eir, OldMessageCount; int iobase, tmp; - IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__); iobase = self->io.fir_base; @@ -714,10 +697,10 @@ static 
irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self) //self->ier = inb(iobase+FIR_IER); 2000/12/1 04:32PM eir = self->InterruptID & self->ier; /* Mask out the interesting ones */ - IRDA_DEBUG(1, "%s(), self->InterruptID = %x\n", __func__,self->InterruptID); - IRDA_DEBUG(1, "%s(), self->LineStatus = %x\n", __func__,self->LineStatus); - IRDA_DEBUG(1, "%s(), self->ier = %x\n", __func__,self->ier); - IRDA_DEBUG(1, "%s(), eir = %x\n", __func__,eir); + pr_debug("%s(), self->InterruptID = %x\n", __func__, self->InterruptID); + pr_debug("%s(), self->LineStatus = %x\n", __func__, self->LineStatus); + pr_debug("%s(), self->ier = %x\n", __func__, self->ier); + pr_debug("%s(), eir = %x\n", __func__, eir); /* Disable interrupts */ SetCOMInterrupts(self, FALSE); @@ -728,7 +711,8 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self) { if (self->io.direction == IO_XMIT) /* TX */ { - IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Tx) *******\n", __func__); + pr_debug("%s(), ******* IIR_EOM (Tx) *******\n", + __func__); if(ali_ircc_dma_xmit_complete(self)) { @@ -747,23 +731,27 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self) } else /* RX */ { - IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Rx) *******\n", __func__); + pr_debug("%s(), ******* IIR_EOM (Rx) *******\n", + __func__); if(OldMessageCount > ((self->LineStatus+1) & 0x07)) { self->rcvFramesOverflow = TRUE; - IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ********\n", __func__); + pr_debug("%s(), ******* self->rcvFramesOverflow = TRUE ********\n", + __func__); } if (ali_ircc_dma_receive_complete(self)) { - IRDA_DEBUG(1, "%s(), ******* receive complete ********\n", __func__); + pr_debug("%s(), ******* receive complete ********\n", + __func__); self->ier = IER_EOM; } else { - IRDA_DEBUG(1, "%s(), ******* Not receive complete ********\n", __func__); + pr_debug("%s(), ******* Not receive complete ********\n", + __func__); self->ier = IER_EOM | IER_TIMER; } @@ -776,7 +764,8 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self) if(OldMessageCount > ((self->LineStatus+1) & 0x07)) { self->rcvFramesOverflow = TRUE; - IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE *******\n", __func__); + pr_debug("%s(), ******* self->rcvFramesOverflow = TRUE *******\n", + __func__); } /* Disable Timer */ switch_bank(iobase, BANK1); @@ -808,7 +797,6 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self) /* Restore Interrupt */ SetCOMInterrupts(self, TRUE); - IRDA_DEBUG(1, "%s(), ----------------- End ---------------\n", __func__); return IRQ_RETVAL(eir); } @@ -823,7 +811,6 @@ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self) int iobase; int iir, lsr; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); iobase = self->io.sir_base; @@ -832,13 +819,13 @@ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self) /* Clear interrupt */ lsr = inb(iobase+UART_LSR); - IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", __func__, - iir, lsr, iobase); + pr_debug("%s(), iir=%02x, lsr=%02x, iobase=%#x\n", + __func__, iir, lsr, iobase); switch (iir) { case UART_IIR_RLSI: - IRDA_DEBUG(2, "%s(), RLSI\n", __func__); + pr_debug("%s(), RLSI\n", __func__); break; case UART_IIR_RDI: /* Receive interrupt */ @@ -852,15 +839,14 @@ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self) } break; default: - IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", __func__, iir); + pr_debug("%s(), unhandled IIR=%#x\n", + __func__, iir); break; } } - IRDA_DEBUG(2, 
"%s(), ----------------- End ------------------\n", __func__); - return IRQ_RETVAL(iir); } @@ -876,7 +862,6 @@ static void ali_ircc_sir_receive(struct ali_ircc_cb *self) int boguscount = 0; int iobase; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); IRDA_ASSERT(self != NULL, return;); iobase = self->io.sir_base; @@ -891,12 +876,11 @@ static void ali_ircc_sir_receive(struct ali_ircc_cb *self) /* Make sure we don't stay here too long */ if (boguscount++ > 32) { - IRDA_DEBUG(2,"%s(), breaking!\n", __func__); + pr_debug("%s(), breaking!\n", __func__); break; } } while (inb(iobase+UART_LSR) & UART_LSR_DR); - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); } /* @@ -913,7 +897,6 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self) IRDA_ASSERT(self != NULL, return;); - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ ); iobase = self->io.sir_base; @@ -932,16 +915,18 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self) { /* We must wait until all data are gone */ while(!(inb(iobase+UART_LSR) & UART_LSR_TEMT)) - IRDA_DEBUG(1, "%s(), UART_LSR_THRE\n", __func__ ); + pr_debug("%s(), UART_LSR_THRE\n", __func__); - IRDA_DEBUG(1, "%s(), Changing speed! self->new_speed = %d\n", __func__ , self->new_speed); + pr_debug("%s(), Changing speed! self->new_speed = %d\n", + __func__, self->new_speed); ali_ircc_change_speed(self, self->new_speed); self->new_speed = 0; // benjamin 2000/11/10 06:32PM if (self->io.speed > 115200) { - IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT\n", __func__ ); + pr_debug("%s(), ali_ircc_change_speed from UART_LSR_TEMT\n", + __func__); self->ier = IER_EOM; // SetCOMInterrupts(self, TRUE); @@ -959,7 +944,6 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self) outb(UART_IER_RDI, iobase+UART_IER); } - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); } static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud) @@ -967,9 +951,8 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud) struct net_device *dev = self->netdev; int iobase; - IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); - IRDA_DEBUG(2, "%s(), setting speed = %d\n", __func__ , baud); + pr_debug("%s(), setting speed = %d\n", __func__, baud); /* This function *must* be called with irq off and spin-lock. 
* - Jean II */ @@ -1008,7 +991,6 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud) netif_wake_queue(self->netdev); - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); } static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud) @@ -1018,14 +1000,14 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud) struct ali_ircc_cb *self = priv; struct net_device *dev; - IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); IRDA_ASSERT(self != NULL, return;); dev = self->netdev; iobase = self->io.fir_base; - IRDA_DEBUG(1, "%s(), self->io.speed = %d, change to speed = %d\n", __func__ ,self->io.speed,baud); + pr_debug("%s(), self->io.speed = %d, change to speed = %d\n", + __func__, self->io.speed, baud); /* Come from SIR speed */ if(self->io.speed <=115200) @@ -1039,7 +1021,6 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud) // Set Dongle Speed mode ali_ircc_change_dongle_speed(self, baud); - IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); } /* @@ -1057,9 +1038,8 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed) int lcr; /* Line control reg */ int divisor; - IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); - IRDA_DEBUG(1, "%s(), Setting speed to: %d\n", __func__ , speed); + pr_debug("%s(), Setting speed to: %d\n", __func__, speed); IRDA_ASSERT(self != NULL, return;); @@ -1113,7 +1093,6 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed) spin_unlock_irqrestore(&self->lock, flags); - IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); } static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed) @@ -1123,14 +1102,14 @@ static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed) int iobase,dongle_id; int tmp = 0; - IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); iobase = self->io.fir_base; /* or iobase = self->io.sir_base; */ dongle_id = self->io.dongle_id; /* We are already locked, no need to do it again */ - IRDA_DEBUG(1, "%s(), Set Speed for %s , Speed = %d\n", __func__ , dongle_types[dongle_id], speed); + pr_debug("%s(), Set Speed for %s , Speed = %d\n", + __func__, dongle_types[dongle_id], speed); switch_bank(iobase, BANK2); tmp = inb(iobase+FIR_IRDA_CR); @@ -1294,7 +1273,6 @@ static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed) switch_bank(iobase, BANK0); - IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); } /* @@ -1307,11 +1285,10 @@ static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len) { int actual = 0; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ ); /* Tx FIFO should be empty! 
*/ if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) { - IRDA_DEBUG(0, "%s(), failed, fifo not empty!\n", __func__ ); + pr_debug("%s(), failed, fifo not empty!\n", __func__); return 0; } @@ -1323,7 +1300,6 @@ static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len) actual++; } - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); return actual; } @@ -1339,7 +1315,6 @@ static int ali_ircc_net_open(struct net_device *dev) int iobase; char hwname[32]; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ ); IRDA_ASSERT(dev != NULL, return -1;); @@ -1352,9 +1327,8 @@ static int ali_ircc_net_open(struct net_device *dev) /* Request IRQ and install Interrupt Handler */ if (request_irq(self->io.irq, ali_ircc_interrupt, 0, dev->name, dev)) { - IRDA_WARNING("%s, unable to allocate irq=%d\n", - ALI_IRCC_DRIVER_NAME, - self->io.irq); + net_warn_ratelimited("%s, unable to allocate irq=%d\n", + ALI_IRCC_DRIVER_NAME, self->io.irq); return -EAGAIN; } @@ -1363,9 +1337,8 @@ static int ali_ircc_net_open(struct net_device *dev) * failure. */ if (request_dma(self->io.dma, dev->name)) { - IRDA_WARNING("%s, unable to allocate dma=%d\n", - ALI_IRCC_DRIVER_NAME, - self->io.dma); + net_warn_ratelimited("%s, unable to allocate dma=%d\n", + ALI_IRCC_DRIVER_NAME, self->io.dma); free_irq(self->io.irq, dev); return -EAGAIN; } @@ -1385,7 +1358,6 @@ static int ali_ircc_net_open(struct net_device *dev) */ self->irlap = irlap_open(dev, &self->qos, hwname); - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); return 0; } @@ -1402,7 +1374,6 @@ static int ali_ircc_net_close(struct net_device *dev) struct ali_ircc_cb *self; //int iobase; - IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__ ); IRDA_ASSERT(dev != NULL, return -1;); @@ -1425,7 +1396,6 @@ static int ali_ircc_net_close(struct net_device *dev) free_irq(self->io.irq, dev); free_dma(self->io.dma); - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); return 0; } @@ -1445,7 +1415,6 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb, __u32 speed; int mtt, diff; - IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ ); self = netdev_priv(dev); iobase = self->io.fir_base; @@ -1499,7 +1468,8 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb, diff = self->now.tv_usec - self->stamp.tv_usec; /* self->stamp is set from ali_ircc_dma_receive_complete() */ - IRDA_DEBUG(1, "%s(), ******* diff = %d *******\n", __func__ , diff); + pr_debug("%s(), ******* diff = %d *******\n", + __func__, diff); if (diff < 0) diff += 1000000; @@ -1521,7 +1491,8 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb, /* Adjust for timer resolution */ mtt = (mtt+250) / 500; /* 4 discard, 5 get advanced, Let's round off */ - IRDA_DEBUG(1, "%s(), ************** mtt = %d ***********\n", __func__ , mtt); + pr_debug("%s(), ************** mtt = %d ***********\n", + __func__, mtt); /* Setup timer */ if (mtt == 1) /* 500 us */ @@ -1578,7 +1549,6 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb, spin_unlock_irqrestore(&self->lock, flags); dev_kfree_skb(skb); - IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); return NETDEV_TX_OK; } @@ -1589,7 +1559,6 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self) unsigned char FIFO_OPTI, Hi, Lo; - IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ ); iobase = self->io.fir_base; @@ -1640,7 +1609,8 @@ 
static void ali_ircc_dma_xmit(struct ali_ircc_cb *self) tmp = inb(iobase+FIR_LCR_B); tmp &= ~0x20; // Disable SIP outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B); - IRDA_DEBUG(1, "%s(), *** Change to TX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B)); + pr_debug("%s(), *** Change to TX mode: FIR_LCR_B = 0x%x ***\n", + __func__, inb(iobase + FIR_LCR_B)); outb(0, iobase+FIR_LSR); @@ -1650,7 +1620,6 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self) switch_bank(iobase, BANK0); - IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); } static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self) @@ -1658,7 +1627,6 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self) int iobase; int ret = TRUE; - IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ ); iobase = self->io.fir_base; @@ -1671,7 +1639,8 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self) if((inb(iobase+FIR_LSR) & LSR_FRAME_ABORT) == LSR_FRAME_ABORT) { - IRDA_ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __func__); + net_err_ratelimited("%s(), ********* LSR_FRAME_ABORT *********\n", + __func__); self->netdev->stats.tx_errors++; self->netdev->stats.tx_fifo_errors++; } @@ -1714,7 +1683,6 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self) switch_bank(iobase, BANK0); - IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); return ret; } @@ -1729,7 +1697,6 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self) { int iobase, tmp; - IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ ); iobase = self->io.fir_base; @@ -1767,7 +1734,8 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self) //switch_bank(iobase, BANK0); tmp = inb(iobase+FIR_LCR_B); outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM - IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B)); + pr_debug("%s(), *** Change To RX mode: FIR_LCR_B = 0x%x ***\n", + __func__, inb(iobase + FIR_LCR_B)); /* Set Rx Threshold */ switch_bank(iobase, BANK1); @@ -1779,7 +1747,6 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self) outb(CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR); switch_bank(iobase, BANK0); - IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); return 0; } @@ -1790,8 +1757,6 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) __u8 status, MessageCount; int len, i, iobase, val; - IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ ); - st_fifo = &self->st_fifo; iobase = self->io.fir_base; @@ -1799,7 +1764,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) MessageCount = inb(iobase+ FIR_LSR)&0x07; if (MessageCount > 0) - IRDA_DEBUG(0, "%s(), Message count = %d,\n", __func__ , MessageCount); + pr_debug("%s(), Message count = %d\n", __func__, MessageCount); for (i=0; i<=MessageCount; i++) { @@ -1812,11 +1777,11 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) len = len << 8; len |= inb(iobase+FIR_RX_DSR_LO); - IRDA_DEBUG(1, "%s(), RX Length = 0x%.2x,\n", __func__ , len); - IRDA_DEBUG(1, "%s(), RX Status = 0x%.2x,\n", __func__ , status); + pr_debug("%s(), RX Length = 0x%.2x,\n", __func__ , len); + pr_debug("%s(), RX Status = 0x%.2x,\n", __func__ , status); if (st_fifo->tail >= MAX_RX_WINDOW) { - IRDA_DEBUG(0, "%s(), window is full!\n", __func__ ); + pr_debug("%s(), 
window is full!\n", __func__); continue; } @@ -1839,7 +1804,8 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) /* Check for errors */ if ((status & 0xd8) || self->rcvFramesOverflow || (len==0)) { - IRDA_DEBUG(0,"%s(), ************* RX Errors ************\n", __func__ ); + pr_debug("%s(), ************* RX Errors ************\n", + __func__); /* Skip frame */ self->netdev->stats.rx_errors++; @@ -1849,29 +1815,34 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) if (status & LSR_FIFO_UR) { self->netdev->stats.rx_frame_errors++; - IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************\n", __func__ ); + pr_debug("%s(), ************* FIFO Errors ************\n", + __func__); } if (status & LSR_FRAME_ERROR) { self->netdev->stats.rx_frame_errors++; - IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************\n", __func__ ); + pr_debug("%s(), ************* FRAME Errors ************\n", + __func__); } if (status & LSR_CRC_ERROR) { self->netdev->stats.rx_crc_errors++; - IRDA_DEBUG(0,"%s(), ************* CRC Errors ************\n", __func__ ); + pr_debug("%s(), ************* CRC Errors ************\n", + __func__); } if(self->rcvFramesOverflow) { self->netdev->stats.rx_frame_errors++; - IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************\n", __func__ ); + pr_debug("%s(), ************* Overran DMA buffer ************\n", + __func__); } if(len == 0) { self->netdev->stats.rx_frame_errors++; - IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 *********\n", __func__ ); + pr_debug("%s(), ********** Receive Frame Size = 0 *********\n", + __func__); } } else @@ -1883,7 +1854,8 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) val = inb(iobase+FIR_BSR); if ((val& BSR_FIFO_NOT_EMPTY)== 0x80) { - IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************\n", __func__ ); + pr_debug("%s(), ************* BSR_FIFO_NOT_EMPTY ************\n", + __func__); /* Put this entry back in fifo */ st_fifo->head--; @@ -1918,9 +1890,6 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) skb = dev_alloc_skb(len+1); if (skb == NULL) { - IRDA_WARNING("%s(), memory squeeze, " - "dropping frame.\n", - __func__); self->netdev->stats.rx_dropped++; return FALSE; @@ -1947,7 +1916,6 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) switch_bank(iobase, BANK0); - IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); return TRUE; } @@ -1967,7 +1935,6 @@ static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb, int iobase; __u32 speed; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ ); IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;); @@ -2016,7 +1983,6 @@ static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb, dev_kfree_skb(skb); - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); return NETDEV_TX_OK; } @@ -2035,7 +2001,6 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) unsigned long flags; int ret = 0; - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ ); IRDA_ASSERT(dev != NULL, return -1;); @@ -2043,11 +2008,11 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) IRDA_ASSERT(self != NULL, return -1;); - IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd); + pr_debug("%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd); switch (cmd) { case SIOCSBANDWIDTH: /* Set bandwidth */ - IRDA_DEBUG(1, "%s(), SIOCSBANDWIDTH\n", 
__func__ ); + pr_debug("%s(), SIOCSBANDWIDTH\n", __func__); /* * This function will also be used by IrLAP to change the * speed, so we still must allow for speed change within @@ -2061,13 +2026,13 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) spin_unlock_irqrestore(&self->lock, flags); break; case SIOCSMEDIABUSY: /* Set media busy */ - IRDA_DEBUG(1, "%s(), SIOCSMEDIABUSY\n", __func__ ); + pr_debug("%s(), SIOCSMEDIABUSY\n", __func__); if (!capable(CAP_NET_ADMIN)) return -EPERM; irda_device_set_media_busy(self->netdev, TRUE); break; case SIOCGRECEIVING: /* Check if we are receiving right now */ - IRDA_DEBUG(2, "%s(), SIOCGRECEIVING\n", __func__ ); + pr_debug("%s(), SIOCGRECEIVING\n", __func__); /* This is protected */ irq->ifr_receiving = ali_ircc_is_receiving(self); break; @@ -2075,7 +2040,6 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) ret = -EOPNOTSUPP; } - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); return ret; } @@ -2092,7 +2056,6 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self) int status = FALSE; int iobase; - IRDA_DEBUG(2, "%s(), ---------------- Start -----------------\n", __func__ ); IRDA_ASSERT(self != NULL, return FALSE;); @@ -2106,7 +2069,8 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self) if((inb(iobase+FIR_FIFO_FR) & 0x3f) != 0) { /* We are receiving something */ - IRDA_DEBUG(1, "%s(), We are receiving something\n", __func__ ); + pr_debug("%s(), We are receiving something\n", + __func__); status = TRUE; } switch_bank(iobase, BANK0); @@ -2118,7 +2082,6 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self) spin_unlock_irqrestore(&self->lock, flags); - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); return status; } @@ -2127,7 +2090,7 @@ static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state) { struct ali_ircc_cb *self = platform_get_drvdata(dev); - IRDA_MESSAGE("%s, Suspending\n", ALI_IRCC_DRIVER_NAME); + net_info_ratelimited("%s, Suspending\n", ALI_IRCC_DRIVER_NAME); if (self->io.suspended) return 0; @@ -2148,7 +2111,7 @@ static int ali_ircc_resume(struct platform_device *dev) ali_ircc_net_open(self->netdev); - IRDA_MESSAGE("%s, Waking up\n", ALI_IRCC_DRIVER_NAME); + net_info_ratelimited("%s, Waking up\n", ALI_IRCC_DRIVER_NAME); self->io.suspended = 0; @@ -2164,7 +2127,8 @@ static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable) int iobase = self->io.fir_base; /* or sir_base */ - IRDA_DEBUG(2, "%s(), -------- Start -------- ( Enable = %d )\n", __func__ , enable); + pr_debug("%s(), -------- Start -------- ( Enable = %d )\n", + __func__, enable); /* Enable the interrupt which we wish to */ if (enable){ @@ -2205,14 +2169,12 @@ static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable) else outb(newMask, iobase+UART_IER); - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); } static void SIR2FIR(int iobase) { //unsigned char tmp; - IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); /* Already protected (change_speed() or setup()), no need to lock. 
* Jean II */ @@ -2228,14 +2190,12 @@ static void SIR2FIR(int iobase) //tmp |= 0x20; //outb(tmp, iobase+FIR_LCR_B); - IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); } static void FIR2SIR(int iobase) { unsigned char val; - IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); /* Already protected (change_speed() or setup()), no need to lock. * Jean II */ @@ -2251,7 +2211,6 @@ static void FIR2SIR(int iobase) val = inb(iobase+UART_LSR); val = inb(iobase+UART_MSR); - IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ ); } MODULE_AUTHOR("Benjamin Kong "); diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c index a87a82ca111f5e0aed0b9d66ec5a3cc4104a70b0..b337e6d23a88109cc929a558cb1305bea9fa3ab2 100644 --- a/drivers/net/irda/donauboe.c +++ b/drivers/net/irda/donauboe.c @@ -232,7 +232,7 @@ char head=tete; for (i=0;ipdev, PCI_COMMAND, &command); command &= ~PCI_COMMAND_MASTER; pci_write_config_byte (self->pdev, PCI_COMMAND, command); @@ -305,8 +301,6 @@ toshoboe_disablebm (struct toshoboe_cb *self) static void toshoboe_stopchip (struct toshoboe_cb *self) { - IRDA_DEBUG (4, "%s()\n", __func__); - /*Disable interrupts */ OUTB (0x0, OBOE_IER); /*Disable DMA, Disable Rx, Disable Tx */ @@ -350,7 +344,7 @@ toshoboe_setbaud (struct toshoboe_cb *self) __u16 pconfig = 0; __u8 config0l = 0; - IRDA_DEBUG (2, "%s(%d/%d)\n", __func__, self->speed, self->io.speed); + pr_debug("%s(%d/%d)\n", __func__, self->speed, self->io.speed); switch (self->speed) { @@ -482,7 +476,6 @@ toshoboe_setbaud (struct toshoboe_cb *self) static void toshoboe_enablebm (struct toshoboe_cb *self) { - IRDA_DEBUG (4, "%s()\n", __func__); pci_set_master (self->pdev); } @@ -492,8 +485,6 @@ toshoboe_initring (struct toshoboe_cb *self) { int i; - IRDA_DEBUG (4, "%s()\n", __func__); - for (i = 0; i < TX_SLOTS; ++i) { self->ring->tx[i].len = 0; @@ -550,8 +541,6 @@ toshoboe_startchip (struct toshoboe_cb *self) { __u32 physaddr; - IRDA_DEBUG (4, "%s()\n", __func__); - toshoboe_initring (self); toshoboe_enablebm (self); OUTBP (OBOE_CONFIG1_RESET, OBOE_CONFIG1); @@ -636,9 +625,8 @@ toshoboe_makemttpacket (struct toshoboe_cb *self, void *buf, int mtt) xbofs=xbofs/80000; /*Eight bits per byte, and mtt is in us*/ xbofs++; - IRDA_DEBUG (2, DRIVER_NAME - ": generated mtt of %d bytes for %d us at %d baud\n" - , xbofs,mtt,self->speed); + pr_debug(DRIVER_NAME ": generated mtt of %d bytes for %d us at %d baud\n", + xbofs, mtt, self->speed); if (xbofs > TX_LEN) { @@ -824,8 +812,6 @@ toshoboe_probe (struct toshoboe_cb *self) #endif unsigned long flags; - IRDA_DEBUG (4, "%s()\n", __func__); - if (request_irq (self->io.irq, toshoboe_probeinterrupt, self->io.irqflags, "toshoboe", (void *) self)) { @@ -983,10 +969,10 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev) IRDA_ASSERT (self != NULL, return NETDEV_TX_OK; ); - IRDA_DEBUG (1, "%s.tx:%x(%x)%x\n", __func__ - ,skb->len,self->txpending,INB (OBOE_ENABLEH)); + pr_debug("%s.tx:%x(%x)%x\n", + __func__, skb->len, self->txpending, INB(OBOE_ENABLEH)); if (!cb->magic) { - IRDA_DEBUG (2, "%s.Not IrLAP:%x\n", __func__, cb->magic); + pr_debug("%s.Not IrLAP:%x\n", __func__, cb->magic); #ifdef DUMP_PACKETS _dumpbufs(skb->data,skb->len,'>'); #endif @@ -1012,8 +998,8 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev) if (self->txpending || skb->len) { self->new_speed = speed; - IRDA_DEBUG (1, "%s: Queued TxDone scheduled speed change %d\n" , - __func__, speed); + pr_debug("%s: Queued TxDone 
scheduled speed change %d\n" , + __func__, speed); /* if no data, that's all! */ if (!skb->len) { @@ -1055,8 +1041,7 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev) /* which we will add a wrong checksum to */ mtt = toshoboe_makemttpacket (self, self->tx_bufs[self->txs], mtt); - IRDA_DEBUG (1, "%s.mtt:%x(%x)%d\n", __func__ - ,skb->len,mtt,self->txpending); + pr_debug("%s.mtt:%x(%x)%d\n", __func__, skb->len, mtt, self->txpending); if (mtt) { self->ring->tx[self->txs].len = mtt & 0xfff; @@ -1099,8 +1084,9 @@ dumpbufs(skb->data,skb->len,'>'); if (self->ring->tx[self->txs].control & OBOE_CTL_TX_HW_OWNS) { - IRDA_DEBUG (0, "%s.ful:%x(%x)%x\n", __func__ - ,skb->len, self->ring->tx[self->txs].control, self->txpending); + pr_debug("%s.ful:%x(%x)%x\n", + __func__, skb->len, self->ring->tx[self->txs].control, + self->txpending); toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX); spin_unlock_irqrestore(&self->spinlock, flags); return NETDEV_TX_BUSY; @@ -1177,8 +1163,7 @@ toshoboe_interrupt (int irq, void *dev_id) if (self->ring->tx[i].control & OBOE_CTL_TX_HW_OWNS) self->txpending++; } - IRDA_DEBUG (1, "%s.txd(%x)%x/%x\n", __func__ - ,irqstat,txp,self->txpending); + pr_debug("%s.txd(%x)%x/%x\n", __func__, irqstat, txp, self->txpending); txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK; @@ -1206,8 +1191,8 @@ toshoboe_interrupt (int irq, void *dev_id) if ((!self->txpending) && (self->new_speed)) { self->speed = self->new_speed; - IRDA_DEBUG (1, "%s: Executed TxDone scheduled speed change %d\n", - __func__, self->speed); + pr_debug("%s: Executed TxDone scheduled speed change %d\n", + __func__, self->speed); toshoboe_setbaud (self); } @@ -1222,8 +1207,8 @@ toshoboe_interrupt (int irq, void *dev_id) { int len = self->ring->rx[self->rxs].len; skb = NULL; - IRDA_DEBUG (3, "%s.rcv:%x(%x)\n", __func__ - ,len,self->ring->rx[self->rxs].control); + pr_debug("%s.rcv:%x(%x)\n", __func__ + , len, self->ring->rx[self->rxs].control); #ifdef DUMP_PACKETS dumpbufs(self->rx_bufs[self->rxs],len,'<'); @@ -1244,7 +1229,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<'); len -= 2; else len = 0; - IRDA_DEBUG (1, "%s.SIR:%x(%x)\n", __func__, len,enable); + pr_debug("%s.SIR:%x(%x)\n", __func__, len, enable); } #ifdef USE_MIR @@ -1254,7 +1239,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<'); len -= 2; else len = 0; - IRDA_DEBUG (2, "%s.MIR:%x(%x)\n", __func__, len,enable); + pr_debug("%s.MIR:%x(%x)\n", __func__, len, enable); } #endif else if (enable & OBOE_ENABLEH_FIRON) @@ -1263,10 +1248,10 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<'); len -= 4; /*FIXME: check this */ else len = 0; - IRDA_DEBUG (1, "%s.FIR:%x(%x)\n", __func__, len,enable); + pr_debug("%s.FIR:%x(%x)\n", __func__, len, enable); } else - IRDA_DEBUG (0, "%s.?IR:%x(%x)\n", __func__, len,enable); + pr_debug("%s.?IR:%x(%x)\n", __func__, len, enable); if (len) { @@ -1299,8 +1284,8 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<'); /* (SIR) data is splitted in several slots. */ /* we have to join all the received buffers received */ /*in a large buffer before checking CRC. 
*/ - IRDA_DEBUG (0, "%s.err:%x(%x)\n", __func__ - ,len,self->ring->rx[self->rxs].control); + pr_debug("%s.err:%x(%x)\n", __func__ + , len, self->ring->rx[self->rxs].control); } self->ring->rx[self->rxs].len = 0x0; @@ -1327,8 +1312,8 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<'); if (irqstat & OBOE_INT_SIP) { self->int_sip++; - IRDA_DEBUG (1, "%s.sip:%x(%x)%x\n", __func__ - ,self->int_sip,irqstat,self->txpending); + pr_debug("%s.sip:%x(%x)%x\n", + __func__, self->int_sip, irqstat, self->txpending); } return IRQ_HANDLED; } @@ -1341,8 +1326,6 @@ toshoboe_net_open (struct net_device *dev) unsigned long flags; int rc; - IRDA_DEBUG (4, "%s()\n", __func__); - self = netdev_priv(dev); if (self->async) @@ -1379,8 +1362,6 @@ toshoboe_net_close (struct net_device *dev) { struct toshoboe_cb *self; - IRDA_DEBUG (4, "%s()\n", __func__); - IRDA_ASSERT (dev != NULL, return -1; ); self = netdev_priv(dev); @@ -1424,7 +1405,7 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) IRDA_ASSERT (self != NULL, return -1; ); - IRDA_DEBUG (5, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd); + pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd); /* Disable interrupts & save flags */ spin_lock_irqsave(&self->spinlock, flags); @@ -1436,8 +1417,8 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) * speed, so we still must allow for speed change within * interrupt context. */ - IRDA_DEBUG (1, "%s(BANDWIDTH), %s, (%X/%ld\n", __func__ - ,dev->name, INB (OBOE_STATUS), irq->ifr_baudrate ); + pr_debug("%s(BANDWIDTH), %s, (%X/%ld\n", + __func__, dev->name, INB(OBOE_STATUS), irq->ifr_baudrate); if (!in_interrupt () && !capable (CAP_NET_ADMIN)) { ret = -EPERM; goto out; @@ -1449,8 +1430,9 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) self->new_speed = irq->ifr_baudrate; break; case SIOCSMEDIABUSY: /* Set media busy */ - IRDA_DEBUG (1, "%s(MEDIABUSY), %s, (%X/%x)\n", __func__ - ,dev->name, INB (OBOE_STATUS), capable (CAP_NET_ADMIN) ); + pr_debug("%s(MEDIABUSY), %s, (%X/%x)\n", + __func__, dev->name, + INB(OBOE_STATUS), capable(CAP_NET_ADMIN)); if (!capable (CAP_NET_ADMIN)) { ret = -EPERM; goto out; @@ -1459,11 +1441,11 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) break; case SIOCGRECEIVING: /* Check if we are receiving right now */ irq->ifr_receiving = (INB (OBOE_STATUS) & OBOE_STATUS_RXBUSY) ? 
1 : 0; - IRDA_DEBUG (3, "%s(RECEIVING), %s, (%X/%x)\n", __func__ - ,dev->name, INB (OBOE_STATUS), irq->ifr_receiving ); + pr_debug("%s(RECEIVING), %s, (%X/%x)\n", + __func__, dev->name, INB(OBOE_STATUS), irq->ifr_receiving); break; default: - IRDA_DEBUG (1, "%s(?), %s, (cmd=0x%X)\n", __func__, dev->name, cmd); + pr_debug("%s(?), %s, (cmd=0x%X)\n", __func__, dev->name, cmd); ret = -EOPNOTSUPP; } out: @@ -1490,8 +1472,6 @@ toshoboe_close (struct pci_dev *pci_dev) int i; struct toshoboe_cb *self = pci_get_drvdata(pci_dev); - IRDA_DEBUG (4, "%s()\n", __func__); - IRDA_ASSERT (self != NULL, return; ); if (!self->stopped) @@ -1538,8 +1518,6 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid) int ok = 0; int err; - IRDA_DEBUG (4, "%s()\n", __func__); - if ((err=pci_enable_device(pci_dev))) return err; @@ -1700,8 +1678,6 @@ toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap) unsigned long flags; int i = 10; - IRDA_DEBUG (4, "%s()\n", __func__); - if (!self || self->stopped) return 0; @@ -1728,8 +1704,6 @@ toshoboe_wakeup (struct pci_dev *pci_dev) struct toshoboe_cb *self = pci_get_drvdata(pci_dev); unsigned long flags; - IRDA_DEBUG (4, "%s()\n", __func__); - if (!self || !self->stopped) return 0; diff --git a/drivers/net/irda/girbil-sir.c b/drivers/net/irda/girbil-sir.c index 96cdecff349d952715f63a9dabc49c1db9722298..7e0a5b8c6d53fd14a95b6c6507c69da57af2b7a9 100644 --- a/drivers/net/irda/girbil-sir.c +++ b/drivers/net/irda/girbil-sir.c @@ -86,8 +86,6 @@ static int girbil_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; - IRDA_DEBUG(2, "%s()\n", __func__); - /* Power on dongle */ sirdev_set_dtr_rts(dev, TRUE, TRUE); @@ -102,8 +100,6 @@ static int girbil_open(struct sir_dev *dev) static int girbil_close(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* Power off dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); @@ -126,8 +122,6 @@ static int girbil_change_speed(struct sir_dev *dev, unsigned speed) u8 control[2]; static int ret = 0; - IRDA_DEBUG(2, "%s()\n", __func__); - /* dongle alread reset - port and dongle at default speed */ switch(state) { @@ -179,7 +173,8 @@ static int girbil_change_speed(struct sir_dev *dev, unsigned speed) break; default: - IRDA_ERROR("%s - undefined state %d\n", __func__, state); + net_err_ratelimited("%s - undefined state %d\n", + __func__, state); ret = -EINVAL; break; } @@ -209,8 +204,6 @@ static int girbil_reset(struct sir_dev *dev) u8 control = GIRBIL_TXEN | GIRBIL_RXEN; int ret = 0; - IRDA_DEBUG(2, "%s()\n", __func__); - switch (state) { case SIRDEV_STATE_DONGLE_RESET: /* Reset dongle */ @@ -241,7 +234,8 @@ static int girbil_reset(struct sir_dev *dev) break; default: - IRDA_ERROR("%s(), undefined state %d\n", __func__, state); + net_err_ratelimited("%s(), undefined state %d\n", + __func__, state); ret = -1; break; } diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index 925b78cc979746b090dadc8bbed5d837d941e874..48b2f9a321b71c530c313fc5084e3c2c4d74318f 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c @@ -176,12 +176,13 @@ static void irda_usb_build_header(struct irda_usb_cb *self, (!force) && (self->speed != -1)) { /* No speed and xbofs change here * (we'll do it later in the write callback) */ - IRDA_DEBUG(2, "%s(), not changing speed yet\n", __func__); + pr_debug("%s(), not changing speed yet\n", __func__); *header = 0; return; } - IRDA_DEBUG(2, "%s(), changing speed to %d\n", __func__, self->new_speed); + pr_debug("%s(), changing speed to %d\n", + 
__func__, self->new_speed); self->speed = self->new_speed; /* We will do ` self->new_speed = -1; ' in the completion * handler just in case the current URB fail - Jean II */ @@ -227,7 +228,8 @@ static void irda_usb_build_header(struct irda_usb_cb *self, /* Set the negotiated additional XBOFS */ if (self->new_xbofs != -1) { - IRDA_DEBUG(2, "%s(), changing xbofs to %d\n", __func__, self->new_xbofs); + pr_debug("%s(), changing xbofs to %d\n", + __func__, self->new_xbofs); self->xbofs = self->new_xbofs; /* We will do ` self->new_xbofs = -1; ' in the completion * handler just in case the current URB fail - Jean II */ @@ -301,13 +303,13 @@ static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self) struct urb *urb; int ret; - IRDA_DEBUG(2, "%s(), speed=%d, xbofs=%d\n", __func__, - self->new_speed, self->new_xbofs); + pr_debug("%s(), speed=%d, xbofs=%d\n", __func__, + self->new_speed, self->new_xbofs); /* Grab the speed URB */ urb = self->speed_urb; if (urb->status != 0) { - IRDA_WARNING("%s(), URB still in use!\n", __func__); + net_warn_ratelimited("%s(), URB still in use!\n", __func__); return; } @@ -333,7 +335,7 @@ static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self) /* Irq disabled -> GFP_ATOMIC */ if ((ret = usb_submit_urb(urb, GFP_ATOMIC))) { - IRDA_WARNING("%s(), failed Speed URB\n", __func__); + net_warn_ratelimited("%s(), failed Speed URB\n", __func__); } } @@ -346,8 +348,6 @@ static void speed_bulk_callback(struct urb *urb) { struct irda_usb_cb *self = urb->context; - IRDA_DEBUG(2, "%s()\n", __func__); - /* We should always have a context */ IRDA_ASSERT(self != NULL, return;); /* We should always be called for the speed URB */ @@ -356,7 +356,8 @@ static void speed_bulk_callback(struct urb *urb) /* Check for timeout and other USB nasties */ if (urb->status != 0) { /* I get a lot of -ECONNABORTED = -103 here - Jean II */ - IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags); + pr_debug("%s(), URB complete status %d, transfer_flags 0x%04X\n", + __func__, urb->status, urb->transfer_flags); /* Don't do anything here, that might confuse the USB layer. 
* Instead, we will wait for irda_usb_net_timeout(), the @@ -391,7 +392,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb, s16 xbofs; int res, mtt; - IRDA_DEBUG(4, "%s() on %s\n", __func__, netdev->name); + pr_debug("%s() on %s\n", __func__, netdev->name); netif_stop_queue(netdev); @@ -402,7 +403,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb, * We need to check self->present under the spinlock because * of irda_usb_disconnect() is synchronous - Jean II */ if (!self->present) { - IRDA_DEBUG(0, "%s(), Device is gone...\n", __func__); + pr_debug("%s(), Device is gone...\n", __func__); goto drop; } @@ -435,7 +436,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb, } if (urb->status != 0) { - IRDA_WARNING("%s(), URB still in use!\n", __func__); + net_warn_ratelimited("%s(), URB still in use!\n", __func__); goto drop; } @@ -522,7 +523,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb, /* Ask USB to send the packet - Irq disabled -> GFP_ATOMIC */ if ((res = usb_submit_urb(urb, GFP_ATOMIC))) { - IRDA_WARNING("%s(), failed Tx URB\n", __func__); + net_warn_ratelimited("%s(), failed Tx URB\n", __func__); netdev->stats.tx_errors++; /* Let USB recover : We will catch that in the watchdog */ /*netif_start_queue(netdev);*/ @@ -554,8 +555,6 @@ static void write_bulk_callback(struct urb *urb) struct sk_buff *skb = urb->context; struct irda_usb_cb *self = ((struct irda_skb_cb *) skb->cb)->context; - IRDA_DEBUG(2, "%s()\n", __func__); - /* We should always have a context */ IRDA_ASSERT(self != NULL, return;); /* We should always be called for the speed URB */ @@ -568,7 +567,8 @@ static void write_bulk_callback(struct urb *urb) /* Check for timeout and other USB nasties */ if (urb->status != 0) { /* I get a lot of -ECONNABORTED = -103 here - Jean II */ - IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags); + pr_debug("%s(), URB complete status %d, transfer_flags 0x%04X\n", + __func__, urb->status, urb->transfer_flags); /* Don't do anything here, that might confuse the USB layer, * and we could go in recursion and blow the kernel stack... @@ -587,7 +587,7 @@ static void write_bulk_callback(struct urb *urb) /* If the network is closed, stop everything */ if ((!self->netopen) || (!self->present)) { - IRDA_DEBUG(0, "%s(), Network is gone...\n", __func__); + pr_debug("%s(), Network is gone...\n", __func__); spin_unlock_irqrestore(&self->lock, flags); return; } @@ -598,7 +598,7 @@ static void write_bulk_callback(struct urb *urb) (self->new_xbofs != self->xbofs)) { /* We haven't changed speed yet (because of * IUC_SPEED_BUG), so do it now - Jean II */ - IRDA_DEBUG(1, "%s(), Changing speed now...\n", __func__); + pr_debug("%s(), Changing speed now...\n", __func__); irda_usb_change_speed_xbofs(self); } else { /* New speed and xbof is now committed in hardware */ @@ -630,7 +630,7 @@ static void irda_usb_net_timeout(struct net_device *netdev) struct urb *urb; int done = 0; /* If we have made any progress */ - IRDA_DEBUG(0, "%s(), Network layer thinks we timed out!\n", __func__); + pr_debug("%s(), Network layer thinks we timed out!\n", __func__); IRDA_ASSERT(self != NULL, return;); /* Protect us from USB callbacks, net Tx and else. 
*/ @@ -638,7 +638,7 @@ static void irda_usb_net_timeout(struct net_device *netdev) /* self->present *MUST* be read under spinlock */ if (!self->present) { - IRDA_WARNING("%s(), device not present!\n", __func__); + net_warn_ratelimited("%s(), device not present!\n", __func__); netif_stop_queue(netdev); spin_unlock_irqrestore(&self->lock, flags); return; @@ -647,7 +647,8 @@ static void irda_usb_net_timeout(struct net_device *netdev) /* Check speed URB */ urb = self->speed_urb; if (urb->status != 0) { - IRDA_DEBUG(0, "%s: Speed change timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", netdev->name, urb->status, urb->transfer_flags); + pr_debug("%s: Speed change timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", + netdev->name, urb->status, urb->transfer_flags); switch (urb->status) { case -EINPROGRESS: @@ -672,7 +673,8 @@ static void irda_usb_net_timeout(struct net_device *netdev) if (urb->status != 0) { struct sk_buff *skb = urb->context; - IRDA_DEBUG(0, "%s: Tx timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", netdev->name, urb->status, urb->transfer_flags); + pr_debug("%s: Tx timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", + netdev->name, urb->status, urb->transfer_flags); /* Increase error count */ netdev->stats.tx_errors++; @@ -761,8 +763,6 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc struct irda_skb_cb *cb; int ret; - IRDA_DEBUG(2, "%s()\n", __func__); - /* This should never happen */ IRDA_ASSERT(skb != NULL, return;); IRDA_ASSERT(urb != NULL, return;); @@ -783,8 +783,8 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc if (ret) { /* If this ever happen, we are in deep s***. * Basically, the Rx path will stop... */ - IRDA_WARNING("%s(), Failed to submit Rx URB %d\n", - __func__, ret); + net_warn_ratelimited("%s(), Failed to submit Rx URB %d\n", + __func__, ret); } } @@ -805,7 +805,7 @@ static void irda_usb_receive(struct urb *urb) struct urb *next_urb; unsigned int len, docopy; - IRDA_DEBUG(2, "%s(), len=%d\n", __func__, urb->actual_length); + pr_debug("%s(), len=%d\n", __func__, urb->actual_length); /* Find ourselves */ cb = (struct irda_skb_cb *) skb->cb; @@ -815,7 +815,7 @@ static void irda_usb_receive(struct urb *urb) /* If the network is closed or the device gone, stop everything */ if ((!self->netopen) || (!self->present)) { - IRDA_DEBUG(0, "%s(), Network is gone!\n", __func__); + pr_debug("%s(), Network is gone!\n", __func__); /* Don't re-submit the URB : will stall the Rx path */ return; } @@ -838,7 +838,8 @@ static void irda_usb_receive(struct urb *urb) /* Usually precursor to a hot-unplug on OHCI. 
*/ default: self->netdev->stats.rx_errors++; - IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags); + pr_debug("%s(), RX status %d, transfer_flags 0x%04X\n", + __func__, urb->status, urb->transfer_flags); break; } /* If we received an error, we don't want to resubmit the @@ -859,7 +860,7 @@ static void irda_usb_receive(struct urb *urb) /* Check for empty frames */ if (urb->actual_length <= self->header_length) { - IRDA_WARNING("%s(), empty frame!\n", __func__); + net_warn_ratelimited("%s(), empty frame!\n", __func__); goto done; } @@ -964,8 +965,6 @@ static void irda_usb_rx_defer_expired(unsigned long data) struct irda_skb_cb *cb; struct urb *next_urb; - IRDA_DEBUG(2, "%s()\n", __func__); - /* Find ourselves */ cb = (struct irda_skb_cb *) skb->cb; IRDA_ASSERT(cb != NULL, return;); @@ -1049,8 +1048,8 @@ static int stir421x_fw_upload(struct irda_usb_cb *self, self->bulk_out_ep), patch_block, block_size, &actual_len, msecs_to_jiffies(500)); - IRDA_DEBUG(3,"%s(): Bulk send %u bytes, ret=%d\n", - __func__, actual_len, ret); + pr_debug("%s(): Bulk send %u bytes, ret=%d\n", + __func__, actual_len, ret); if (ret < 0) break; @@ -1088,8 +1087,8 @@ static int stir421x_patch_device(struct irda_usb_cb *self) return ret; /* We get a patch from userspace */ - IRDA_MESSAGE("%s(): Received firmware %s (%zu bytes)\n", - __func__, stir421x_fw_name, fw->size); + net_info_ratelimited("%s(): Received firmware %s (%zu bytes)\n", + __func__, stir421x_fw_name, fw->size); ret = -EINVAL; @@ -1112,8 +1111,8 @@ static int stir421x_patch_device(struct irda_usb_cb *self) + ((build / 10) << 4) + (build % 10); - IRDA_DEBUG(3, "%s(): Firmware Product version %ld\n", - __func__, fw_version); + pr_debug("%s(): Firmware Product version %ld\n", + __func__, fw_version); } } @@ -1169,8 +1168,6 @@ static int irda_usb_net_open(struct net_device *netdev) char hwname[16]; int i; - IRDA_DEBUG(1, "%s()\n", __func__); - IRDA_ASSERT(netdev != NULL, return -1;); self = netdev_priv(netdev); IRDA_ASSERT(self != NULL, return -1;); @@ -1179,13 +1176,13 @@ static int irda_usb_net_open(struct net_device *netdev) /* Can only open the device if it's there */ if(!self->present) { spin_unlock_irqrestore(&self->lock, flags); - IRDA_WARNING("%s(), device not present!\n", __func__); + net_warn_ratelimited("%s(), device not present!\n", __func__); return -1; } if(self->needspatch) { spin_unlock_irqrestore(&self->lock, flags); - IRDA_WARNING("%s(), device needs patch\n", __func__) ; + net_warn_ratelimited("%s(), device needs patch\n", __func__); return -EIO ; } @@ -1227,8 +1224,6 @@ static int irda_usb_net_open(struct net_device *netdev) if (!skb) { /* If this ever happen, we are in deep s***. * Basically, we can't start the Rx path... 
*/ - IRDA_WARNING("%s(), Failed to allocate Rx skb\n", - __func__); return -1; } //skb_reserve(newskb, USB_IRDA_HEADER - 1); @@ -1251,8 +1246,6 @@ static int irda_usb_net_close(struct net_device *netdev) struct irda_usb_cb *self; int i; - IRDA_DEBUG(1, "%s()\n", __func__); - IRDA_ASSERT(netdev != NULL, return -1;); self = netdev_priv(netdev); IRDA_ASSERT(self != NULL, return -1;); @@ -1306,7 +1299,7 @@ static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) self = netdev_priv(dev); IRDA_ASSERT(self != NULL, return -1;); - IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd); + pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd); switch (cmd) { case SIOCSBANDWIDTH: /* Set bandwidth */ @@ -1356,7 +1349,6 @@ static inline void irda_usb_init_qos(struct irda_usb_cb *self) { struct irda_class_desc *desc; - IRDA_DEBUG(3, "%s()\n", __func__); desc = self->irda_desc; @@ -1372,8 +1364,10 @@ static inline void irda_usb_init_qos(struct irda_usb_cb *self) self->qos.window_size.bits = desc->bmWindowSize; self->qos.data_size.bits = desc->bmDataSize; - IRDA_DEBUG(0, "%s(), dongle says speed=0x%X, size=0x%X, window=0x%X, bofs=0x%X, turn=0x%X\n", - __func__, self->qos.baud_rate.bits, self->qos.data_size.bits, self->qos.window_size.bits, self->qos.additional_bofs.bits, self->qos.min_turn_time.bits); + pr_debug("%s(), dongle says speed=0x%X, size=0x%X, window=0x%X, bofs=0x%X, turn=0x%X\n", + __func__, self->qos.baud_rate.bits, self->qos.data_size.bits, + self->qos.window_size.bits, self->qos.additional_bofs.bits, + self->qos.min_turn_time.bits); /* Don't always trust what the dongle tell us */ if(self->capability & IUC_SIR_ONLY) @@ -1416,8 +1410,6 @@ static inline int irda_usb_open(struct irda_usb_cb *self) { struct net_device *netdev = self->netdev; - IRDA_DEBUG(1, "%s()\n", __func__); - netdev->netdev_ops = &irda_usb_netdev_ops; irda_usb_init_qos(self); @@ -1432,8 +1424,6 @@ static inline int irda_usb_open(struct irda_usb_cb *self) */ static inline void irda_usb_close(struct irda_usb_cb *self) { - IRDA_DEBUG(1, "%s()\n", __func__); - /* Remove netdevice */ unregister_netdev(self->netdev); @@ -1505,13 +1495,15 @@ static inline int irda_usb_parse_endpoints(struct irda_usb_cb *self, struct usb_ /* This is our interrupt endpoint */ self->bulk_int_ep = ep; } else { - IRDA_ERROR("%s(), Unrecognised endpoint %02X.\n", __func__, ep); + net_err_ratelimited("%s(), Unrecognised endpoint %02X\n", + __func__, ep); } } } - IRDA_DEBUG(0, "%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n", - __func__, self->bulk_in_ep, self->bulk_out_ep, self->bulk_out_mtu, self->bulk_int_ep); + pr_debug("%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n", + __func__, self->bulk_in_ep, self->bulk_out_ep, + self->bulk_out_mtu, self->bulk_int_ep); return (self->bulk_in_ep != 0) && (self->bulk_out_ep != 0); } @@ -1573,13 +1565,13 @@ static inline struct irda_class_desc *irda_usb_find_class_desc(struct usb_interf 0, intf->altsetting->desc.bInterfaceNumber, desc, sizeof(*desc), 500); - IRDA_DEBUG(1, "%s(), ret=%d\n", __func__, ret); + pr_debug("%s(), ret=%d\n", __func__, ret); if (ret < sizeof(*desc)) { - IRDA_WARNING("usb-irda: class_descriptor read %s (%d)\n", - (ret<0) ? "failed" : "too short", ret); + net_warn_ratelimited("usb-irda: class_descriptor read %s (%d)\n", + ret < 0 ? 
"failed" : "too short", ret); } else if (desc->bDescriptorType != USB_DT_IRDA) { - IRDA_WARNING("usb-irda: bad class_descriptor type\n"); + net_warn_ratelimited("usb-irda: bad class_descriptor type\n"); } else { #ifdef IU_DUMP_CLASS_DESC @@ -1622,9 +1614,9 @@ static int irda_usb_probe(struct usb_interface *intf, * don't need to check if the dongle is really ours. * Jean II */ - IRDA_MESSAGE("IRDA-USB found at address %d, Vendor: %x, Product: %x\n", - dev->devnum, le16_to_cpu(dev->descriptor.idVendor), - le16_to_cpu(dev->descriptor.idProduct)); + net_info_ratelimited("IRDA-USB found at address %d, Vendor: %x, Product: %x\n", + dev->devnum, le16_to_cpu(dev->descriptor.idVendor), + le16_to_cpu(dev->descriptor.idProduct)); net = alloc_irdadev(sizeof(*self)); if (!net) @@ -1680,7 +1672,8 @@ static int irda_usb_probe(struct usb_interface *intf, * specify an alternate, but very few driver do like this. * Jean II */ ret = usb_set_interface(dev, intf->altsetting->desc.bInterfaceNumber, 0); - IRDA_DEBUG(1, "usb-irda: set interface %d result %d\n", intf->altsetting->desc.bInterfaceNumber, ret); + pr_debug("usb-irda: set interface %d result %d\n", + intf->altsetting->desc.bInterfaceNumber, ret); switch (ret) { case 0: break; @@ -1688,10 +1681,11 @@ static int irda_usb_probe(struct usb_interface *intf, /* Martin Diehl says if we get a -EPIPE we should * be fine and we don't need to do a usb_clear_halt(). * - Jean II */ - IRDA_DEBUG(0, "%s(), Received -EPIPE, ignoring...\n", __func__); + pr_debug("%s(), Received -EPIPE, ignoring...\n", + __func__); break; default: - IRDA_DEBUG(0, "%s(), Unknown error %d\n", __func__, ret); + pr_debug("%s(), Unknown error %d\n", __func__, ret); ret = -EIO; goto err_out_3; } @@ -1700,7 +1694,7 @@ static int irda_usb_probe(struct usb_interface *intf, interface = intf->cur_altsetting; if(!irda_usb_parse_endpoints(self, interface->endpoint, interface->desc.bNumEndpoints)) { - IRDA_ERROR("%s(), Bogus endpoints...\n", __func__); + net_err_ratelimited("%s(), Bogus endpoints...\n", __func__); ret = -EIO; goto err_out_3; } @@ -1717,7 +1711,7 @@ static int irda_usb_probe(struct usb_interface *intf, ret = usb_control_msg (self->usbdev, usb_sndctrlpipe (self->usbdev, 0), 0x02, 0x40, 0, 0, NULL, 0, 500); if (ret < 0) { - IRDA_DEBUG (0, "usb_control_msg failed %d\n", ret); + pr_debug("usb_control_msg failed %d\n", ret); goto err_out_3; } else { mdelay(10); @@ -1746,7 +1740,7 @@ static int irda_usb_probe(struct usb_interface *intf, if (ret) goto err_out_5; - IRDA_MESSAGE("IrDA: Registered device %s\n", net->name); + net_info_ratelimited("IrDA: Registered device %s\n", net->name); usb_set_intfdata(intf, self); if (self->needspatch) { @@ -1754,7 +1748,7 @@ static int irda_usb_probe(struct usb_interface *intf, ret = stir421x_patch_device(self); self->needspatch = (ret < 0); if (self->needspatch) { - IRDA_ERROR("STIR421X: Couldn't upload patch\n"); + net_err_ratelimited("STIR421X: Couldn't upload patch\n"); goto err_out_6; } @@ -1809,8 +1803,6 @@ static void irda_usb_disconnect(struct usb_interface *intf) struct irda_usb_cb *self = usb_get_intfdata(intf); int i; - IRDA_DEBUG(1, "%s()\n", __func__); - usb_set_intfdata(intf, NULL); if (!self) return; @@ -1859,7 +1851,7 @@ static void irda_usb_disconnect(struct usb_interface *intf) /* Free self and network device */ free_netdev(self->netdev); - IRDA_DEBUG(0, "%s(), USB IrDA Disconnected\n", __func__); + pr_debug("%s(), USB IrDA Disconnected\n", __func__); } #ifdef CONFIG_PM diff --git a/drivers/net/irda/irtty-sir.c 
b/drivers/net/irda/irtty-sir.c index 24b6dddd7f2f58445af7d0c5af47d399c31ac799..696852eb23c3f07a9e31668c7854617943822d11 100644 --- a/drivers/net/irda/irtty-sir.c +++ b/drivers/net/irda/irtty-sir.c @@ -231,7 +231,7 @@ static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp, dev = priv->dev; if (!dev) { - IRDA_WARNING("%s(), not ready yet!\n", __func__); + net_warn_ratelimited("%s(), not ready yet!\n", __func__); return; } @@ -240,7 +240,7 @@ static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp, * Characters received with a parity error, etc? */ if (fp && *fp++) { - IRDA_DEBUG(0, "Framing or parity error!\n"); + pr_debug("Framing or parity error!\n"); sirdev_receive(dev, NULL, 0); /* notify sir_dev (updating stats) */ return; } @@ -387,7 +387,7 @@ static int irtty_ioctl(struct tty_struct *tty, struct file *file, unsigned int c IRDA_ASSERT(priv != NULL, return -ENODEV;); IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EBADR;); - IRDA_DEBUG(3, "%s(cmd=0x%X)\n", __func__, cmd); + pr_debug("%s(cmd=0x%X)\n", __func__, cmd); dev = priv->dev; IRDA_ASSERT(dev != NULL, return -1;); @@ -477,7 +477,7 @@ static int irtty_open(struct tty_struct *tty) mutex_unlock(&irtty_mutex); - IRDA_DEBUG(0, "%s - %s: irda line discipline opened\n", __func__, tty->name); + pr_debug("%s - %s: irda line discipline opened\n", __func__, tty->name); return 0; @@ -528,7 +528,7 @@ static void irtty_close(struct tty_struct *tty) kfree(priv); - IRDA_DEBUG(0, "%s - %s: irda line discipline closed\n", __func__, tty->name); + pr_debug("%s - %s: irda line discipline closed\n", __func__, tty->name); } /* ------------------------------------------------------- */ @@ -555,8 +555,8 @@ static int __init irtty_sir_init(void) int err; if ((err = tty_register_ldisc(N_IRDA, &irda_ldisc)) != 0) - IRDA_ERROR("IrDA: can't register line discipline (err = %d)\n", - err); + net_err_ratelimited("IrDA: can't register line discipline (err = %d)\n", + err); return err; } @@ -565,8 +565,8 @@ static void __exit irtty_sir_cleanup(void) int err; if ((err = tty_unregister_ldisc(N_IRDA))) { - IRDA_ERROR("%s(), can't unregister line discipline (err = %d)\n", - __func__, err); + net_err_ratelimited("%s(), can't unregister line discipline (err = %d)\n", + __func__, err); } } diff --git a/drivers/net/irda/litelink-sir.c b/drivers/net/irda/litelink-sir.c index 6827777cbeea95c77ead150bccd6dc3c97a2a5d8..8eefcb44bac325e33b2c9e03635524a67c9338f2 100644 --- a/drivers/net/irda/litelink-sir.c +++ b/drivers/net/irda/litelink-sir.c @@ -76,8 +76,6 @@ static int litelink_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; - IRDA_DEBUG(2, "%s()\n", __func__); - /* Power up dongle */ sirdev_set_dtr_rts(dev, TRUE, TRUE); @@ -93,8 +91,6 @@ static int litelink_open(struct sir_dev *dev) static int litelink_close(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* Power off dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); @@ -111,8 +107,6 @@ static int litelink_change_speed(struct sir_dev *dev, unsigned speed) { int i; - IRDA_DEBUG(2, "%s()\n", __func__); - /* dongle already reset by irda-thread - current speed (dongle and * port) is the default speed (115200 for litelink!) 
*/ @@ -154,8 +148,6 @@ static int litelink_change_speed(struct sir_dev *dev, unsigned speed) */ static int litelink_reset(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* probably the power-up can be dropped here, but with only * 15 usec delay it's not worth the risk unless somebody with * the hardware confirms it doesn't break anything... diff --git a/drivers/net/irda/ma600-sir.c b/drivers/net/irda/ma600-sir.c index a9a81358477bcafc817b16497312ab29b850b760..a764817b47f1c342db34341475d163f26cca6094 100644 --- a/drivers/net/irda/ma600-sir.c +++ b/drivers/net/irda/ma600-sir.c @@ -65,13 +65,11 @@ static struct dongle_driver ma600 = { static int __init ma600_sir_init(void) { - IRDA_DEBUG(2, "%s()\n", __func__); return irda_register_dongle(&ma600); } static void __exit ma600_sir_cleanup(void) { - IRDA_DEBUG(2, "%s()\n", __func__); irda_unregister_dongle(&ma600); } @@ -86,8 +84,6 @@ static int ma600_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; - IRDA_DEBUG(2, "%s()\n", __func__); - sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Explicitly set the speeds we can accept */ @@ -104,8 +100,6 @@ static int ma600_open(struct sir_dev *dev) static int ma600_close(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* Power off dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); @@ -174,8 +168,8 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed) { u8 byte; - IRDA_DEBUG(2, "%s(), speed=%d (was %d)\n", __func__, - speed, dev->speed); + pr_debug("%s(), speed=%d (was %d)\n", __func__, + speed, dev->speed); /* dongle already reset, dongle and port at default speed (9600) */ @@ -198,13 +192,13 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed) sirdev_raw_read(dev, &byte, sizeof(byte)); if (byte != get_control_byte(speed)) { - IRDA_WARNING("%s(): bad control byte read-back %02x != %02x\n", - __func__, (unsigned) byte, - (unsigned) get_control_byte(speed)); + net_warn_ratelimited("%s(): bad control byte read-back %02x != %02x\n", + __func__, (unsigned)byte, + (unsigned)get_control_byte(speed)); return -1; } else - IRDA_DEBUG(2, "%s() control byte write read OK\n", __func__); + pr_debug("%s() control byte write read OK\n", __func__); #endif /* Set DTR, Set RTS */ @@ -236,8 +230,6 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed) static int ma600_reset(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* Reset the dongle : set DTR low for 10 ms */ sirdev_set_dtr_rts(dev, FALSE, TRUE); msleep(10); diff --git a/drivers/net/irda/mcp2120-sir.c b/drivers/net/irda/mcp2120-sir.c index 5e2f4859cee77f3febdefc03bad41dd9e70c57f4..2e33f91bfe8f60dc261698028b0e85122d167849 100644 --- a/drivers/net/irda/mcp2120-sir.c +++ b/drivers/net/irda/mcp2120-sir.c @@ -63,8 +63,6 @@ static int mcp2120_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; - IRDA_DEBUG(2, "%s()\n", __func__); - /* seems no explicit power-on required here and reset switching it on anyway */ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200; @@ -76,8 +74,6 @@ static int mcp2120_open(struct sir_dev *dev) static int mcp2120_close(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* Power off dongle */ /* reset and inhibit mcp2120 */ sirdev_set_dtr_rts(dev, TRUE, TRUE); @@ -102,8 +98,6 @@ static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed) u8 control[2]; static int ret = 0; - IRDA_DEBUG(2, "%s()\n", __func__); - switch (state) { case SIRDEV_STATE_DONGLE_SPEED: /* Set DTR to enter command mode */ @@ -155,7 
+149,8 @@ static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed) break; default: - IRDA_ERROR("%s(), undefine state %d\n", __func__, state); + net_err_ratelimited("%s(), undefined state %d\n", + __func__, state); ret = -EINVAL; break; } @@ -187,8 +182,6 @@ static int mcp2120_reset(struct sir_dev *dev) unsigned delay = 0; int ret = 0; - IRDA_DEBUG(2, "%s()\n", __func__); - switch (state) { case SIRDEV_STATE_DONGLE_RESET: //printk("mcp2120_reset: dongle_reset\n"); @@ -213,7 +206,8 @@ static int mcp2120_reset(struct sir_dev *dev) break; default: - IRDA_ERROR("%s(), undefined state %d\n", __func__, state); + net_err_ratelimited("%s(), undefined state %d\n", + __func__, state); ret = -EINVAL; break; } diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c index 16f8ffb50e0456aeb52c354dca9deeda8c20a041..e4d678fbeb2f079f54ea88e07cd6e3a4391167b0 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/net/irda/mcs7780.c @@ -197,14 +197,14 @@ error: /* Setup a communication between mcs7780 and agilent chip. */ static inline int mcs_setup_transceiver_agilent(struct mcs_cb *mcs) { - IRDA_WARNING("This transceiver type is not supported yet.\n"); + net_warn_ratelimited("This transceiver type is not supported yet\n"); return 1; } /* Setup a communication between mcs7780 and sharp chip. */ static inline int mcs_setup_transceiver_sharp(struct mcs_cb *mcs) { - IRDA_WARNING("This transceiver type is not supported yet.\n"); + net_warn_ratelimited("This transceiver type is not supported yet\n"); return 1; } @@ -213,9 +213,9 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs) { int ret = 0; __u16 rval; - char *msg; + const char *msg; - msg = "Basic transceiver setup error."; + msg = "Basic transceiver setup error"; /* read value of MODE Register, set the DRIVER and RESET bits * and write value back out to MODE Register @@ -261,7 +261,7 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs) if(unlikely(ret)) goto error; - msg = "transceiver model specific setup error."; + msg = "transceiver model specific setup error"; switch (mcs->transceiver_type) { case MCS_TSC_VISHAY: ret = mcs_setup_transceiver_vishay(mcs); @@ -276,8 +276,8 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs) break; default: - IRDA_WARNING("Unknown transceiver type: %d\n", - mcs->transceiver_type); + net_warn_ratelimited("Unknown transceiver type: %d\n", + mcs->transceiver_type); ret = 1; } if (unlikely(ret)) @@ -300,7 +300,7 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs) goto error; } - msg = "transceiver reset."; + msg = "transceiver reset"; ret = mcs_get_reg(mcs, MCS_MODE_REG, &rval); if (unlikely(ret != 2)) @@ -315,7 +315,7 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs) return ret; error: - IRDA_ERROR("%s\n", msg); + net_err_ratelimited("%s\n", msg); return ret; } @@ -399,8 +399,8 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len) new_len = len - 2; if(unlikely(new_len <= 0)) { - IRDA_ERROR("%s short frame length %d\n", - mcs->netdev->name, new_len); + net_err_ratelimited("%s short frame length %d\n", + mcs->netdev->name, new_len); ++mcs->netdev->stats.rx_errors; ++mcs->netdev->stats.rx_length_errors; return; @@ -409,8 +409,8 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len) fcs = irda_calc_crc16(~fcs, buf, len); if(fcs != GOOD_FCS) { - IRDA_ERROR("crc error calc 0x%x len %d\n", - fcs, new_len); + net_err_ratelimited("crc error calc 0x%x len %d\n", + fcs, new_len); mcs->netdev->stats.rx_errors++;
mcs->netdev->stats.rx_crc_errors++; return; @@ -452,8 +452,8 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len) new_len = len - 4; if(unlikely(new_len <= 0)) { - IRDA_ERROR("%s short frame length %d\n", - mcs->netdev->name, new_len); + net_err_ratelimited("%s short frame length %d\n", + mcs->netdev->name, new_len); ++mcs->netdev->stats.rx_errors; ++mcs->netdev->stats.rx_length_errors; return; @@ -461,7 +461,8 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len) fcs = ~(crc32_le(~0, buf, new_len)); if(fcs != get_unaligned_le32(buf + new_len)) { - IRDA_ERROR("crc error calc 0x%x len %d\n", fcs, new_len); + net_err_ratelimited("crc error calc 0x%x len %d\n", + fcs, new_len); mcs->netdev->stats.rx_errors++; mcs->netdev->stats.rx_crc_errors++; return; @@ -583,7 +584,7 @@ static int mcs_speed_change(struct mcs_cb *mcs) } while(cnt++ < 100 && (rval & MCS_IRINTX)); if (cnt > 100) { - IRDA_ERROR("unable to change speed\n"); + net_err_ratelimited("unable to change speed\n"); ret = -EIO; goto error; } @@ -634,8 +635,8 @@ static int mcs_speed_change(struct mcs_cb *mcs) default: ret = 1; - IRDA_WARNING("Unknown transceiver type: %d\n", - mcs->transceiver_type); + net_warn_ratelimited("Unknown transceiver type: %d\n", + mcs->transceiver_type); } if (unlikely(ret)) goto error; @@ -731,7 +732,7 @@ static int mcs_net_open(struct net_device *netdev) sprintf(hwname, "usb#%d", mcs->usbdev->devnum); mcs->irlap = irlap_open(netdev, &mcs->qos, hwname); if (!mcs->irlap) { - IRDA_ERROR("mcs7780: irlap_open failed\n"); + net_err_ratelimited("mcs7780: irlap_open failed\n"); goto error2; } @@ -851,7 +852,7 @@ static netdev_tx_t mcs_hard_xmit(struct sk_buff *skb, mcs->out_buf, wraplen, mcs_send_irq, mcs); if ((ret = usb_submit_urb(mcs->tx_urb, GFP_ATOMIC))) { - IRDA_ERROR("failed tx_urb: %d\n", ret); + net_err_ratelimited("failed tx_urb: %d\n", ret); switch (ret) { case -ENODEV: case -EPIPE: @@ -893,13 +894,13 @@ static int mcs_probe(struct usb_interface *intf, if (!ndev) goto error1; - IRDA_DEBUG(1, "MCS7780 USB-IrDA bridge found at %d.\n", udev->devnum); + pr_debug("MCS7780 USB-IrDA bridge found at %d.\n", udev->devnum); SET_NETDEV_DEV(ndev, &intf->dev); ret = usb_reset_configuration(udev); if (ret != 0) { - IRDA_ERROR("mcs7780: usb reset configuration failed\n"); + net_err_ratelimited("mcs7780: usb reset configuration failed\n"); goto error2; } @@ -941,8 +942,8 @@ static int mcs_probe(struct usb_interface *intf, if (ret != 0) goto error2; - IRDA_DEBUG(1, "IrDA: Registered MosChip MCS7780 device as %s\n", - ndev->name); + pr_debug("IrDA: Registered MosChip MCS7780 device as %s\n", + ndev->name); mcs->transceiver_type = transceiver_type; mcs->sir_tweak = sir_tweak; @@ -972,7 +973,7 @@ static void mcs_disconnect(struct usb_interface *intf) free_netdev(mcs->netdev); usb_set_intfdata(intf, NULL); - IRDA_DEBUG(0, "MCS7780 now disconnected.\n"); + pr_debug("MCS7780 now disconnected.\n"); } module_usb_driver(mcs_driver); diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c index 66bc03bdb138c36cd8f717b76fe7bc9cd57cbbb3..e7317b104bfbafa9daf58ca5d1e540de8d0839a9 100644 --- a/drivers/net/irda/nsc-ircc.c +++ b/drivers/net/irda/nsc-ircc.c @@ -211,7 +211,8 @@ static int __init nsc_ircc_init(void) ret = platform_driver_register(&nsc_ircc_driver); if (ret) { - IRDA_ERROR("%s, Can't register driver!\n", driver_name); + net_err_ratelimited("%s, Can't register driver!\n", + driver_name); return ret; } @@ -225,8 +226,8 @@ static int __init nsc_ircc_init(void) /* Probe for all the 
NSC chipsets we know about */ for (chip = chips; chip->name ; chip++) { - IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __func__, - chip->name); + pr_debug("%s(), Probing for %s ...\n", __func__, + chip->name); /* Try all config registers for this chip */ for (cfg = 0; cfg < ARRAY_SIZE(chip->cfg); cfg++) { @@ -237,7 +238,8 @@ static int __init nsc_ircc_init(void) /* Read index register */ reg = inb(cfg_base); if (reg == 0xff) { - IRDA_DEBUG(2, "%s() no chip at 0x%03x\n", __func__, cfg_base); + pr_debug("%s() no chip at 0x%03x\n", + __func__, cfg_base); continue; } @@ -245,8 +247,9 @@ static int __init nsc_ircc_init(void) outb(chip->cid_index, cfg_base); id = inb(cfg_base+1); if ((id & chip->cid_mask) == chip->cid_value) { - IRDA_DEBUG(2, "%s() Found %s chip, revision=%d\n", - __func__, chip->name, id & ~chip->cid_mask); + pr_debug("%s() Found %s chip, revision=%d\n", + __func__, chip->name, + id & ~chip->cid_mask); /* * If we found a correct PnP setting, @@ -260,7 +263,8 @@ static int __init nsc_ircc_init(void) info.irq = pnp_info.irq; if (info.fir_base < 0x2000) { - IRDA_MESSAGE("%s, chip->init\n", driver_name); + net_info_ratelimited("%s, chip->init\n", + driver_name); chip->init(chip, &info); } else chip->probe(chip, &info); @@ -275,7 +279,8 @@ static int __init nsc_ircc_init(void) * the chip. */ if (ret) { - IRDA_DEBUG(2, "%s, PnP init failed\n", driver_name); + pr_debug("%s, PnP init failed\n", + driver_name); memset(&info, 0, sizeof(chipio_t)); info.cfg_base = cfg_base; info.fir_base = io[i]; @@ -297,7 +302,8 @@ static int __init nsc_ircc_init(void) } i++; } else { - IRDA_DEBUG(2, "%s(), Wrong chip id=0x%02x\n", __func__, id); + pr_debug("%s(), Wrong chip id=0x%02x\n", + __func__, id); } } } @@ -361,31 +367,29 @@ static int __init nsc_ircc_open(chipio_t *info) void *ret; int err, chip_index; - IRDA_DEBUG(2, "%s()\n", __func__); - - for (chip_index = 0; chip_index < ARRAY_SIZE(dev_self); chip_index++) { if (!dev_self[chip_index]) break; } if (chip_index == ARRAY_SIZE(dev_self)) { - IRDA_ERROR("%s(), maximum number of supported chips reached!\n", __func__); + net_err_ratelimited("%s(), maximum number of supported chips reached!\n", + __func__); return -ENOMEM; } - IRDA_MESSAGE("%s, Found chip at base=0x%03x\n", driver_name, - info->cfg_base); + net_info_ratelimited("%s, Found chip at base=0x%03x\n", + driver_name, info->cfg_base); if ((nsc_ircc_setup(info)) == -1) return -1; - IRDA_MESSAGE("%s, driver loaded (Dag Brattli)\n", driver_name); + net_info_ratelimited("%s, driver loaded (Dag Brattli)\n", driver_name); dev = alloc_irdadev(sizeof(struct nsc_ircc_cb)); if (dev == NULL) { - IRDA_ERROR("%s(), can't allocate memory for " - "control block!\n", __func__); + net_err_ratelimited("%s(), can't allocate memory for control block!\n", + __func__); return -ENOMEM; } @@ -408,8 +412,8 @@ static int __init nsc_ircc_open(chipio_t *info) /* Reserve the ioports that we need */ ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name); if (!ret) { - IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", - __func__, self->io.fir_base); + net_warn_ratelimited("%s(), can't get iobase of 0x%03x\n", + __func__, self->io.fir_base); err = -ENODEV; goto out1; } @@ -460,21 +464,22 @@ static int __init nsc_ircc_open(chipio_t *info) err = register_netdev(dev); if (err) { - IRDA_ERROR("%s(), register_netdev() failed!\n", __func__); + net_err_ratelimited("%s(), register_netdev() failed!\n", + __func__); goto out4; } - IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name); + net_info_ratelimited("IrDA: 
Registered device %s\n", dev->name); /* Check if user has supplied a valid dongle id or not */ if ((dongle_id <= 0) || (dongle_id >= ARRAY_SIZE(dongle_types))) { dongle_id = nsc_ircc_read_dongle_id(self->io.fir_base); - IRDA_MESSAGE("%s, Found dongle: %s\n", driver_name, - dongle_types[dongle_id]); + net_info_ratelimited("%s, Found dongle: %s\n", + driver_name, dongle_types[dongle_id]); } else { - IRDA_MESSAGE("%s, Using dongle: %s\n", driver_name, - dongle_types[dongle_id]); + net_info_ratelimited("%s, Using dongle: %s\n", + driver_name, dongle_types[dongle_id]); } self->io.dongle_id = dongle_id; @@ -516,8 +521,6 @@ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self) { int iobase; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return -1;); iobase = self->io.fir_base; @@ -528,8 +531,8 @@ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self) unregister_netdev(self->netdev); /* Release the PORT that this driver is using */ - IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", - __func__, self->io.fir_base); + pr_debug("%s(), Releasing Region %03x\n", + __func__, self->io.fir_base); release_region(self->io.fir_base, self->io.fir_ext); if (self->tx_buff.head) @@ -567,7 +570,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info) case 0x2e8: outb(0x15, cfg_base+1); break; case 0x3f8: outb(0x16, cfg_base+1); break; case 0x2f8: outb(0x17, cfg_base+1); break; - default: IRDA_ERROR("%s(), invalid base_address", __func__); + default: net_err_ratelimited("%s(), invalid base_address\n", __func__); } /* Control Signal Routing Register (CSRT) */ @@ -579,7 +582,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info) case 9: temp = 0x05; break; case 11: temp = 0x06; break; case 15: temp = 0x07; break; - default: IRDA_ERROR("%s(), invalid irq", __func__); + default: net_err_ratelimited("%s(), invalid irq\n", __func__); } outb(CFG_108_CSRT, cfg_base); @@ -587,7 +590,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info) case 0: outb(0x08+temp, cfg_base+1); break; case 1: outb(0x10+temp, cfg_base+1); break; case 3: outb(0x18+temp, cfg_base+1); break; - default: IRDA_ERROR("%s(), invalid dma", __func__); + default: net_err_ratelimited("%s(), invalid dma\n", __func__); } outb(CFG_108_MCTL, cfg_base); /* Mode Control Register (MCTL) */ @@ -626,8 +629,8 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info) break; } info->sir_base = info->fir_base; - IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __func__, - info->fir_base); + pr_debug("%s(), probing fir_base=0x%03x\n", __func__, + info->fir_base); /* Read control signals routing register (CSRT) */ outb(CFG_108_CSRT, cfg_base); @@ -659,7 +662,7 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info) info->irq = 15; break; } - IRDA_DEBUG(2, "%s(), probing irq=%d\n", __func__, info->irq); + pr_debug("%s(), probing irq=%d\n", __func__, info->irq); /* Currently we only read Rx DMA but it will also be used for Tx */ switch ((reg >> 3) & 0x03) { @@ -676,7 +679,7 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info) info->dma = 3; break; } - IRDA_DEBUG(2, "%s(), probing dma=%d\n", __func__, info->dma); + pr_debug("%s(), probing dma=%d\n", __func__, info->dma); /* Read mode control register (MCTL) */ outb(CFG_108_MCTL, cfg_base); @@ -727,7 +730,7 @@ static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info) pnp = (reg >> 3) & 0x01; if (pnp) { - IRDA_DEBUG(2, "(), Chip is in PnP mode\n"); + pr_debug("(), Chip is in PnP mode\n"); outb(0x46, cfg_base); reg = 
(inb(cfg_base+1) & 0xfe) << 2; @@ -831,9 +834,8 @@ static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info) int enabled; /* User is sure about his config... accept it. */ - IRDA_DEBUG(2, "%s(): nsc_ircc_init_39x (user settings): " - "io=0x%04x, irq=%d, dma=%d\n", - __func__, info->fir_base, info->irq, info->dma); + pr_debug("%s(): nsc_ircc_init_39x (user settings): io=0x%04x, irq=%d, dma=%d\n", + __func__, info->fir_base, info->irq, info->dma); /* Access bank for SP2 */ outb(CFG_39X_LDN, cfg_base); @@ -873,8 +875,8 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info) int reg1, reg2, irq, irqt, dma1, dma2; int enabled, susp; - IRDA_DEBUG(2, "%s(), nsc_ircc_probe_39x, base=%d\n", - __func__, cfg_base); + pr_debug("%s(), nsc_ircc_probe_39x, base=%d\n", + __func__, cfg_base); /* This function should be executed with irq off to avoid * another driver messing with the Super I/O bank - Jean II */ @@ -908,7 +910,8 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info) outb(CFG_39X_SPC, cfg_base); susp = 1 - ((inb(cfg_base+1) & 0x02) >> 1); - IRDA_DEBUG(2, "%s(): io=0x%02x%02x, irq=%d (type %d), rxdma=%d, txdma=%d, enabled=%d (suspended=%d)\n", __func__, reg1,reg2,irq,irqt,dma1,dma2,enabled,susp); + pr_debug("%s(): io=0x%02x%02x, irq=%d (type %d), rxdma=%d, txdma=%d, enabled=%d (suspended=%d)\n", + __func__, reg1, reg2, irq, irqt, dma1, dma2, enabled, susp); /* Configure SP2 */ @@ -959,8 +962,8 @@ static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *i !(pnp_dma_flags(dev, 0) & IORESOURCE_DISABLED)) pnp_info.dma = pnp_dma(dev, 0); - IRDA_DEBUG(0, "%s() : From PnP, found firbase 0x%03X ; irq %d ; dma %d.\n", - __func__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma); + pr_debug("%s() : From PnP, found firbase 0x%03X ; irq %d ; dma %d.\n", + __func__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma); if((pnp_info.fir_base == 0) || (pnp_info.irq == -1) || (pnp_info.dma == -1)) { @@ -988,13 +991,13 @@ static int nsc_ircc_setup(chipio_t *info) switch_bank(iobase, BANK3); version = inb(iobase+MID); - IRDA_DEBUG(2, "%s() Driver %s Found chip version %02x\n", - __func__, driver_name, version); + pr_debug("%s() Driver %s Found chip version %02x\n", + __func__, driver_name, version); /* Should be 0x2? 
*/ if (0x20 != (version & 0xf0)) { - IRDA_ERROR("%s, Wrong chip version %02x\n", - driver_name, version); + net_err_ratelimited("%s, Wrong chip version %02x\n", + driver_name, version); return -1; } @@ -1092,39 +1095,39 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id) switch (dongle_id) { case 0x00: /* same as */ case 0x01: /* Differential serial interface */ - IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s not defined by irda yet\n", + __func__, dongle_types[dongle_id]); break; case 0x02: /* same as */ case 0x03: /* Reserved */ - IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s not defined by irda yet\n", + __func__, dongle_types[dongle_id]); break; case 0x04: /* Sharp RY5HD01 */ break; case 0x05: /* Reserved, but this is what the Thinkpad reports */ - IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s not defined by irda yet\n", + __func__, dongle_types[dongle_id]); break; case 0x06: /* Single-ended serial interface */ - IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s not defined by irda yet\n", + __func__, dongle_types[dongle_id]); break; case 0x07: /* Consumer-IR only */ - IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s is not for IrDA mode\n", + __func__, dongle_types[dongle_id]); break; case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */ - IRDA_DEBUG(0, "%s(), %s\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s\n", + __func__, dongle_types[dongle_id]); break; case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */ outb(0x28, iobase+7); /* Set irsl[0-2] as output */ break; case 0x0A: /* same as */ case 0x0B: /* Reserved */ - IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s not defined by irda yet\n", + __func__, dongle_types[dongle_id]); break; case 0x0C: /* same as */ case 0x0D: /* HP HSDL-1100/HSDL-2100 */ @@ -1138,15 +1141,15 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id) outb(0x28, iobase+7); /* Set irsl[0-2] as output */ break; case 0x0F: /* No dongle connected */ - IRDA_DEBUG(0, "%s(), %s\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s\n", + __func__, dongle_types[dongle_id]); switch_bank(iobase, BANK0); outb(0x62, iobase+MCR); break; default: - IRDA_DEBUG(0, "%s(), invalid dongle_id %#x", - __func__, dongle_id); + pr_debug("%s(), invalid dongle_id %#x\n", + __func__, dongle_id); } /* IRCFG1: IRSL1 and 2 are set to IrDA mode */ @@ -1177,31 +1180,31 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id) switch (dongle_id) { case 0x00: /* same as */ case 0x01: /* Differential serial interface */ - IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s not defined by irda yet\n", + __func__, dongle_types[dongle_id]); break; case 0x02: /* same as */ case 0x03: /* Reserved */ - IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s not defined by irda yet\n", + __func__, dongle_types[dongle_id]); break; case 0x04: /* Sharp RY5HD01 */ break; case 0x05: /* Reserved */ - IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]); break; case 0x06: /* Single-ended serial interface */ - IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s not defined by irda yet\n", + __func__, dongle_types[dongle_id]); break; case 0x07: /* Consumer-IR only */ - IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s is not for IrDA mode\n", + __func__, dongle_types[dongle_id]); break; case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */ - IRDA_DEBUG(0, "%s(), %s\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s\n", + __func__, dongle_types[dongle_id]); outb(0x00, iobase+4); if (speed > 115200) outb(0x01, iobase+4); @@ -1219,8 +1222,8 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id) break; case 0x0A: /* same as */ case 0x0B: /* Reserved */ - IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s not defined by irda yet\n", + __func__, dongle_types[dongle_id]); break; case 0x0C: /* same as */ case 0x0D: /* HP HSDL-1100/HSDL-2100 */ @@ -1228,14 +1231,14 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id) case 0x0E: /* Supports SIR Mode only */ break; case 0x0F: /* No dongle connected */ - IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n", - __func__, dongle_types[dongle_id]); + pr_debug("%s(), %s is not for IrDA mode\n", + __func__, dongle_types[dongle_id]); switch_bank(iobase, BANK0); outb(0x62, iobase+MCR); break; default: - IRDA_DEBUG(0, "%s(), invalid data_rate\n", __func__); + pr_debug("%s(), invalid data_rate\n", __func__); } /* Restore bank register */ outb(bank, iobase+BSR); @@ -1256,7 +1259,7 @@ static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed) __u8 bank; __u8 ier; /* Interrupt enable register */ - IRDA_DEBUG(2, "%s(), speed=%d\n", __func__, speed); + pr_debug("%s(), speed=%d\n", __func__, speed); IRDA_ASSERT(self != NULL, return 0;); @@ -1289,20 +1292,20 @@ static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed) outb(inb(iobase+4) | 0x04, iobase+4); mcr = MCR_MIR; - IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__); + pr_debug("%s(), handling baud of 576000\n", __func__); break; case 1152000: mcr = MCR_MIR; - IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__); + pr_debug("%s(), handling baud of 1152000\n", __func__); break; case 4000000: mcr = MCR_FIR; - IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__); + pr_debug("%s(), handling baud of 4000000\n", __func__); break; default: mcr = MCR_FIR; - IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", - __func__, speed); + pr_debug("%s(), unknown baud rate of %d\n", + __func__, speed); break; } @@ -1609,15 +1612,13 @@ static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size) int actual = 0; __u8 bank; - IRDA_DEBUG(4, "%s()\n", __func__); - /* Save current bank */ bank = inb(iobase+BSR); switch_bank(iobase, BANK0); if (!(inb_p(iobase+LSR) & LSR_TXEMP)) { - IRDA_DEBUG(4, "%s(), warning, FIFO not empty yet!\n", - __func__); + pr_debug("%s(), warning, FIFO not empty yet!\n", + __func__); /* FIFO may still be filled to the Tx interrupt threshold */ fifo_size -= 17; @@ -1629,8 +1630,8 @@ static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size) outb(buf[actual++], iobase+TXD); } - IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n", - __func__, fifo_size, actual, len); + pr_debug("%s(), fifo_size %d ; %d sent of 
%d\n", + __func__, fifo_size, actual, len); /* Restore bank */ outb(bank, iobase+BSR); @@ -1651,8 +1652,6 @@ static int nsc_ircc_dma_xmit_complete(struct nsc_ircc_cb *self) __u8 bank; int ret = TRUE; - IRDA_DEBUG(2, "%s()\n", __func__); - iobase = self->io.fir_base; /* Save current bank */ @@ -1782,7 +1781,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase) len = inb(iobase+RFLFL) | ((inb(iobase+RFLFH) & 0x1f) << 8); if (st_fifo->tail >= MAX_RX_WINDOW) { - IRDA_DEBUG(0, "%s(), window is full!\n", __func__); + pr_debug("%s(), window is full!\n", __func__); continue; } @@ -1872,9 +1871,6 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase) skb = dev_alloc_skb(len+1); if (skb == NULL) { - IRDA_WARNING("%s(), memory squeeze, " - "dropping frame.\n", - __func__); self->netdev->stats.rx_dropped++; /* Restore bank register */ @@ -1979,7 +1975,7 @@ static void nsc_ircc_sir_interrupt(struct nsc_ircc_cb *self, int eir) * Need to be after self->io.direction to avoid race with * nsc_ircc_hard_xmit_sir() - Jean II */ if (self->new_speed) { - IRDA_DEBUG(2, "%s(), Changing speed!\n", __func__); + pr_debug("%s(), Changing speed!\n", __func__); self->ier = nsc_ircc_change_speed(self, self->new_speed); self->new_speed = 0; @@ -2063,9 +2059,8 @@ static void nsc_ircc_fir_interrupt(struct nsc_ircc_cb *self, int iobase, nsc_ircc_dma_receive(self); self->ier = IER_SFIF_IE; } else - IRDA_WARNING("%s(), potential " - "Tx queue lockup !\n", - __func__); + net_warn_ratelimited("%s(), potential Tx queue lockup !\n", + __func__); } } else { /* Not finished yet, so interrupt on DMA again */ @@ -2174,7 +2169,6 @@ static int nsc_ircc_net_open(struct net_device *dev) char hwname[32]; __u8 bank; - IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(dev != NULL, return -1;); self = netdev_priv(dev); @@ -2184,8 +2178,8 @@ static int nsc_ircc_net_open(struct net_device *dev) iobase = self->io.fir_base; if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, dev->name, dev)) { - IRDA_WARNING("%s, unable to allocate irq=%d\n", - driver_name, self->io.irq); + net_warn_ratelimited("%s, unable to allocate irq=%d\n", + driver_name, self->io.irq); return -EAGAIN; } /* @@ -2193,8 +2187,8 @@ static int nsc_ircc_net_open(struct net_device *dev) * failure. 
*/ if (request_dma(self->io.dma, dev->name)) { - IRDA_WARNING("%s, unable to allocate dma=%d\n", - driver_name, self->io.dma); + net_warn_ratelimited("%s, unable to allocate dma=%d\n", + driver_name, self->io.dma); free_irq(self->io.irq, dev); return -EAGAIN; } @@ -2236,7 +2230,6 @@ static int nsc_ircc_net_close(struct net_device *dev) int iobase; __u8 bank; - IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(dev != NULL, return -1;); @@ -2290,7 +2283,7 @@ static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) IRDA_ASSERT(self != NULL, return -1;); - IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd); + pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd); switch (cmd) { case SIOCSBANDWIDTH: /* Set bandwidth */ @@ -2329,7 +2322,7 @@ static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state) if (self->io.suspended) return 0; - IRDA_DEBUG(1, "%s, Suspending\n", driver_name); + pr_debug("%s, Suspending\n", driver_name); rtnl_lock(); if (netif_running(self->netdev)) { @@ -2363,7 +2356,7 @@ static int nsc_ircc_resume(struct platform_device *dev) if (!self->io.suspended) return 0; - IRDA_DEBUG(1, "%s, Waking up\n", driver_name); + pr_debug("%s, Waking up\n", driver_name); rtnl_lock(); nsc_ircc_setup(&self->io); @@ -2372,8 +2365,8 @@ static int nsc_ircc_resume(struct platform_device *dev) if (netif_running(self->netdev)) { if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, self->netdev->name, self->netdev)) { - IRDA_WARNING("%s, unable to allocate irq=%d\n", - driver_name, self->io.irq); + net_warn_ratelimited("%s, unable to allocate irq=%d\n", + driver_name, self->io.irq); /* * Don't fail resume process, just kill this diff --git a/drivers/net/irda/old_belkin-sir.c b/drivers/net/irda/old_belkin-sir.c index f237136f38278ca018f6d612bd48a28568b50104..a7c2e990ae69414da06f4e4859f8069459e95079 100644 --- a/drivers/net/irda/old_belkin-sir.c +++ b/drivers/net/irda/old_belkin-sir.c @@ -90,8 +90,6 @@ static int old_belkin_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; - IRDA_DEBUG(2, "%s()\n", __func__); - /* Power on dongle */ sirdev_set_dtr_rts(dev, TRUE, TRUE); @@ -108,8 +106,6 @@ static int old_belkin_open(struct sir_dev *dev) static int old_belkin_close(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* Power off dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); @@ -123,8 +119,6 @@ static int old_belkin_close(struct sir_dev *dev) */ static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed) { - IRDA_DEBUG(2, "%s()\n", __func__); - dev->speed = 9600; return (speed==dev->speed) ? 
0 : -EINVAL; } @@ -137,8 +131,6 @@ static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed) */ static int old_belkin_reset(struct sir_dev *dev) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* This dongles speed "defaults" to 9600 bps ;-) */ dev->speed = 9600; diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c index 43e9ab4f4d7edc0b6771c1664d2064c164e37ec3..6af26a7d787ccead482b23638de657957486478d 100644 --- a/drivers/net/irda/sir_dev.c +++ b/drivers/net/irda/sir_dev.c @@ -82,7 +82,7 @@ static int sirdev_tx_complete_fsm(struct sir_dev *dev) return 0; default: - IRDA_ERROR("%s - undefined state\n", __func__); + net_err_ratelimited("%s - undefined state\n", __func__); return -EINVAL; } fsm->substate = next_state; @@ -109,11 +109,11 @@ static void sirdev_config_fsm(struct work_struct *work) int ret = -1; unsigned delay; - IRDA_DEBUG(2, "%s(), <%ld>\n", __func__, jiffies); + pr_debug("%s(), <%ld>\n", __func__, jiffies); do { - IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n", - __func__, fsm->state, fsm->substate); + pr_debug("%s - state=0x%04x / substate=0x%04x\n", + __func__, fsm->state, fsm->substate); next_state = fsm->state; delay = 0; @@ -251,12 +251,13 @@ static void sirdev_config_fsm(struct work_struct *work) break; default: - IRDA_ERROR("%s - undefined state\n", __func__); + net_err_ratelimited("%s - undefined state\n", __func__); fsm->result = -EINVAL; /* fall thru */ case SIRDEV_STATE_ERROR: - IRDA_ERROR("%s - error: %d\n", __func__, fsm->result); + net_err_ratelimited("%s - error: %d\n", + __func__, fsm->result); #if 0 /* don't enable this before we have netdev->tx_timeout to recover */ netif_stop_queue(dev->netdev); @@ -286,12 +287,12 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par { struct sir_fsm *fsm = &dev->fsm; - IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __func__, - initial_state, param); + pr_debug("%s - state=0x%04x / param=%u\n", __func__, + initial_state, param); if (down_trylock(&fsm->sem)) { if (in_interrupt() || in_atomic() || irqs_disabled()) { - IRDA_DEBUG(1, "%s(), state machine busy!\n", __func__); + pr_debug("%s(), state machine busy!\n", __func__); return -EWOULDBLOCK; } else down(&fsm->sem); @@ -299,7 +300,7 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par if (fsm->state == SIRDEV_STATE_DEAD) { /* race with sirdev_close should never happen */ - IRDA_ERROR("%s(), instance staled!\n", __func__); + net_err_ratelimited("%s(), stale instance!\n", __func__); up(&fsm->sem); return -ESTALE; /* or better EPIPE?
 		return -ESTALE;		/* or better EPIPE? */
 	}
 
@@ -344,7 +345,7 @@ int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
 {
 	int err;
 
-	IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __func__, type);
+	pr_debug("%s : requesting dongle %d.\n", __func__, type);
 
 	err = sirdev_schedule_dongle_open(dev, type);
 	if (unlikely(err))
@@ -379,7 +380,7 @@ int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
 
 	ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
 	if (ret > 0) {
-		IRDA_DEBUG(3, "%s(), raw-tx started\n", __func__);
+		pr_debug("%s(), raw-tx started\n", __func__);
 
 		dev->tx_buff.data += ret;
 		dev->tx_buff.len -= ret;
@@ -439,8 +440,8 @@ void sirdev_write_complete(struct sir_dev *dev)
 
 	spin_lock_irqsave(&dev->tx_lock, flags);
 
-	IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n",
-		   __func__, dev->tx_buff.len);
+	pr_debug("%s() - dev->tx_buff.len = %d\n",
+		 __func__, dev->tx_buff.len);
 
 	if (likely(dev->tx_buff.len > 0))  {
 		/* Write data left in transmit buffer */
@@ -452,8 +453,8 @@ void sirdev_write_complete(struct sir_dev *dev)
 		}
 		else if (unlikely(actual<0)) {
 			/* could be dropped later when we have tx_timeout to recover */
-			IRDA_ERROR("%s: drv->do_write failed (%d)\n",
-				   __func__, actual);
+			net_err_ratelimited("%s: drv->do_write failed (%d)\n",
+					    __func__, actual);
 			if ((skb=dev->tx_skb) != NULL) {
 				dev->tx_skb = NULL;
 				dev_kfree_skb_any(skb);
@@ -474,7 +475,7 @@ void sirdev_write_complete(struct sir_dev *dev)
 		 * restarted when the irda-thread has completed the request.
 		 */
 
-		IRDA_DEBUG(3, "%s(), raw-tx done\n", __func__);
+		pr_debug("%s(), raw-tx done\n", __func__);
 		dev->raw_tx = 0;
 		goto done;	/* no post-frame handling in raw mode */
 	}
@@ -491,7 +492,7 @@ void sirdev_write_complete(struct sir_dev *dev)
 	 * re-activated.
 	 */
 
-	IRDA_DEBUG(5, "%s(), finished with frame!\n", __func__);
+	pr_debug("%s(), finished with frame!\n", __func__);
 
 	if ((skb=dev->tx_skb) != NULL) {
 		dev->tx_skb = NULL;
@@ -501,14 +502,14 @@ void sirdev_write_complete(struct sir_dev *dev)
 	}
 
 	if (unlikely(dev->new_speed > 0)) {
-		IRDA_DEBUG(5, "%s(), Changing speed!\n", __func__);
+		pr_debug("%s(), Changing speed!\n", __func__);
 		err = sirdev_schedule_speed(dev, dev->new_speed);
 		if (unlikely(err)) {
 			/* should never happen
 			 * forget the speed change and hope the stack recovers
 			 */
-			IRDA_ERROR("%s - schedule speed change failed: %d\n",
-				   __func__, err);
+			net_err_ratelimited("%s - schedule speed change failed: %d\n",
+					    __func__, err);
 			netif_wake_queue(dev->netdev);
 		}
 		/* else: success
@@ -535,13 +536,13 @@ EXPORT_SYMBOL(sirdev_write_complete);
 int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
 {
 	if (!dev || !dev->netdev) {
-		IRDA_WARNING("%s(), not ready yet!\n", __func__);
+		net_warn_ratelimited("%s(), not ready yet!\n", __func__);
 		return -1;
 	}
 
 	if (!dev->irlap) {
-		IRDA_WARNING("%s - too early: %p / %zd!\n",
-			     __func__, cp, count);
+		net_warn_ratelimited("%s - too early: %p / %zd!\n",
+				     __func__, cp, count);
 		return -1;
 	}
 
@@ -551,7 +552,7 @@ int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
 		 */
 		irda_device_set_media_busy(dev->netdev, TRUE);
 		dev->netdev->stats.rx_dropped++;
-		IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __func__, count);
+		pr_debug("%s; rx-drop: %zd\n", __func__, count);
 		return 0;
 	}
 
@@ -597,7 +598,7 @@ static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
 
 	netif_stop_queue(ndev);
 
-	IRDA_DEBUG(3, "%s(), skb->len = %d\n", __func__, skb->len);
+	pr_debug("%s(), skb->len = %d\n", __func__, skb->len);
 
 	speed = irda_get_next_speed(skb);
 	if ((speed != dev->speed) && (speed != -1)) {
@@ -634,7 +635,7 @@ static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
 
 	/* Check problems */
 	if(spin_is_locked(&dev->tx_lock)) {
-		IRDA_DEBUG(3, "%s(), write not completed\n", __func__);
+		pr_debug("%s(), write not completed\n", __func__);
 	}
 
 	/* serialize with write completion */
@@ -661,8 +662,8 @@ static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
 	}
 	else if (unlikely(actual < 0)) {
 		/* could be dropped later when we have tx_timeout to recover */
-		IRDA_ERROR("%s: drv->do_write failed (%d)\n",
-			   __func__, actual);
+		net_err_ratelimited("%s: drv->do_write failed (%d)\n",
+				    __func__, actual);
 		dev_kfree_skb_any(skb);
 		dev->netdev->stats.tx_errors++;
 		dev->netdev->stats.tx_dropped++;
@@ -683,7 +684,7 @@ static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 
 	IRDA_ASSERT(dev != NULL, return -1;);
 
-	IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd);
+	pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd);
 
 	switch (cmd) {
 	case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -800,8 +801,6 @@ static int sirdev_open(struct net_device *ndev)
 	if (!try_module_get(drv->owner))
 		return -ESTALE;
 
-	IRDA_DEBUG(2, "%s()\n", __func__);
-
 	if (sirdev_alloc_buffers(dev))
 		goto errout_dec;
@@ -818,7 +817,7 @@ static int sirdev_open(struct net_device *ndev)
 
 	netif_wake_queue(ndev);
 
-	IRDA_DEBUG(2, "%s - done, speed = %d\n", __func__, dev->speed);
+	pr_debug("%s - done, speed = %d\n", __func__, dev->speed);
 
 	return 0;
 
@@ -838,7 +837,7 @@ static int sirdev_close(struct net_device *ndev)
 	struct sir_dev *dev = netdev_priv(ndev);
 	const struct sir_driver *drv;
 
-//	IRDA_DEBUG(0, "%s\n", __func__);
+/*	pr_debug("%s\n", __func__); */
 
 	netif_stop_queue(ndev);
 
@@ -880,7 +879,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
 	struct net_device *ndev;
 	struct sir_dev *dev;
 
-	IRDA_DEBUG(0, "%s - %s\n", __func__, name);
+	pr_debug("%s - %s\n", __func__, name);
 
 	/* instead of adding tests to protect against drv->do_write==NULL
 	 * at several places we refuse to create a sir_dev instance for
@@ -894,7 +893,8 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
 	 */
 	ndev = alloc_irdadev(sizeof(*dev));
 	if (ndev == NULL) {
-		IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __func__);
+		net_err_ratelimited("%s - Can't allocate memory for IrDA control block!\n",
+				    __func__);
 		goto out;
 	}
 	dev = netdev_priv(ndev);
@@ -919,7 +919,8 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
 	ndev->netdev_ops = &sirdev_ops;
 
 	if (register_netdev(ndev)) {
-		IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
+		net_err_ratelimited("%s(), register_netdev() failed!\n",
+				    __func__);
 		goto out_freenetdev;
 	}
 
@@ -936,7 +937,7 @@ int sirdev_put_instance(struct sir_dev *dev)
 {
 	int err = 0;
 
-	IRDA_DEBUG(0, "%s\n", __func__);
+	pr_debug("%s\n", __func__);
 
 	atomic_set(&dev->enable_rx, 0);
 
@@ -946,7 +947,7 @@ int sirdev_put_instance(struct sir_dev *dev)
 	if (dev->dongle_drv)
 		err = sirdev_schedule_dongle_close(dev);
 	if (err)
-		IRDA_ERROR("%s - error %d\n", __func__, err);
+		net_err_ratelimited("%s - error %d\n", __func__, err);
 
 	sirdev_close(dev->netdev);
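The hunks above all apply one mechanical mapping, which the rest of this patch repeats file by file. As a hedged illustration (not part of the patch itself): pr_debug() compiles to nothing unless DEBUG is defined or CONFIG_DYNAMIC_DEBUG enables the call site at runtime, so IRDA_DEBUG's numeric verbosity level is simply dropped; the net_<level>_ratelimited() helpers always print, but back off under net_ratelimit().

	/* Illustrative sketch of the conversion pattern, not patch content.
	 * Verbosity selection moves from IRDA_DEBUG's level argument to
	 * dynamic debug (or a -DDEBUG build).
	 */
	IRDA_DEBUG(2, "%s()\n", __func__);	/* old: driver-private macro */
	pr_debug("%s()\n", __func__);		/* new: no-op unless enabled */

	IRDA_ERROR("%s - error %d\n", __func__, err);	/* old: unconditional */
	net_err_ratelimited("%s - error %d\n",		/* new: rate-limited  */
			    __func__, err);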
diff --git a/drivers/net/irda/sir_dongle.c b/drivers/net/irda/sir_dongle.c
index cfbabb63f5cc69f44c618fd4971930d779228e38..7436f73ff1bb0389444f0eec39e0b6b94efa1588 100644
--- a/drivers/net/irda/sir_dongle.c
+++ b/drivers/net/irda/sir_dongle.c
@@ -34,8 +34,8 @@ int irda_register_dongle(struct dongle_driver *new)
 	struct list_head *entry;
 	struct dongle_driver *drv;
 
-	IRDA_DEBUG(0, "%s : registering dongle \"%s\" (%d).\n",
-		   __func__, new->driver_name, new->type);
+	pr_debug("%s : registering dongle \"%s\" (%d).\n",
+		 __func__, new->driver_name, new->type);
 
 	mutex_lock(&dongle_list_lock);
 	list_for_each(entry, &dongle_list) {
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 282120430f1271bfbfb4ea06b301d4560ef1daf0..b455ffe8850ceda13255ad13e5a52d236b3ae8b2 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -419,13 +419,16 @@ static int __init smsc_ircc_legacy_probe(void)
 #ifdef CONFIG_PCI
 	if (smsc_ircc_preconfigure_subsystems(ircc_cfg, ircc_fir, ircc_sir, ircc_dma, ircc_irq) < 0) {
 		/* Ignore errors from preconfiguration */
-		IRDA_ERROR("%s, Preconfiguration failed !\n", driver_name);
+		net_err_ratelimited("%s, Preconfiguration failed !\n",
+				    driver_name);
 	}
 #endif
 
 	if (ircc_fir > 0 && ircc_sir > 0) {
-		IRDA_MESSAGE(" Overriding FIR address 0x%04x\n", ircc_fir);
-		IRDA_MESSAGE(" Overriding SIR address 0x%04x\n", ircc_sir);
+		net_info_ratelimited(" Overriding FIR address 0x%04x\n",
+				     ircc_fir);
+		net_info_ratelimited(" Overriding SIR address 0x%04x\n",
+				     ircc_sir);
 
 		if (smsc_ircc_open(ircc_fir, ircc_sir, ircc_dma, ircc_irq))
 			ret = -ENODEV;
@@ -434,8 +437,8 @@ static int __init smsc_ircc_legacy_probe(void)
 
 	/* try user provided configuration register base address */
 	if (ircc_cfg > 0) {
-		IRDA_MESSAGE(" Overriding configuration address "
-			     "0x%04x\n", ircc_cfg);
+		net_info_ratelimited(" Overriding configuration address 0x%04x\n",
+				     ircc_cfg);
 		if (!smsc_superio_fdc(ircc_cfg))
 			ret = 0;
 		if (!smsc_superio_lpc(ircc_cfg))
@@ -458,11 +461,12 @@ static int __init smsc_ircc_init(void)
 {
 	int ret;
 
-	IRDA_DEBUG(1, "%s\n", __func__);
+	pr_debug("%s\n", __func__);
 
 	ret = platform_driver_register(&smsc_ircc_driver);
 	if (ret) {
-		IRDA_ERROR("%s, Can't register driver!\n", driver_name);
+		net_err_ratelimited("%s, Can't register driver!\n",
+				    driver_name);
 		return ret;
 	}
 
@@ -519,7 +523,7 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
 	struct net_device *dev;
 	int err;
 
-	IRDA_DEBUG(1, "%s\n", __func__);
+	pr_debug("%s\n", __func__);
 
 	err = smsc_ircc_present(fir_base, sir_base);
 	if (err)
@@ -527,7 +531,7 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
 
 	err = -ENOMEM;
 	if (dev_count >= ARRAY_SIZE(dev_self)) {
-		IRDA_WARNING("%s(), too many devices!\n", __func__);
+		net_warn_ratelimited("%s(), too many devices!\n", __func__);
 		goto err_out1;
 	}
 
@@ -536,7 +540,8 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
 	 */
 	dev = alloc_irdadev(sizeof(struct smsc_ircc_cb));
 	if (!dev) {
-		IRDA_WARNING("%s() can't allocate net device\n", __func__);
+		net_warn_ratelimited("%s() can't allocate net device\n",
+				     __func__);
 		goto err_out1;
 	}
 
@@ -588,8 +593,8 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
 
 	err = register_netdev(self->netdev);
 	if (err) {
-		IRDA_ERROR("%s, Network device registration failed!\n",
-			   driver_name);
+		net_err_ratelimited("%s, Network device registration failed!\n",
+				    driver_name);
 		goto err_out4;
 	}
 
@@ -601,7 +606,7 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
 	}
 	platform_set_drvdata(self->pldev, self);
 
-	IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
+	net_info_ratelimited("IrDA: Registered device %s\n", dev->name);
 	dev_count++;
 
 	return 0;
@@ -637,15 +642,15 @@ static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
 
 	if (!request_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT,
 			    driver_name)) {
-		IRDA_WARNING("%s: can't get fir_base of 0x%03x\n",
-			     __func__, fir_base);
+		net_warn_ratelimited("%s: can't get fir_base of 0x%03x\n",
+				     __func__, fir_base);
 		goto out1;
 	}
 
 	if (!request_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT,
 			    driver_name)) {
-		IRDA_WARNING("%s: can't get sir_base of 0x%03x\n",
-			     __func__, sir_base);
+		net_warn_ratelimited("%s: can't get sir_base of 0x%03x\n",
+				     __func__, sir_base);
 		goto out2;
 	}
 
@@ -660,13 +665,13 @@ static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
 	irq = (config & IRCC_INTERFACE_IRQ_MASK) >> 4;
 
 	if (high != 0x10 || low != 0xb8 || (chip != 0xf1 && chip != 0xf2)) {
-		IRDA_WARNING("%s(), addr 0x%04x - no device found!\n",
-			     __func__, fir_base);
+		net_warn_ratelimited("%s(), addr 0x%04x - no device found!\n",
+				     __func__, fir_base);
 		goto out3;
 	}
-	IRDA_MESSAGE("SMsC IrDA Controller found\n IrCC version %d.%d, "
-		     "firport 0x%03x, sirport 0x%03x dma=%d, irq=%d\n",
-		     chip & 0x0f, version, fir_base, sir_base, dma, irq);
+	net_info_ratelimited("SMsC IrDA Controller found\n IrCC version %d.%d, firport 0x%03x, sirport 0x%03x dma=%d, irq=%d\n",
+			     chip & 0x0f, version,
+			     fir_base, sir_base, dma, irq);
 
 	return 0;
 
@@ -704,16 +709,16 @@ static void smsc_ircc_setup_io(struct smsc_ircc_cb *self,
 
 	if (irq != IRQ_INVAL) {
 		if (irq != chip_irq)
-			IRDA_MESSAGE("%s, Overriding IRQ - chip says %d, using %d\n",
-				     driver_name, chip_irq, irq);
+			net_info_ratelimited("%s, Overriding IRQ - chip says %d, using %d\n",
+					     driver_name, chip_irq, irq);
 		self->io.irq = irq;
 	} else
 		self->io.irq = chip_irq;
 
 	if (dma != DMA_INVAL) {
 		if (dma != chip_dma)
-			IRDA_MESSAGE("%s, Overriding DMA - chip says %d, using %d\n",
-				     driver_name, chip_dma, dma);
+			net_info_ratelimited("%s, Overriding DMA - chip says %d, using %d\n",
+					     driver_name, chip_dma, dma);
 		self->io.dma = dma;
 	} else
 		self->io.dma = chip_dma;
@@ -798,7 +803,7 @@ static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd
 
 	IRDA_ASSERT(self != NULL, return -1;);
 
-	IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+	pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
 
 	switch (cmd) {
 	case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -852,8 +857,8 @@ static void smsc_ircc_timeout(struct net_device *dev)
 	struct smsc_ircc_cb *self = netdev_priv(dev);
 	unsigned long flags;
 
-	IRDA_WARNING("%s: transmit timed out, changing speed to: %d\n",
-		     dev->name, self->io.speed);
+	net_warn_ratelimited("%s: transmit timed out, changing speed to: %d\n",
+			     dev->name, self->io.speed);
 	spin_lock_irqsave(&self->lock, flags);
 	smsc_ircc_sir_start(self);
 	smsc_ircc_change_speed(self, self->io.speed);
@@ -877,7 +882,7 @@ static netdev_tx_t smsc_ircc_hard_xmit_sir(struct sk_buff *skb,
 	unsigned long flags;
 	s32 speed;
 
-	IRDA_DEBUG(1, "%s\n", __func__);
+	pr_debug("%s\n", __func__);
 
 	IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);
 
@@ -952,21 +957,21 @@ static void smsc_ircc_set_fir_speed(struct smsc_ircc_cb *self, u32 speed)
 		ir_mode = IRCC_CFGA_IRDA_HDLC;
 		ctrl = IRCC_CRC;
 		fast = 0;
-		IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__);
+		pr_debug("%s(), handling baud of 576000\n", __func__);
 		break;
 	case 1152000:
 		ir_mode = IRCC_CFGA_IRDA_HDLC;
 		ctrl = IRCC_1152 | IRCC_CRC;
 		fast = IRCC_LCR_A_FAST | IRCC_LCR_A_GP_DATA;
-		IRDA_DEBUG(0, "%s(), handling baud of 1152000\n",
-			   __func__);
+		pr_debug("%s(), handling baud of 1152000\n",
+			 __func__);
 		break;
 	case 4000000:
 		ir_mode = IRCC_CFGA_IRDA_4PPM;
 		ctrl = IRCC_CRC;
 		fast = IRCC_LCR_A_FAST;
-		IRDA_DEBUG(0, "%s(), handling baud of 4000000\n",
-			   __func__);
+		pr_debug("%s(), handling baud of 4000000\n",
+			 __func__);
 		break;
 	}
 #if 0
@@ -994,7 +999,7 @@ static void smsc_ircc_fir_start(struct smsc_ircc_cb *self)
 	struct net_device *dev;
 	int fir_base;
 
-	IRDA_DEBUG(1, "%s\n", __func__);
+	pr_debug("%s\n", __func__);
 
 	IRDA_ASSERT(self != NULL, return;);
 	dev = self->netdev;
@@ -1039,7 +1044,7 @@ static void smsc_ircc_fir_stop(struct smsc_ircc_cb *self)
 {
 	int fir_base;
 
-	IRDA_DEBUG(1, "%s\n", __func__);
+	pr_debug("%s\n", __func__);
 
 	IRDA_ASSERT(self != NULL, return;);
 
@@ -1063,7 +1068,7 @@ static void smsc_ircc_change_speed(struct smsc_ircc_cb *self, u32 speed)
 	struct net_device *dev;
 	int last_speed_was_sir;
 
-	IRDA_DEBUG(0, "%s() changing speed to: %d\n", __func__, speed);
+	pr_debug("%s() changing speed to: %d\n", __func__, speed);
 
 	IRDA_ASSERT(self != NULL, return;);
 	dev = self->netdev;
@@ -1131,7 +1136,7 @@ static void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, __u32 speed)
 	int lcr;    /* Line control reg */
 	int divisor;
 
-	IRDA_DEBUG(0, "%s(), Setting speed to: %d\n", __func__, speed);
+	pr_debug("%s(), Setting speed to: %d\n", __func__, speed);
 
 	IRDA_ASSERT(self != NULL, return;);
 	iobase = self->io.sir_base;
@@ -1166,7 +1171,7 @@ static void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, __u32 speed)
 	/* Turn on interrups */
 	outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER);
 
-	IRDA_DEBUG(2, "%s() speed changed to: %d\n", __func__, speed);
+	pr_debug("%s() speed changed to: %d\n", __func__, speed);
 }
 
@@ -1250,7 +1255,7 @@ static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int bofs)
 	int iobase = self->io.fir_base;
 	u8 ctrl;
 
-	IRDA_DEBUG(3, "%s\n", __func__);
+	pr_debug("%s\n", __func__);
 #if 1
 	/* Disable Rx */
 	register_bank(iobase, 0);
@@ -1304,7 +1309,7 @@ static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self)
 {
 	int iobase = self->io.fir_base;
 
-	IRDA_DEBUG(3, "%s\n", __func__);
+	pr_debug("%s\n", __func__);
 #if 0
 	/* Disable Tx */
 	register_bank(iobase, 0);
@@ -1408,7 +1413,7 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
 
 	register_bank(iobase, 0);
 
-	IRDA_DEBUG(3, "%s\n", __func__);
+	pr_debug("%s\n", __func__);
 #if 0
 	/* Disable Rx */
 	register_bank(iobase, 0);
@@ -1419,8 +1424,8 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
 	lsr= inb(iobase + IRCC_LSR);
 	msgcnt = inb(iobase + IRCC_LCR_B) & 0x08;
 
-	IRDA_DEBUG(2, "%s: dma count = %d\n", __func__,
-		   get_dma_residue(self->io.dma));
+	pr_debug("%s: dma count = %d\n", __func__,
+		 get_dma_residue(self->io.dma));
 
 	len = self->rx_buff.truesize - get_dma_residue(self->io.dma);
 
@@ -1442,17 +1447,15 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
 	len -= self->io.speed < 4000000 ? 2 : 4;
 
 	if (len < 2 || len > 2050) {
-		IRDA_WARNING("%s(), bogus len=%d\n", __func__, len);
+		net_warn_ratelimited("%s(), bogus len=%d\n", __func__, len);
 		return;
 	}
-	IRDA_DEBUG(2, "%s: msgcnt = %d, len=%d\n", __func__, msgcnt, len);
+	pr_debug("%s: msgcnt = %d, len=%d\n", __func__, msgcnt, len);
 
 	skb = dev_alloc_skb(len + 1);
-	if (!skb) {
-		IRDA_WARNING("%s(), memory squeeze, dropping frame.\n",
-			     __func__);
+	if (!skb)
 		return;
-	}
+
 	/* Make sure IP header gets aligned */
 	skb_reserve(skb, 1);
 
@@ -1491,7 +1494,7 @@ static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self)
 
 		/* Make sure we don't stay here to long */
 		if (boguscount++ > 32) {
-			IRDA_DEBUG(2, "%s(), breaking!\n", __func__);
+			pr_debug("%s(), breaking!\n", __func__);
 			break;
 		}
 	} while (inb(iobase + UART_LSR) & UART_LSR_DR);
@@ -1533,7 +1536,7 @@ static irqreturn_t smsc_ircc_interrupt(int dummy, void *dev_id)
 	lcra = inb(iobase + IRCC_LCR_A);
 	lsr = inb(iobase + IRCC_LSR);
 
-	IRDA_DEBUG(2, "%s(), iir = 0x%02x\n", __func__, iir);
+	pr_debug("%s(), iir = 0x%02x\n", __func__, iir);
 
 	if (iir & IRCC_IIR_EOM) {
 		if (self->io.direction == IO_RECV)
@@ -1583,12 +1586,12 @@ static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
 	/* Clear interrupt */
 	lsr = inb(iobase + UART_LSR);
 
-	IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
-		   __func__, iir, lsr, iobase);
+	pr_debug("%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
+		 __func__, iir, lsr, iobase);
 
 	switch (iir) {
 	case UART_IIR_RLSI:
-		IRDA_DEBUG(2, "%s(), RLSI\n", __func__);
+		pr_debug("%s(), RLSI\n", __func__);
 		break;
 	case UART_IIR_RDI:
 		/* Receive interrupt */
@@ -1600,8 +1603,8 @@ static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
 			smsc_ircc_sir_write_wakeup(self);
 		break;
 	default:
-		IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n",
-			   __func__, iir);
+		pr_debug("%s(), unhandled IIR=%#x\n",
+			 __func__, iir);
 		break;
 	}
 
@@ -1628,12 +1631,12 @@ static int ircc_is_receiving(struct smsc_ircc_cb *self)
 	int status = FALSE;
 	/* int iobase; */
 
-	IRDA_DEBUG(1, "%s\n", __func__);
+	pr_debug("%s\n", __func__);
 
 	IRDA_ASSERT(self != NULL, return FALSE;);
 
-	IRDA_DEBUG(0, "%s: dma count = %d\n", __func__,
-		   get_dma_residue(self->io.dma));
+	pr_debug("%s: dma count = %d\n", __func__,
+		 get_dma_residue(self->io.dma));
 
 	status = (self->rx_buff.state != OUTSIDE_FRAME);
 
@@ -1648,8 +1651,8 @@ static int smsc_ircc_request_irq(struct smsc_ircc_cb *self)
 	error = request_irq(self->io.irq, smsc_ircc_interrupt, 0,
 			    self->netdev->name, self->netdev);
 	if (error)
-		IRDA_DEBUG(0, "%s(), unable to allocate irq=%d, err=%d\n",
-			   __func__, self->io.irq, error);
+		pr_debug("%s(), unable to allocate irq=%d, err=%d\n",
+			 __func__, self->io.irq, error);
 
 	return error;
 }
@@ -1693,21 +1696,21 @@ static int smsc_ircc_net_open(struct net_device *dev)
 	struct smsc_ircc_cb *self;
 	char hwname[16];
 
-	IRDA_DEBUG(1, "%s\n", __func__);
+	pr_debug("%s\n", __func__);
 
 	IRDA_ASSERT(dev != NULL, return -1;);
 	self = netdev_priv(dev);
 	IRDA_ASSERT(self != NULL, return 0;);
 
 	if (self->io.suspended) {
-		IRDA_DEBUG(0, "%s(), device is suspended\n", __func__);
+		pr_debug("%s(), device is suspended\n", __func__);
 		return -EAGAIN;
 	}
 
 	if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name,
 			(void *) dev)) {
-		IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n",
-			   __func__, self->io.irq);
+		pr_debug("%s(), unable to allocate irq=%d\n",
+			 __func__, self->io.irq);
 		return -EAGAIN;
 	}
 
@@ -1730,8 +1733,8 @@ static int smsc_ircc_net_open(struct net_device *dev)
 
 	if (request_dma(self->io.dma, dev->name)) {
 		smsc_ircc_net_close(dev);
-		IRDA_WARNING("%s(), unable to allocate DMA=%d\n",
-			     __func__, self->io.dma);
+		net_warn_ratelimited("%s(), unable to allocate DMA=%d\n",
+				     __func__, self->io.dma);
 		return -EAGAIN;
 	}
 
@@ -1750,7 +1753,7 @@ static int smsc_ircc_net_close(struct net_device *dev)
 {
 	struct smsc_ircc_cb *self;
 
-	IRDA_DEBUG(1, "%s\n", __func__);
+	pr_debug("%s\n", __func__);
 
 	IRDA_ASSERT(dev != NULL, return -1;);
 	self = netdev_priv(dev);
@@ -1781,7 +1784,7 @@ static int smsc_ircc_suspend(struct platform_device *dev, pm_message_t state)
 	struct smsc_ircc_cb *self = platform_get_drvdata(dev);
 
 	if (!self->io.suspended) {
-		IRDA_DEBUG(1, "%s, Suspending\n", driver_name);
+		pr_debug("%s, Suspending\n", driver_name);
 
 		rtnl_lock();
 		if (netif_running(self->netdev)) {
@@ -1802,7 +1805,7 @@ static int smsc_ircc_resume(struct platform_device *dev)
 	struct smsc_ircc_cb *self = platform_get_drvdata(dev);
 
 	if (self->io.suspended) {
-		IRDA_DEBUG(1, "%s, Waking up\n", driver_name);
+		pr_debug("%s, Waking up\n", driver_name);
 
 		rtnl_lock();
 		smsc_ircc_init_chip(self);
@@ -1833,7 +1836,7 @@ static int smsc_ircc_resume(struct platform_device *dev)
 */
static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
 {
-	IRDA_DEBUG(1, "%s\n", __func__);
+	pr_debug("%s\n", __func__);
 
 	IRDA_ASSERT(self != NULL, return -1;);
 
@@ -1845,13 +1848,13 @@ static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
 	smsc_ircc_stop_interrupts(self);
 
 	/* Release the PORTS that this driver is using */
-	IRDA_DEBUG(0, "%s(), releasing 0x%03x\n",  __func__,
-		   self->io.fir_base);
+	pr_debug("%s(), releasing 0x%03x\n",  __func__,
+		 self->io.fir_base);
 
 	release_region(self->io.fir_base, self->io.fir_ext);
 
-	IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __func__,
-		   self->io.sir_base);
+	pr_debug("%s(), releasing 0x%03x\n", __func__,
+		 self->io.sir_base);
 
 	release_region(self->io.sir_base, self->io.sir_ext);
 
@@ -1872,7 +1875,7 @@ static void __exit smsc_ircc_cleanup(void)
 {
 	int i;
 
-	IRDA_DEBUG(1, "%s\n", __func__);
+	pr_debug("%s\n", __func__);
 
 	for (i = 0; i < 2; i++) {
 		if (dev_self[i])
@@ -1896,7 +1899,7 @@ static void smsc_ircc_sir_start(struct smsc_ircc_cb *self)
 	struct net_device *dev;
 	int fir_base, sir_base;
 
-	IRDA_DEBUG(3, "%s\n", __func__);
+	pr_debug("%s\n", __func__);
 
 	IRDA_ASSERT(self != NULL, return;);
 	dev = self->netdev;
@@ -1922,7 +1925,7 @@ static void smsc_ircc_sir_start(struct smsc_ircc_cb *self)
 	/* Turn on interrups */
 	outb(UART_IER_RLSI | UART_IER_RDI |UART_IER_THRI, sir_base + UART_IER);
 
-	IRDA_DEBUG(3, "%s() - exit\n", __func__);
+	pr_debug("%s() - exit\n", __func__);
 
 	outb(0x00, fir_base + IRCC_MASTER);
 }
@@ -1932,7 +1935,7 @@ void smsc_ircc_sir_stop(struct smsc_ircc_cb *self)
 {
 	int iobase;
 
-	IRDA_DEBUG(3, "%s\n", __func__);
+	pr_debug("%s\n", __func__);
 	iobase = self->io.sir_base;
 
 	/* Reset UART */
@@ -1958,7 +1961,7 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
 
 	IRDA_ASSERT(self != NULL, return;);
 
-	IRDA_DEBUG(4, "%s\n", __func__);
+	pr_debug("%s\n", __func__);
 
 	iobase = self->io.sir_base;
 
@@ -1979,8 +1982,8 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
 		 *  if we need to change the speed of the hardware
 		 */
 		if (self->new_speed) {
-			IRDA_DEBUG(5, "%s(), Changing speed to %d.\n",
-				   __func__, self->new_speed);
+			pr_debug("%s(), Changing speed to %d.\n",
+				 __func__, self->new_speed);
 			smsc_ircc_sir_wait_hw_transmitter_finish(self);
 			smsc_ircc_change_speed(self, self->new_speed);
 			self->new_speed = 0;
@@ -2019,7 +2022,8 @@ static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
 
 	/* Tx FIFO should be empty! */
 	if (!(inb(iobase + UART_LSR) & UART_LSR_THRE)) {
-		IRDA_WARNING("%s(), failed, fifo not empty!\n", __func__);
+		net_warn_ratelimited("%s(), failed, fifo not empty!\n",
+				     __func__);
 		return 0;
 	}
 
@@ -2058,14 +2062,14 @@ static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self)
 
 	for (i = 0; smsc_transceivers[i].name != NULL; i++)
 		if (smsc_transceivers[i].probe(self->io.fir_base)) {
-			IRDA_MESSAGE(" %s transceiver found\n",
-				     smsc_transceivers[i].name);
+			net_info_ratelimited(" %s transceiver found\n",
+					     smsc_transceivers[i].name);
 			self->transceiver= i + 1;
 			return;
 		}
-	IRDA_MESSAGE("No transceiver found. Defaulting to %s\n",
-		     smsc_transceivers[SMSC_IRCC2_C_DEFAULT_TRANSCEIVER].name);
+	net_info_ratelimited("No transceiver found. Defaulting to %s\n",
+			     smsc_transceivers[SMSC_IRCC2_C_DEFAULT_TRANSCEIVER].name);
 
 	self->transceiver = SMSC_IRCC2_C_DEFAULT_TRANSCEIVER;
 }
@@ -2119,7 +2123,7 @@ static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self)
 		udelay(1);
 
 	if (count < 0)
-		IRDA_DEBUG(0, "%s(): stuck transmitter\n", __func__);
+		pr_debug("%s(): stuck transmitter\n", __func__);
 }
 
@@ -2180,7 +2184,7 @@ static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned shor
 	u8 mode, dma, irq;
 	int ret = -ENODEV;
 
-	IRDA_DEBUG(1, "%s\n", __func__);
+	pr_debug("%s\n", __func__);
 
 	if (smsc_ircc_probe(cfgbase, SMSCSIOFLAT_DEVICEID_REG, chips, type) == NULL)
 		return ret;
@@ -2191,7 +2195,7 @@ static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned shor
 	/*printk(KERN_WARNING "%s(): mode: 0x%02x\n", __func__, mode);*/
 
 	if (!(mode & SMSCSIOFLAT_UART2MODE_VAL_IRDA))
-		IRDA_WARNING("%s(): IrDA not enabled\n", __func__);
+		net_warn_ratelimited("%s(): IrDA not enabled\n", __func__);
 
 	outb(SMSCSIOFLAT_UART2BASEADDR_REG, cfgbase);
 	sirbase = inb(cfgbase + 1) << 2;
@@ -2208,7 +2212,8 @@ static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned shor
 	outb(SMSCSIOFLAT_UARTIRQSELECT_REG, cfgbase);
 	irq = inb(cfgbase + 1) & SMSCSIOFLAT_UART2IRQSELECT_MASK;
 
-	IRDA_MESSAGE("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n", __func__, firbase, sirbase, dma, irq, mode);
+	net_info_ratelimited("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n",
+			     __func__, firbase, sirbase, dma, irq, mode);
 
 	if (firbase && smsc_ircc_open(firbase, sirbase, dma, irq) == 0)
 		ret = 0;
@@ -2230,7 +2235,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho
 	unsigned short fir_io, sir_io;
 	int ret = -ENODEV;
 
-	IRDA_DEBUG(1, "%s\n", __func__);
+	pr_debug("%s\n", __func__);
 
 	if (smsc_ircc_probe(cfg_base, 0x20, chips, type) == NULL)
 		return ret;
@@ -2264,7 +2269,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho
 
 static int __init smsc_access(unsigned short cfg_base, unsigned char reg)
 {
-	IRDA_DEBUG(1, "%s\n", __func__);
+	pr_debug("%s\n", __func__);
 
 	outb(reg, cfg_base);
 	return inb(cfg_base) != reg ? -1 : 0;
@@ -2274,7 +2279,7 @@ static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base,
 {
 	u8 devid, xdevid, rev;
 
-	IRDA_DEBUG(1, "%s\n", __func__);
+	pr_debug("%s\n", __func__);
 
 	/* Leave configuration */
 
@@ -2329,16 +2334,16 @@ static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base,
 			return NULL;
 	}
 
-	IRDA_MESSAGE("found SMC SuperIO Chip (devid=0x%02x rev=%02X base=0x%04x): %s%s\n",
-		     devid, rev, cfg_base, type, chip->name);
+	net_info_ratelimited("found SMC SuperIO Chip (devid=0x%02x rev=%02X base=0x%04x): %s%s\n",
+			     devid, rev, cfg_base, type, chip->name);
 
 	if (chip->rev > rev) {
-		IRDA_MESSAGE("Revision higher than expected\n");
+		net_info_ratelimited("Revision higher than expected\n");
 		return NULL;
 	}
 
 	if (chip->flags & NoIRDA)
-		IRDA_MESSAGE("chipset does not support IRDA\n");
+		net_info_ratelimited("chipset does not support IRDA\n");
 
 	return chip;
 }
@@ -2348,8 +2353,8 @@ static int __init smsc_superio_fdc(unsigned short cfg_base)
 	int ret = -1;
 
 	if (!request_region(cfg_base, 2, driver_name)) {
-		IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n",
-			     __func__, cfg_base);
+		net_warn_ratelimited("%s: can't get cfg_base of 0x%03x\n",
+				     __func__, cfg_base);
 	} else {
 		if (!smsc_superio_flat(fdc_chips_flat, cfg_base, "FDC") ||
 		    !smsc_superio_paged(fdc_chips_paged, cfg_base, "FDC"))
@@ -2366,8 +2371,8 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
 	int ret = -1;
 
 	if (!request_region(cfg_base, 2, driver_name)) {
-		IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n",
-			     __func__, cfg_base);
+		net_warn_ratelimited("%s: can't get cfg_base of 0x%03x\n",
+				     __func__, cfg_base);
	} else {
 		if (!smsc_superio_flat(lpc_chips_flat, cfg_base, "LPC") ||
 		    !smsc_superio_paged(lpc_chips_paged, cfg_base, "LPC"))
@@ -2529,9 +2534,8 @@ static int __init preconfigure_smsc_chip(struct
 	outb(LPC47N227_CFGACCESSKEY, iobase); // enter configuration state
 	outb(SMSCSIOFLAT_DEVICEID_REG, iobase); // set for device ID
 	tmpbyte = inb(iobase +1); // Read device ID
-	IRDA_DEBUG(0,
-		   "Detected Chip id: 0x%02x, setting up registers...\n",
-		   tmpbyte);
+	pr_debug("Detected Chip id: 0x%02x, setting up registers...\n",
+		 tmpbyte);
 
 	/* Disable UART1 and set up SIR I/O port */
 	outb(0x24, iobase);  // select CR24 - UART1 base addr
@@ -2540,8 +2544,8 @@ static int __init preconfigure_smsc_chip(struct
 	outb( (conf->sir_io >> 2), iobase + 1); // bits 2-9 of 0x3f8
 	tmpbyte = inb(iobase + 1);
 	if (tmpbyte != (conf->sir_io >> 2) ) {
-		IRDA_WARNING("ERROR: could not configure SIR ioport.\n");
-		IRDA_WARNING("Try to supply ircc_cfg argument.\n");
+		net_warn_ratelimited("ERROR: could not configure SIR ioport\n");
+		net_warn_ratelimited("Try to supply ircc_cfg argument\n");
 		return -ENXIO;
 	}
 
@@ -2553,7 +2557,7 @@ static int __init preconfigure_smsc_chip(struct
 	outb(tmpbyte, iobase + 1);
 	tmpbyte = inb(iobase + 1) & SMSCSIOFLAT_UART2IRQSELECT_MASK;
 	if (tmpbyte != conf->fir_irq) {
-		IRDA_WARNING("ERROR: could not configure FIR IRQ channel.\n");
+		net_warn_ratelimited("ERROR: could not configure FIR IRQ channel\n");
 		return -ENXIO;
 	}
 
@@ -2562,7 +2566,7 @@ static int __init preconfigure_smsc_chip(struct
 	outb((conf->fir_io >> 3), iobase + 1);
 	tmpbyte = inb(iobase + 1);
 	if (tmpbyte != (conf->fir_io >> 3) ) {
-		IRDA_WARNING("ERROR: could not configure FIR I/O port.\n");
+		net_warn_ratelimited("ERROR: could not configure FIR I/O port\n");
 		return -ENXIO;
 	}
 
@@ -2571,7 +2575,7 @@ static int __init preconfigure_smsc_chip(struct
 	outb((conf->fir_dma & LPC47N227_FIRDMASELECT_MASK), iobase + 1); // DMA
 	tmpbyte = inb(iobase + 1) & LPC47N227_FIRDMASELECT_MASK;
 	if (tmpbyte != (conf->fir_dma & LPC47N227_FIRDMASELECT_MASK)) {
-		IRDA_WARNING("ERROR: could not configure FIR DMA channel.\n");
+		net_warn_ratelimited("ERROR: could not configure FIR DMA channel\n");
 		return -ENXIO;
 	}
 
@@ -2628,7 +2632,7 @@ static int __init preconfigure_through_82801(struct pci_dev *dev,
 	unsigned short tmpword;
 	unsigned char tmpbyte;
 
-	IRDA_MESSAGE("Setting up Intel 82801 controller and SMSC device\n");
+	net_info_ratelimited("Setting up Intel 82801 controller and SMSC device\n");
 	/*
 	 * Select the range for the COMA COM port (SIR)
	 * Register COM_DEC:
@@ -2677,7 +2681,7 @@ static int __init preconfigure_through_82801(struct pci_dev *dev,
 	default:
 		tmpbyte |= 0x01; /* COM2 default */
 	}
-	IRDA_DEBUG(1, "COM_DEC (write): 0x%02x\n", tmpbyte);
+	pr_debug("COM_DEC (write): 0x%02x\n", tmpbyte);
 	pci_write_config_byte(dev, COM_DEC, tmpbyte);
 
 	/* Enable Low Pin Count interface */
@@ -2699,13 +2703,13 @@ static int __init preconfigure_through_82801(struct pci_dev *dev,
 		tmpword |= 0x0400;
 		break;
 	default:
-		IRDA_WARNING("Uncommon I/O base address: 0x%04x\n",
-			     conf->cfg_base);
+		net_warn_ratelimited("Uncommon I/O base address: 0x%04x\n",
+				     conf->cfg_base);
 		break;
 	}
 	tmpword &= 0xfffd; /* disable LPC COMB */
 	tmpword |= 0x0001; /* set bit 0 : enable LPC COMA addr range (GEN2) */
-	IRDA_DEBUG(1, "LPC_EN (write): 0x%04x\n", tmpword);
+	pr_debug("LPC_EN (write): 0x%04x\n", tmpword);
 	pci_write_config_word(dev, LPC_EN, tmpword);
 
 	/*
@@ -2750,7 +2754,7 @@ static int __init preconfigure_through_82801(struct pci_dev *dev,
 	default:
 		break; /* do not change settings */
 	}
-	IRDA_DEBUG(1, "PCI_DMA_C (write): 0x%04x\n", tmpword);
+	pr_debug("PCI_DMA_C (write): 0x%04x\n", tmpword);
 	pci_write_config_word(dev, PCI_DMA_C, tmpword);
 
 	/*
@@ -2761,7 +2765,7 @@ static int __init preconfigure_through_82801(struct pci_dev *dev,
 	 */
 	tmpword = conf->fir_io & 0xfff8;
 	tmpword |= 0x0001;
-	IRDA_DEBUG(1, "GEN2_DEC (write): 0x%04x\n", tmpword);
+	pr_debug("GEN2_DEC (write): 0x%04x\n", tmpword);
 	pci_write_config_word(dev, GEN2_DEC, tmpword);
 
 	/* Pre-configure chip */
@@ -2800,7 +2804,8 @@ static void __init preconfigure_ali_port(struct pci_dev *dev,
 		mask = 0x08;
 		break;
 	default:
-		IRDA_ERROR("Failed to configure unsupported port on ALi 1533 bridge: 0x%04x\n", port);
+		net_err_ratelimited("Failed to configure unsupported port on ALi 1533 bridge: 0x%04x\n",
+				    port);
 		return;
 	}
 
@@ -2808,7 +2813,8 @@ static void __init preconfigure_ali_port(struct pci_dev *dev,
 	/* Turn on the right bits */
 	tmpbyte |= mask;
 	pci_write_config_byte(dev, reg, tmpbyte);
-	IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port);
+	net_info_ratelimited("Activated ALi 1533 ISA bridge port 0x%04x\n",
+			     port);
 }
 
 static int __init preconfigure_through_ali(struct pci_dev *dev,
@@ -2877,7 +2883,8 @@ static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
 		if (ircc_irq != IRQ_INVAL)
 			tmpconf.fir_irq = ircc_irq;
 
-		IRDA_MESSAGE("Detected unconfigured %s SMSC IrDA chip, pre-configuring device.\n", conf->name);
+		net_info_ratelimited("Detected unconfigured %s SMSC IrDA chip, pre-configuring device\n",
+				     conf->name);
 		if (conf->preconfigure)
 			ret = conf->preconfigure(dev, &tmpconf);
 		else
@@ -2922,8 +2929,8 @@ static void smsc_ircc_set_transceiver_smsc_ircc_atc(int fir_base, u32 speed)
 		/* empty */;
 
 	if (val)
-		IRDA_WARNING("%s(): ATC: 0x%02x\n", __func__,
-			     inb(fir_base + IRCC_ATC));
+		net_warn_ratelimited("%s(): ATC: 0x%02x\n",
+				     __func__, inb(fir_base + IRCC_ATC));
 }
 
 /*
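One non-mechanical change rides along in the smsc-ircc2.c receive path above: the "memory squeeze, dropping frame" message is deleted outright rather than converted, because the SKB allocator already emits its own diagnostics when an allocation fails (unless __GFP_NOWARN is passed), so a per-driver duplicate adds nothing. An illustrative sketch of that rule, not taken from the patch:

	/* Sketch only: rely on the allocator's own OOM reporting instead of
	 * printing a second, driver-local message on the same failure.
	 */
	skb = dev_alloc_skb(len + 1);
	if (!skb)
		return;		/* allocation failure was already logged */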
diff --git a/drivers/net/irda/tekram-sir.c b/drivers/net/irda/tekram-sir.c
index 048a15422844dbf2462429d7e65501b93249922d..9dcf0c103b9d7abb9adae2fb6e14399a15210442 100644
--- a/drivers/net/irda/tekram-sir.c
+++ b/drivers/net/irda/tekram-sir.c
@@ -63,8 +63,8 @@ static int __init tekram_sir_init(void)
 {
 	if (tekram_delay < 1 || tekram_delay > 500)
 		tekram_delay = 200;
-	IRDA_DEBUG(1, "%s - using %d ms delay\n",
-		   tekram.driver_name, tekram_delay);
+	pr_debug("%s - using %d ms delay\n",
+		 tekram.driver_name, tekram_delay);
 	return irda_register_dongle(&tekram);
 }
 
@@ -77,8 +77,6 @@ static int tekram_open(struct sir_dev *dev)
 {
 	struct qos_info *qos = &dev->qos;
 
-	IRDA_DEBUG(2, "%s()\n", __func__);
-
 	sirdev_set_dtr_rts(dev, TRUE, TRUE);
 
 	qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
@@ -92,8 +90,6 @@ static int tekram_open(struct sir_dev *dev)
 
 static int tekram_close(struct sir_dev *dev)
 {
-	IRDA_DEBUG(2, "%s()\n", __func__);
-
 	/* Power off dongle */
 	sirdev_set_dtr_rts(dev, FALSE, FALSE);
 
@@ -130,8 +126,6 @@ static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
 	u8 byte;
 	static int ret = 0;
 
-	IRDA_DEBUG(2, "%s()\n", __func__);
-
 	switch(state) {
 	case SIRDEV_STATE_DONGLE_SPEED:
 
@@ -179,7 +173,8 @@ static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
 		break;
 
 	default:
-		IRDA_ERROR("%s - undefined state %d\n", __func__, state);
+		net_err_ratelimited("%s - undefined state %d\n",
+				    __func__, state);
 		ret = -EINVAL;
 		break;
 	}
@@ -204,8 +199,6 @@ static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
 
 static int tekram_reset(struct sir_dev *dev)
 {
-	IRDA_DEBUG(2, "%s()\n", __func__);
-
 	/* Clear DTR, Set RTS */
 	sirdev_set_dtr_rts(dev, FALSE, TRUE);
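Because pr_debug() adds no prefix of its own, conversions like the dongle drivers above are commonly paired with a pr_fmt() definition so every pr_*() line is tagged with the module name. This companion idiom is an assumption for illustration; it is not visible anywhere in this diff:

	/* Hypothetical companion idiom, not part of this patch: defining
	 * pr_fmt() before the first #include prefixes all pr_*() output,
	 * e.g. "tekram_sir: using 200 ms delay".
	 */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/kernel.h>
	#include <linux/module.h>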
diff --git a/drivers/net/irda/toim3232-sir.c b/drivers/net/irda/toim3232-sir.c
index 19ad4606b799cc427848439b389dca9360cedd6f..6d2f55959c4966851cf58e620047fdbfbafa6c6b 100644
--- a/drivers/net/irda/toim3232-sir.c
+++ b/drivers/net/irda/toim3232-sir.c
@@ -168,8 +168,8 @@ static int __init toim3232_sir_init(void)
 {
 	if (toim3232delay < 1 || toim3232delay > 500)
 		toim3232delay = 200;
-	IRDA_DEBUG(1, "%s - using %d ms delay\n",
-		   toim3232.driver_name, toim3232delay);
+	pr_debug("%s - using %d ms delay\n",
+		 toim3232.driver_name, toim3232delay);
 	return irda_register_dongle(&toim3232);
 }
 
@@ -182,8 +182,6 @@ static int toim3232_open(struct sir_dev *dev)
 {
 	struct qos_info *qos = &dev->qos;
 
-	IRDA_DEBUG(2, "%s()\n", __func__);
-
 	/* Pull the lines high to start with.
	 *
	 * For the IR320ST-2, we need to charge the main supply capacitor to
@@ -210,8 +208,6 @@ static int toim3232_open(struct sir_dev *dev)
 
 static int toim3232_close(struct sir_dev *dev)
 {
-	IRDA_DEBUG(2, "%s()\n", __func__);
-
 	/* Power off dongle */
 	sirdev_set_dtr_rts(dev, FALSE, FALSE);
 
@@ -242,8 +238,6 @@ static int toim3232_change_speed(struct sir_dev *dev, unsigned speed)
 	u8 byte;
 	static int ret = 0;
 
-	IRDA_DEBUG(2, "%s()\n", __func__);
-
 	switch(state) {
 	case SIRDEV_STATE_DONGLE_SPEED:
 
@@ -345,8 +339,6 @@ static int toim3232_change_speed(struct sir_dev *dev, unsigned speed)
 
 static int toim3232_reset(struct sir_dev *dev)
 {
-	IRDA_DEBUG(2, "%s()\n", __func__);
-
 	/* Switch off both DTR and RTS to switch off dongle */
 	sirdev_set_dtr_rts(dev, FALSE, FALSE);
 
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 36e004288ea7b0e4c896c68994f70baef7cc7fb3..6960d4cd3caed83e1e11dc661c1faf492031b20f 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -144,12 +144,10 @@ static int __init via_ircc_init(void)
 {
 	int rc;
 
-	IRDA_DEBUG(3, "%s()\n", __func__);
-
 	rc = pci_register_driver(&via_driver);
 	if (rc < 0) {
-		IRDA_DEBUG(0, "%s(): error rc = %d, returning  -ENODEV...\n",
-			   __func__, rc);
+		pr_debug("%s(): error rc = %d, returning  -ENODEV...\n",
+			 __func__, rc);
 		return -ENODEV;
 	}
 	return 0;
@@ -162,11 +160,11 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
 	u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase;
 	chipio_t info;
 
-	IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __func__, id->device);
+	pr_debug("%s(): Device ID=(0X%X)\n", __func__, id->device);
 
 	rc = pci_enable_device (pcidev);
 	if (rc) {
-		IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc);
+		pr_debug("%s(): error rc = %d\n", __func__, rc);
 		return -ENODEV;
 	}
 
@@ -177,7 +175,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
 		Chipset=0x3076;
 
 	if (Chipset==0x3076) {
-		IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __func__);
+		pr_debug("%s(): Chipset = 3076\n", __func__);
 
 		WriteLPCReg(7,0x0c );
 		temp=ReadLPCReg(0x30);//check if BIOS Enable Fir
@@ -213,7 +211,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
 		} else
 			rc = -ENODEV; //IR not turn on
 	} else { //Not VT1211
-		IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __func__);
+		pr_debug("%s(): Chipset = 3096\n", __func__);
 
 		pci_read_config_byte(pcidev,0x67,&bTmp);//check if BIOS Enable Fir
 		if((bTmp&0x01)==1) {  // BIOS enable FIR
@@ -252,14 +250,12 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
 			rc = -ENODEV; //IR not turn on !!!!!
 	}//Not VT1211
 
-	IRDA_DEBUG(2, "%s(): End - rc = %d\n", __func__, rc);
+	pr_debug("%s(): End - rc = %d\n", __func__, rc);
 	return rc;
 }
 
static void __exit via_ircc_cleanup(void)
 {
-	IRDA_DEBUG(3, "%s()\n", __func__);
-
 	/* Cleanup all instances of the driver */
 	pci_unregister_driver (&via_driver);
 }
@@ -289,8 +285,6 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
 	struct via_ircc_cb *self;
 	int err;
 
-	IRDA_DEBUG(3, "%s()\n", __func__);
-
 	/* Allocate new instance of the driver */
 	dev = alloc_irdadev(sizeof(struct via_ircc_cb));
 	if (dev == NULL)
@@ -316,8 +310,8 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
 
 	/* Reserve the ioports that we need */
 	if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
-		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
-			   __func__, self->io.fir_base);
+		pr_debug("%s(), can't get iobase of 0x%03x\n",
+			 __func__, self->io.fir_base);
 		err = -ENODEV;
 		goto err_out1;
 	}
@@ -391,7 +385,8 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
 	if (err)
 		goto err_out4;
 
-	IRDA_MESSAGE("IrDA: Registered device %s (via-ircc)\n", dev->name);
+	net_info_ratelimited("IrDA: Registered device %s (via-ircc)\n",
+			     dev->name);
 
 	/* Initialise the hardware..
	*/
@@ -422,8 +417,6 @@ static void via_remove_one(struct pci_dev *pdev)
 	struct via_ircc_cb *self = pci_get_drvdata(pdev);
 	int iobase;
 
-	IRDA_DEBUG(3, "%s()\n", __func__);
-
 	iobase = self->io.fir_base;
 
 	ResetChip(iobase, 5);	//hardware reset.
@@ -431,8 +424,8 @@ static void via_remove_one(struct pci_dev *pdev)
 	unregister_netdev(self->netdev);
 
 	/* Release the PORT that this driver is using */
-	IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
-		   __func__, self->io.fir_base);
+	pr_debug("%s(), Releasing Region %03x\n",
+		 __func__, self->io.fir_base);
 	release_region(self->io.fir_base, self->io.fir_ext);
 	if (self->tx_buff.head)
 		dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
@@ -457,8 +450,6 @@ static void via_hw_init(struct via_ircc_cb *self)
 {
 	int iobase = self->io.fir_base;
 
-	IRDA_DEBUG(3, "%s()\n", __func__);
-
 	SetMaxRxPacketSize(iobase, 0x0fff);	//set to max:4095
 	// FIFO Init
 	EnRXFIFOReadyInt(iobase, OFF);
@@ -510,7 +501,7 @@ static void via_hw_init(struct via_ircc_cb *self)
 */
static int via_ircc_read_dongle_id(int iobase)
 {
-	IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
+	net_err_ratelimited("via-ircc: dongle probing not supported, please specify dongle_id module parameter\n");
 	return 9;	/* Default to IBM */
 }
 
@@ -527,8 +518,8 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
 	/* speed is unused, as we use IsSIROn()/IsMIROn() */
 	speed = speed;
 
-	IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
-		   __func__, speed, iobase, dongle_id);
+	pr_debug("%s(): change_dongle_speed to %d for 0x%x, %d\n",
+		 __func__, speed, iobase, dongle_id);
 
 	switch (dongle_id) {
 
@@ -617,7 +608,8 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
 
 	case 0x11:	/* Temic TFDS4500 */
 
-		IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __func__);
+		pr_debug("%s: Temic TFDS4500: One RX pin, TX normal, RX inverted\n",
+			 __func__);
 
 		UseOneRX(iobase, ON);	//use ONE RX....RX1
 		InvertTX(iobase, OFF);
@@ -635,7 +627,8 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
 			SlowIRRXLowActive(iobase, OFF);
 
 		} else{
-			IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __func__);
+			pr_debug("%s: Warning: TFDS4500 not running in SIR mode !\n",
+				 __func__);
 		}
 		break;
 
@@ -652,8 +645,8 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
 		break;
 
 	default:
-		IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
-			   __func__, dongle_id);
+		net_err_ratelimited("%s: Error: dongle_id %d unsupported !\n",
+				    __func__, dongle_id);
 	}
 }
 
@@ -672,7 +665,7 @@ static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
 	iobase = self->io.fir_base;
 	/* Update accounting for new speed */
 	self->io.speed = speed;
-	IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __func__, speed);
+	pr_debug("%s: change_speed to %d bps.\n", __func__, speed);
 
 	WriteReg(iobase, I_ST_CT_0, 0x0);
 
@@ -902,10 +895,10 @@ static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
 		   ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
 		    self->tx_buff.head) + self->tx_buff_dma,
 		   self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
-	IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
-		   __func__, self->tx_fifo.ptr,
-		   self->tx_fifo.queue[self->tx_fifo.ptr].len,
-		   self->tx_fifo.len);
+	pr_debug("%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
+		 __func__, self->tx_fifo.ptr,
+		 self->tx_fifo.queue[self->tx_fifo.ptr].len,
+		 self->tx_fifo.len);
 
 	SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
 	RXStart(iobase, OFF);
@@ -926,8 +919,6 @@ static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
 	int iobase;
 	u8 Tx_status;
 
-	IRDA_DEBUG(3, "%s()\n", __func__);
-
 	iobase = self->io.fir_base;
 	/* Disable DMA */
 //	DisableDmaChannel(self->io.dma);
@@ -957,10 +948,9 @@ static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
 			self->tx_fifo.ptr++;
 		}
 	}
-	IRDA_DEBUG(1,
-		   "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
-		   __func__,
-		   self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
+	pr_debug("%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
+		 __func__,
+		 self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
/* F01_S
	// Any frames to be sent back-to-back?
	if (self->tx_fifo.len) {
@@ -995,8 +985,6 @@ static int via_ircc_dma_receive(struct via_ircc_cb *self)
 
 	iobase = self->io.fir_base;
 
-	IRDA_DEBUG(3, "%s()\n", __func__);
-
 	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
 	self->tx_fifo.tail = self->tx_buff.head;
 	self->RxDataReady = 0;
@@ -1078,15 +1066,15 @@ static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
 		if (len == 0)
 			return TRUE;	//interrupt only, data maybe move by RxT
 		if (((len - 4) < 2) || ((len - 4) > 2048)) {
-			IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
-				   __func__, len, RxCurCount(iobase, self),
-				   self->RxLastCount);
+			pr_debug("%s(): Trouble:len=%x,CurCount=%x,LastCount=%x\n",
+				 __func__, len, RxCurCount(iobase, self),
+				 self->RxLastCount);
 			hwreset(self);
 			return FALSE;
 		}
-		IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
-			   __func__,
-			   st_fifo->len, len - 4, RxCurCount(iobase, self));
+		pr_debug("%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
+			 __func__,
+			 st_fifo->len, len - 4, RxCurCount(iobase, self));
 
 		st_fifo->entries[st_fifo->tail].status = status;
 		st_fifo->entries[st_fifo->tail].len = len;
@@ -1133,8 +1121,8 @@ F01_E */
 		skb_put(skb, len - 4);
 
 		skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
-		IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __func__,
-			   len - 4, self->rx_buff.data);
+		pr_debug("%s(): len=%x.rx_buff=%p\n", __func__,
+			 len - 4, self->rx_buff.data);
 
 		// Move to next frame
 		self->rx_buff.data += len;
@@ -1163,7 +1151,7 @@ static int upload_rxdata(struct via_ircc_cb *self, int iobase)
 
 	len = GetRecvByte(iobase, self);
 
-	IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);
+	pr_debug("%s(): len=%x\n", __func__, len);
 
 	if ((len - 4) < 2) {
 		self->netdev->stats.rx_dropped++;
@@ -1248,8 +1236,8 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
 			skb_put(skb, len - 4);
 			skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
 
-			IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __func__,
-				   len - 4, st_fifo->head);
+			pr_debug("%s(): len=%x.head=%x\n", __func__,
+				 len - 4, st_fifo->head);
 
 			// Move to next frame
 			self->rx_buff.data += len;
@@ -1262,10 +1250,8 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
 		}		//while
 		self->RetryCount = 0;
 
-		IRDA_DEBUG(2,
-			   "%s(): End of upload HostStatus=%x,RxStatus=%x\n",
-			   __func__,
-			   GetHostStatus(iobase), GetRXStatus(iobase));
+		pr_debug("%s(): End of upload HostStatus=%x,RxStatus=%x\n",
+			 __func__, GetHostStatus(iobase), GetRXStatus(iobase));
 
 		/*
		 * if frame is receive complete at this routine ,then upload
@@ -1303,12 +1289,12 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
 	spin_lock(&self->lock);
 	iHostIntType = GetHostStatus(iobase);
 
-	IRDA_DEBUG(4, "%s(): iHostIntType %02x:  %s %s %s  %02x\n",
-		   __func__, iHostIntType,
-		   (iHostIntType & 0x40) ? "Timer" : "",
-		   (iHostIntType & 0x20) ? "Tx" : "",
-		   (iHostIntType & 0x10) ? "Rx" : "",
-		   (iHostIntType & 0x0e) >> 1);
+	pr_debug("%s(): iHostIntType %02x:  %s %s %s  %02x\n",
+		 __func__, iHostIntType,
+		 (iHostIntType & 0x40) ? "Timer" : "",
+		 (iHostIntType & 0x20) ? "Tx" : "",
+		 (iHostIntType & 0x10) ? "Rx" : "",
+		 (iHostIntType & 0x0e) >> 1);
 
 	if ((iHostIntType & 0x40) != 0) {	//Timer Event
 		self->EventFlag.TimeOut++;
@@ -1333,12 +1319,12 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
 	if ((iHostIntType & 0x20) != 0) {	//Tx Event
 		iTxIntType = GetTXStatus(iobase);
 
-		IRDA_DEBUG(4, "%s(): iTxIntType %02x:  %s %s %s %s\n",
-			   __func__, iTxIntType,
-			   (iTxIntType & 0x08) ? "FIFO underr." : "",
-			   (iTxIntType & 0x04) ? "EOM" : "",
-			   (iTxIntType & 0x02) ? "FIFO ready" : "",
-			   (iTxIntType & 0x01) ? "Early EOM" : "");
+		pr_debug("%s(): iTxIntType %02x:  %s %s %s %s\n",
+			 __func__, iTxIntType,
+			 (iTxIntType & 0x08) ? "FIFO underr." : "",
+			 (iTxIntType & 0x04) ? "EOM" : "",
+			 (iTxIntType & 0x02) ? "FIFO ready" : "",
+			 (iTxIntType & 0x01) ? "Early EOM" : "");
 
 		if (iTxIntType & 0x4) {
 			self->EventFlag.EOMessage++;	// read and will auto clean
@@ -1357,17 +1343,17 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
 		/* Check if DMA has finished */
 		iRxIntType = GetRXStatus(iobase);
 
-		IRDA_DEBUG(4, "%s(): iRxIntType %02x:  %s %s %s %s %s %s %s\n",
-			   __func__, iRxIntType,
-			   (iRxIntType & 0x80) ? "PHY err."	: "",
-			   (iRxIntType & 0x40) ? "CRC err"	: "",
-			   (iRxIntType & 0x20) ? "FIFO overr."	: "",
-			   (iRxIntType & 0x10) ? "EOF"		: "",
-			   (iRxIntType & 0x08) ? "RxData"	: "",
-			   (iRxIntType & 0x02) ? "RxMaxLen"	: "",
-			   (iRxIntType & 0x01) ? "SIR bad"	: "");
+		pr_debug("%s(): iRxIntType %02x:  %s %s %s %s %s %s %s\n",
+			 __func__, iRxIntType,
+			 (iRxIntType & 0x80) ? "PHY err."	: "",
+			 (iRxIntType & 0x40) ? "CRC err"	: "",
+			 (iRxIntType & 0x20) ? "FIFO overr."	: "",
+			 (iRxIntType & 0x10) ? "EOF"		: "",
+			 (iRxIntType & 0x08) ? "RxData"		: "",
+			 (iRxIntType & 0x02) ? "RxMaxLen"	: "",
+			 (iRxIntType & 0x01) ? "SIR bad"	: "");
 		if (!iRxIntType)
-			IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __func__);
+			pr_debug("%s(): RxIRQ =0\n", __func__);
 
 		if (iRxIntType & 0x10) {
 			if (via_ircc_dma_receive_complete(self, iobase)) {
@@ -1376,10 +1362,9 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
 			}
 		}		// No ERR
 		else {		//ERR
-			IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
-				   __func__, iRxIntType, iHostIntType,
-				   RxCurCount(iobase, self),
-				   self->RxLastCount);
+			pr_debug("%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
+				 __func__, iRxIntType, iHostIntType,
+				 RxCurCount(iobase, self), self->RxLastCount);
 
 			if (iRxIntType & 0x20) {	//FIFO OverRun ERR
 				ResetChip(iobase, 0);
@@ -1402,8 +1387,6 @@ static void hwreset(struct via_ircc_cb *self)
 	int iobase;
 	iobase = self->io.fir_base;
 
-	IRDA_DEBUG(3, "%s()\n", __func__);
-
 	ResetChip(iobase, 5);
 	EnableDMA(iobase, OFF);
 	EnableTX(iobase, OFF);
@@ -1447,7 +1430,7 @@ static int via_ircc_is_receiving(struct via_ircc_cb *self)
 	if (CkRxRecv(iobase, self))
 		status = TRUE;
 
-	IRDA_DEBUG(2, "%s(): status=%x....\n", __func__, status);
+	pr_debug("%s(): status=%x....\n", __func__, status);
 
 	return status;
 }
@@ -1465,16 +1448,14 @@ static int via_ircc_net_open(struct net_device *dev)
 	int iobase;
 	char hwname[32];
 
-	IRDA_DEBUG(3, "%s()\n", __func__);
-
 	IRDA_ASSERT(dev != NULL, return -1;);
 	self = netdev_priv(dev);
 	dev->stats.rx_packets = 0;
 	IRDA_ASSERT(self != NULL, return 0;);
 	iobase = self->io.fir_base;
 	if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
-		IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name,
-			     self->io.irq);
+		net_warn_ratelimited("%s, unable to allocate irq=%d\n",
+				     driver_name, self->io.irq);
 		return -EAGAIN;
 	}
 	/*
	 * Always allocate the DMA channel after the IRQ, and clean up on
	 * failure.
	 */
 	if (request_dma(self->io.dma, dev->name)) {
-		IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
-			     self->io.dma);
+		net_warn_ratelimited("%s, unable to allocate dma=%d\n",
+				     driver_name, self->io.dma);
 		free_irq(self->io.irq, dev);
 		return -EAGAIN;
 	}
 	if (self->io.dma2 != self->io.dma) {
 		if (request_dma(self->io.dma2, dev->name)) {
-			IRDA_WARNING("%s, unable to allocate dma2=%d\n",
-				     driver_name, self->io.dma2);
+			net_warn_ratelimited("%s, unable to allocate dma2=%d\n",
+					     driver_name, self->io.dma2);
 			free_irq(self->io.irq, dev);
 			free_dma(self->io.dma);
 			return -EAGAIN;
@@ -1532,8 +1513,6 @@ static int via_ircc_net_close(struct net_device *dev)
 	struct via_ircc_cb *self;
 	int iobase;
 
-	IRDA_DEBUG(3, "%s()\n", __func__);
-
 	IRDA_ASSERT(dev != NULL, return -1;);
 	self = netdev_priv(dev);
 	IRDA_ASSERT(self != NULL, return 0;);
@@ -1576,8 +1555,8 @@ static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
 	IRDA_ASSERT(dev != NULL, return -1;);
 	self = netdev_priv(dev);
 	IRDA_ASSERT(self != NULL, return -1;);
-	IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
-		   cmd);
+	pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
+		 cmd);
 	/* Disable interrupts & save flags */
 	spin_lock_irqsave(&self->lock, flags);
 	switch (cmd) {
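For the error, warning, and info sites converted above, the rate limiting comes from net_ratelimit(), a shared token-bucket gate for networking printks. The helpers expand approximately as follows (quoted from memory of include/linux/net.h as of this era, so treat the exact form as an assumption):

	/* Approximate definitions, for context only */
	#define net_ratelimited_function(function, ...)		\
	do {							\
		if (net_ratelimit())				\
			function(__VA_ARGS__);			\
	} while (0)

	#define net_err_ratelimited(fmt, ...)			\
		net_ratelimited_function(pr_err, fmt, ##__VA_ARGS__)
	#define net_warn_ratelimited(fmt, ...)			\
		net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
	#define net_info_ratelimited(fmt, ...)			\
		net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)

This is why the conversion is safe even in interrupt handlers such as via_ircc_interrupt(): a flood of hardware errors can no longer flood the console.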
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index a2e556168286649f3c8d809ea8de145fdb7fbad5..ac39d9f33d5fcf9734f889123b881c14d2716d8c 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -429,8 +429,8 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
 		if (rd->buf == NULL ||
 		    !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
 			if (rd->buf) {
-				IRDA_ERROR("%s: failed to create PCI-MAP for %p",
-					   __func__, rd->buf);
+				net_err_ratelimited("%s: failed to create PCI-MAP for %p\n",
+						    __func__, rd->buf);
 				kfree(rd->buf);
 				rd->buf = NULL;
 			}
@@ -483,11 +483,8 @@ static int vlsi_create_hwif(vlsi_irda_dev_t *idev)
 
 	ringarea = pci_zalloc_consistent(idev->pdev, HW_RING_AREA_SIZE,
 					 &idev->busaddr);
-	if (!ringarea) {
-		IRDA_ERROR("%s: insufficient memory for descriptor rings\n",
-			   __func__);
+	if (!ringarea)
 		goto out;
-	}
 
 	hwmap = (struct ring_descr_hw *)ringarea;
 	idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1],
@@ -559,7 +556,7 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
 		crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16);
 		len -= crclen;		/* remove trailing CRC */
 		if (len <= 0) {
-			IRDA_DEBUG(0, "%s: strange frame (len=%d)\n", __func__, len);
+			pr_debug("%s: strange frame (len=%d)\n", __func__, len);
 			ret |= VLSI_RX_DROP;
 			goto done;
 		}
@@ -574,14 +571,14 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
 			 */
 			le16_to_cpus(rd->buf+len);
 			if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) {
-				IRDA_DEBUG(0, "%s: crc error\n", __func__);
+				pr_debug("%s: crc error\n", __func__);
 				ret |= VLSI_RX_CRC;
 				goto done;
 			}
 		}
 
 	if (!rd->skb) {
-		IRDA_WARNING("%s: rx packet lost\n", __func__);
+		net_warn_ratelimited("%s: rx packet lost\n", __func__);
 		ret |= VLSI_RX_DROP;
 		goto done;
 	}
@@ -610,8 +607,8 @@ static void vlsi_fill_rx(struct vlsi_ring *r)
 
 	for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) {
 		if (rd_is_active(rd)) {
-			IRDA_WARNING("%s: driver bug: rx descr race with hw\n",
-				     __func__);
+			net_warn_ratelimited("%s: driver bug: rx descr race with hw\n",
+					     __func__);
 			vlsi_ring_debug(r);
 			break;
 		}
@@ -670,7 +667,7 @@ static void vlsi_rx_interrupt(struct net_device *ndev)
 
 	if (ring_first(r) == NULL) {
 		/* we are in big trouble, if this should ever happen */
-		IRDA_ERROR("%s: rx ring exhausted!\n", __func__);
+		net_err_ratelimited("%s: rx ring exhausted!\n", __func__);
 		vlsi_ring_debug(r);
 	}
 	else
@@ -692,7 +689,7 @@ static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
 		if (rd_is_active(rd)) {
 			rd_set_status(rd, 0);
 			if (rd_get_count(rd)) {
-				IRDA_DEBUG(0, "%s - dropping rx packet\n", __func__);
+				pr_debug("%s - dropping rx packet\n", __func__);
 				ret = -VLSI_RX_DROP;
 			}
 			rd_set_count(rd, 0);
@@ -767,7 +764,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
 	int	fifocnt;
 
 	baudrate = idev->new_baud;
-	IRDA_DEBUG(2, "%s: %d -> %d\n", __func__, idev->baud, idev->new_baud);
+	pr_debug("%s: %d -> %d\n", __func__, idev->baud, idev->new_baud);
 	if (baudrate == 4000000) {
 		mode = IFF_FIR;
 		config = IRCFG_FIR;
@@ -783,8 +780,8 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
 		config = IRCFG_SIR | IRCFG_SIRFILT  | IRCFG_RXANY;
 		switch(baudrate) {
 			default:
-				IRDA_WARNING("%s: undefined baudrate %d - fallback to 9600!\n",
-					     __func__, baudrate);
+				net_warn_ratelimited("%s: undefined baudrate %d - fallback to 9600!\n",
+						     __func__, baudrate);
 				baudrate = 9600;
 				/* fallthru */
 			case 2400:
@@ -801,7 +798,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
 
 	fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
 	if (fifocnt != 0) {
-		IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt);
+		pr_debug("%s: rx fifo not empty(%d)\n", __func__, fifocnt);
 	}
 
 	outw(0, iobase+VLSI_PIO_IRENABLE);
@@ -825,14 +822,16 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
 	config ^= IRENABLE_SIR_ON;
 
 	if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) {
-		IRDA_WARNING("%s: failed to set %s mode!\n", __func__,
-			(mode==IFF_SIR)?"SIR":((mode==IFF_MIR)?"MIR":"FIR"));
+		net_warn_ratelimited("%s: failed to set %s mode!\n",
+				     __func__,
+				     mode == IFF_SIR ? "SIR" :
+				     mode == IFF_MIR ? "MIR" : "FIR");
 		ret = -1;
 	}
 	else {
 		if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) {
-			IRDA_WARNING("%s: failed to apply baudrate %d\n",
-				     __func__, baudrate);
+			net_warn_ratelimited("%s: failed to apply baudrate %d\n",
+					     __func__, baudrate);
 			ret = -1;
 		}
 		else {
@@ -977,8 +976,8 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
 		 */
 
 		if (len >= r->len-5)
-			IRDA_WARNING("%s: possible buffer overflow with SIR wrapping!\n",
-				     __func__);
+			net_warn_ratelimited("%s: possible buffer overflow with SIR wrapping!\n",
+					     __func__);
 	}
 	else {
 		/* hw deals with MIR/FIR mode wrapping */
@@ -1023,7 +1022,8 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
 
 		fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
 		if (fifocnt != 0) {
-			IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt);
+			pr_debug("%s: rx fifo not empty(%d)\n",
+				 __func__, fifocnt);
 		}
 
 		config = inw(iobase+VLSI_PIO_IRCFG);
@@ -1035,7 +1035,7 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
 
 	if (ring_put(r) == NULL) {
 		netif_stop_queue(ndev);
-		IRDA_DEBUG(3, "%s: tx ring full - queue stopped\n", __func__);
+		pr_debug("%s: tx ring full - queue stopped\n", __func__);
 	}
 	spin_unlock_irqrestore(&idev->lock, flags);
 
@@ -1044,7 +1044,7 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
 drop_unlock:
 	spin_unlock_irqrestore(&idev->lock, flags);
drop:
-	IRDA_WARNING("%s: dropping packet - %s\n", __func__, msg);
+	net_warn_ratelimited("%s: dropping packet - %s\n", __func__, msg);
 	dev_kfree_skb_any(skb);
 	ndev->stats.tx_errors++;
 	ndev->stats.tx_dropped++;
@@ -1100,8 +1100,8 @@ static void vlsi_tx_interrupt(struct net_device *ndev)
 
 		fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
 		if (fifocnt != 0) {
-			IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n",
-				   __func__, fifocnt);
+			pr_debug("%s: rx fifo not empty(%d)\n",
+				 __func__, fifocnt);
 		}
 		outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
 	}
@@ -1110,7 +1110,7 @@ static void vlsi_tx_interrupt(struct net_device *ndev)
 
 	if (netif_queue_stopped(ndev)  &&  !idev->new_baud) {
 		netif_wake_queue(ndev);
-		IRDA_DEBUG(3, "%s: queue awoken\n", __func__);
+		pr_debug("%s: queue awoken\n", __func__);
 	}
 }
 
@@ -1134,7 +1134,7 @@ static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
 				dev_kfree_skb_any(rd->skb);
 				rd->skb = NULL;
 			}
-			IRDA_DEBUG(0, "%s - dropping tx packet\n", __func__);
+			pr_debug("%s - dropping tx packet\n", __func__);
 			ret = -VLSI_TX_DROP;
 		}
 		else
@@ -1183,8 +1183,8 @@ static int vlsi_start_clock(struct pci_dev *pdev)
 		}
 		if (count < 3) {
 			if (clksrc == 1) { /* explicitly asked for PLL hence bail out */
-				IRDA_ERROR("%s: no PLL or failed to lock!\n",
-					   __func__);
+				net_err_ratelimited("%s: no PLL or failed to lock!\n",
+						    __func__);
 				clkctl = CLKCTL_CLKSTP;
 				pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
 				return -1;
@@ -1192,8 +1192,8 @@ static int vlsi_start_clock(struct pci_dev *pdev)
 			else			/* was: clksrc=0(auto) */
 				clksrc = 3;	/* fallback to 40MHz XCLK (OB800) */
 
-			IRDA_DEBUG(0, "%s: PLL not locked, fallback to clksrc=%d\n",
-				   __func__, clksrc);
+			pr_debug("%s: PLL not locked, fallback to clksrc=%d\n",
+				 __func__, clksrc);
 		}
 		else
 			clksrc = 1;	/* got successful PLL lock */
@@ -1265,7 +1265,7 @@ static int vlsi_init_chip(struct pci_dev *pdev)
 
 	/* start the clock and clean the registers */
 
 	if (vlsi_start_clock(pdev)) {
-		IRDA_ERROR("%s: no valid clock source\n", __func__);
+		net_err_ratelimited("%s: no valid clock source\n", __func__);
 		return -1;
 	}
 	iobase = ndev->base_addr;
@@ -1389,8 +1389,8 @@ static void vlsi_tx_timeout(struct net_device *ndev)
 	idev->new_baud = idev->baud;		/* keep current baudrate */
 
 	if (vlsi_start_hw(idev))
-		IRDA_ERROR("%s: failed to restart hw - %s(%s) unusable!\n",
-			   __func__, pci_name(idev->pdev), ndev->name);
+		net_err_ratelimited("%s: failed to restart hw - %s(%s) unusable!\n",
+				    __func__, pci_name(idev->pdev), ndev->name);
 	else
 		netif_start_queue(ndev);
 }
@@ -1434,8 +1434,8 @@ static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 			irq->ifr_receiving = (fifocnt!=0) ? 1 : 0;
 			break;
 		default:
-			IRDA_WARNING("%s: notsupp - cmd=%04x\n",
-				     __func__, cmd);
+			net_warn_ratelimited("%s: notsupp - cmd=%04x\n",
+					     __func__, cmd);
 			ret = -EOPNOTSUPP;
 	}
 
@@ -1479,8 +1479,8 @@ static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)
 	spin_unlock_irqrestore(&idev->lock,flags);
 
 	if (boguscount <= 0)
-		IRDA_MESSAGE("%s: too much work in interrupt!\n",
-			     __func__);
+		net_info_ratelimited("%s: too much work in interrupt!\n",
+				     __func__);
 	return IRQ_RETVAL(handled);
 }
 
@@ -1493,7 +1493,7 @@ static int vlsi_open(struct net_device *ndev)
 	char	hwname[32];
 
 	if (pci_request_regions(idev->pdev, drivername)) {
-		IRDA_WARNING("%s: io resource busy\n", __func__);
+		net_warn_ratelimited("%s: io resource busy\n", __func__);
 		goto errout;
 	}
 	ndev->base_addr = pci_resource_start(idev->pdev,0);
@@ -1507,8 +1507,8 @@ static int vlsi_open(struct net_device *ndev)
 
 	if (request_irq(ndev->irq, vlsi_interrupt, IRQF_SHARED,
 			drivername, ndev)) {
-		IRDA_WARNING("%s: couldn't get IRQ: %d\n",
-			     __func__, ndev->irq);
+		net_warn_ratelimited("%s: couldn't get IRQ: %d\n",
+				     __func__, ndev->irq);
 		goto errout_io;
 	}
 
@@ -1529,7 +1529,8 @@ static int vlsi_open(struct net_device *ndev)
 
 	netif_start_queue(ndev);
 
-	IRDA_MESSAGE("%s: device %s operational\n", __func__, ndev->name);
+	net_info_ratelimited("%s: device %s operational\n",
+			     __func__, ndev->name);
 
 	return 0;
 
@@ -1563,7 +1564,7 @@ static int vlsi_close(struct net_device *ndev)
 
 	pci_release_regions(idev->pdev);
 
-	IRDA_MESSAGE("%s: device %s stopped\n", __func__, ndev->name);
+	net_info_ratelimited("%s: device %s stopped\n", __func__, ndev->name);
 
 	return 0;
 }
@@ -1590,7 +1591,8 @@ static int vlsi_irda_init(struct net_device *ndev)
 
 	if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW) ||
 	    pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) {
-		IRDA_ERROR("%s: aborting due to PCI BM-DMA address limitations\n", __func__);
+		net_err_ratelimited("%s: aborting due to PCI BM-DMA address limitations\n",
+				    __func__);
 		return -1;
 	}
 
@@ -1632,19 +1634,19 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	else
 		pdev->current_state = 0; /* hw must be running now */
 
-	IRDA_MESSAGE("%s: IrDA PCI controller %s detected\n",
-		     drivername, pci_name(pdev));
+	net_info_ratelimited("%s: IrDA PCI controller %s detected\n",
+			     drivername, pci_name(pdev));
 
 	if ( !pci_resource_start(pdev,0) ||
 	     !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) {
-		IRDA_ERROR("%s: bar 0 invalid", __func__);
+		net_err_ratelimited("%s: bar 0 invalid\n", __func__);
 		goto out_disable;
 	}
 
 	ndev = alloc_irdadev(sizeof(*idev));
 	if (ndev==NULL) {
-		IRDA_ERROR("%s: Unable to allocate device memory.\n",
-			   __func__);
+		net_err_ratelimited("%s: Unable to allocate device memory.\n",
+				    __func__);
 		goto out_disable;
 	}
 
@@ -1659,7 +1661,7 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto out_freedev;
 
 	if (register_netdev(ndev) < 0) {
-		IRDA_ERROR("%s: register_netdev failed\n", __func__);
+		net_err_ratelimited("%s: register_netdev failed\n", __func__);
 		goto out_freedev;
 	}
 
@@ -1669,14 +1671,15 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		ent = proc_create_data(ndev->name, S_IFREG|S_IRUGO,
 				       vlsi_proc_root, VLSI_PROC_FOPS, ndev);
 		if (!ent) {
-			IRDA_WARNING("%s: failed to create proc entry\n",
-				     __func__);
+			net_warn_ratelimited("%s: failed to create proc entry\n",
+					     __func__);
 		} else {
 			proc_set_size(ent, 0);
 		}
 		idev->proc_entry = ent;
 	}
-	IRDA_MESSAGE("%s: registered device %s\n", drivername, ndev->name);
+	net_info_ratelimited("%s: registered device %s\n",
+			     drivername, ndev->name);
 
 	pci_set_drvdata(pdev, ndev);
 	mutex_unlock(&idev->mtx);
@@ -1698,7 +1701,7 @@ static void vlsi_irda_remove(struct pci_dev *pdev)
 	vlsi_irda_dev_t *idev;
 
 	if (!ndev) {
-		IRDA_ERROR("%s: lost netdevice?\n", drivername);
+		net_err_ratelimited("%s: lost netdevice?\n", drivername);
 		return;
 	}
 
@@ -1714,7 +1717,7 @@ static void vlsi_irda_remove(struct pci_dev *pdev)
 
 	free_netdev(ndev);
 
-	IRDA_MESSAGE("%s: %s removed\n", drivername, pci_name(pdev));
+	net_info_ratelimited("%s: %s removed\n", drivername, pci_name(pdev));
 }
 
#ifdef CONFIG_PM
@@ -1733,8 +1736,8 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
 	vlsi_irda_dev_t *idev;
 
 	if (!ndev) {
-		IRDA_ERROR("%s - %s: no netdevice\n",
-			   __func__, pci_name(pdev));
+		net_err_ratelimited("%s - %s: no netdevice\n",
+				    __func__, pci_name(pdev));
 		return 0;
 	}
 	idev = netdev_priv(ndev);
@@ -1745,7 +1748,9 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
 			pdev->current_state = state.event;
 		}
 		else
-			IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __func__, pci_name(pdev), pdev->current_state, state.event);
+			net_err_ratelimited("%s - %s: invalid suspend request %u -> %u\n",
+					    __func__, pci_name(pdev),
+					    pdev->current_state, state.event);
 		mutex_unlock(&idev->mtx);
 		return 0;
 	}
@@ -1772,16 +1777,16 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
 	vlsi_irda_dev_t	*idev;
 
 	if (!ndev) {
-		IRDA_ERROR("%s - %s: no netdevice\n",
-			   __func__, pci_name(pdev));
+		net_err_ratelimited("%s - %s: no netdevice\n",
+				    __func__, pci_name(pdev));
 		return 0;
 	}
 	idev = netdev_priv(ndev);
 	mutex_lock(&idev->mtx);
 	if (pdev->current_state == 0) {
 		mutex_unlock(&idev->mtx);
-		IRDA_WARNING("%s - %s: already resumed\n",
-			     __func__, pci_name(pdev));
+		net_warn_ratelimited("%s - %s: already resumed\n",
+				     __func__, pci_name(pdev));
 		return 0;
 	}
 
@@ -1800,7 +1805,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
	 * now we explicitly set pdev->current_state = 0 after enabling the
	 * device and independently resume_ok should catch any garbage config.
	 */
-	IRDA_WARNING("%s - hm, nothing to resume?\n", __func__);
+	net_warn_ratelimited("%s - hm, nothing to resume?\n", __func__);
 	mutex_unlock(&idev->mtx);
 	return 0;
 }
@@ -1837,7 +1842,8 @@ static int __init vlsi_mod_init(void)
 	int	i, ret;
 
 	if (clksrc < 0  ||  clksrc > 3) {
-		IRDA_ERROR("%s: invalid clksrc=%d\n", drivername, clksrc);
+		net_err_ratelimited("%s: invalid clksrc=%d\n",
+				    drivername, clksrc);
 		return -1;
 	}
 
@@ -1850,7 +1856,10 @@ static int __init vlsi_mod_init(void)
 			case 64:
 				break;
 			default:
-				IRDA_WARNING("%s: invalid %s ringsize %d, using default=8", drivername, (i)?"rx":"tx", ringsize[i]);
+				net_warn_ratelimited("%s: invalid %s ringsize %d, using default=8\n",
+						     drivername,
+						     i ?
"rx" : "tx", + ringsize[i]); ringsize[i] = 8; break; } diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h index 56399204e68cf38503d831585e7931ef9b04d3f7..f9119c6d2a09082bb411b8887ea9f25e84f8137f 100644 --- a/drivers/net/irda/vlsi_ir.h +++ b/drivers/net/irda/vlsi_ir.h @@ -615,7 +615,8 @@ static inline void rd_set_addr_status(struct ring_descr *rd, dma_addr_t a, u8 s) */ if ((a & ~DMA_MASK_MSTRPAGE)>>24 != MSTRPAGE_VALUE) { - IRDA_ERROR("%s: pci busaddr inconsistency!\n", __func__); + net_err_ratelimited("%s: pci busaddr inconsistency!\n", + __func__); dump_stack(); return; } diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c index 11dbdf36d9c1b328ff70b6e5d0e5d2f7729dbb2a..4e3d2e7c697c76f483eb264b1c5da9244eb434c6 100644 --- a/drivers/net/irda/w83977af_ir.c +++ b/drivers/net/irda/w83977af_ir.c @@ -110,8 +110,6 @@ static int __init w83977af_init(void) { int i; - IRDA_DEBUG(0, "%s()\n", __func__ ); - for (i=0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) { if (w83977af_open(i, io[i], irq[i], dma[i]) == 0) return 0; @@ -129,8 +127,6 @@ static void __exit w83977af_cleanup(void) { int i; - IRDA_DEBUG(4, "%s()\n", __func__ ); - for (i=0; i < ARRAY_SIZE(dev_self); i++) { if (dev_self[i]) w83977af_close(dev_self[i]); @@ -157,12 +153,10 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq, struct w83977af_ir *self; int err; - IRDA_DEBUG(0, "%s()\n", __func__ ); - /* Lock the port that we need */ if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) { - IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n", - __func__ , iobase); + pr_debug("%s(), can't get iobase of 0x%03x\n", + __func__ , iobase); return -ENODEV; } @@ -236,10 +230,11 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq, err = register_netdev(dev); if (err) { - IRDA_ERROR("%s(), register_netdevice() failed!\n", __func__); + net_err_ratelimited("%s(), register_netdevice() failed!\n", + __func__); goto err_out3; } - IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name); + net_info_ratelimited("IrDA: Registered device %s\n", dev->name); /* Need to store self somewhere */ dev_self[i] = self; @@ -268,8 +263,6 @@ static int w83977af_close(struct w83977af_ir *self) { int iobase; - IRDA_DEBUG(0, "%s()\n", __func__ ); - iobase = self->io.fir_base; #ifdef CONFIG_USE_W977_PNP @@ -288,8 +281,8 @@ static int w83977af_close(struct w83977af_ir *self) unregister_netdev(self->netdev); /* Release the PORT that this driver is using */ - IRDA_DEBUG(0 , "%s(), Releasing Region %03x\n", - __func__ , self->io.fir_base); + pr_debug("%s(), Releasing Region %03x\n", + __func__ , self->io.fir_base); release_region(self->io.fir_base, self->io.fir_ext); if (self->tx_buff.head) @@ -311,7 +304,6 @@ static int w83977af_probe(int iobase, int irq, int dma) int i; for (i=0; i < 2; i++) { - IRDA_DEBUG( 0, "%s()\n", __func__ ); #ifdef CONFIG_USE_W977_PNP /* Enter PnP configuration mode */ w977_efm_enter(efbase[i]); @@ -392,13 +384,13 @@ static int w83977af_probe(int iobase, int irq, int dma) switch_bank(iobase, SET7); outb(0x40, iobase+7); - IRDA_MESSAGE("W83977AF (IR) driver loaded. " - "Version: 0x%02x\n", version); + net_info_ratelimited("W83977AF (IR) driver loaded. 
Version: 0x%02x\n", + version); return 0; } else { /* Try next extented function register address */ - IRDA_DEBUG( 0, "%s(), Wrong chip version", __func__ ); + pr_debug("%s(), Wrong chip version", __func__); } } return -1; @@ -434,19 +426,19 @@ static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed) case 115200: outb(0x01, iobase+ABLL); break; case 576000: ir_mode = HCR_MIR_576; - IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__ ); + pr_debug("%s(), handling baud of 576000\n", __func__); break; case 1152000: ir_mode = HCR_MIR_1152; - IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__ ); + pr_debug("%s(), handling baud of 1152000\n", __func__); break; case 4000000: ir_mode = HCR_FIR; - IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__ ); + pr_debug("%s(), handling baud of 4000000\n", __func__); break; default: ir_mode = HCR_FIR; - IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __func__ , speed); + pr_debug("%s(), unknown baud rate of %d\n", __func__ , speed); break; } @@ -497,8 +489,8 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb, iobase = self->io.fir_base; - IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __func__ , jiffies, - (int) skb->len); + pr_debug("%s(%ld), skb->len=%d\n", __func__ , jiffies, + (int)skb->len); /* Lock transmit buffer */ netif_stop_queue(dev); @@ -525,7 +517,7 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb, self->tx_buff.len = skb->len; mtt = irda_get_mtt(skb); - IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__ , jiffies, mtt); + pr_debug("%s(%ld), mtt=%d\n", __func__ , jiffies, mtt); if (mtt) udelay(mtt); @@ -559,7 +551,7 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb, static void w83977af_dma_write(struct w83977af_ir *self, int iobase) { __u8 set; - IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len); + pr_debug("%s(), len=%d\n", __func__ , self->tx_buff.len); /* Save current set */ set = inb(iobase+SSR); @@ -594,19 +586,16 @@ static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size) int actual = 0; __u8 set; - IRDA_DEBUG(4, "%s()\n", __func__ ); - /* Save current bank */ set = inb(iobase+SSR); switch_bank(iobase, SET0); if (!(inb_p(iobase+USR) & USR_TSRE)) { - IRDA_DEBUG(4, - "%s(), warning, FIFO not empty yet!\n", __func__ ); + pr_debug("%s(), warning, FIFO not empty yet!\n", __func__); fifo_size -= 17; - IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n", - __func__ , fifo_size); + pr_debug("%s(), %d bytes left in tx fifo\n", + __func__ , fifo_size); } /* Fill FIFO with current frame */ @@ -615,8 +604,8 @@ static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size) outb(buf[actual++], iobase+TBR); } - IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n", - __func__ , fifo_size, actual, len); + pr_debug("%s(), fifo_size %d ; %d sent of %d\n", + __func__ , fifo_size, actual, len); /* Restore bank */ outb(set, iobase+SSR); @@ -636,7 +625,7 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self) int iobase; __u8 set; - IRDA_DEBUG(4, "%s(%ld)\n", __func__ , jiffies); + pr_debug("%s(%ld)\n", __func__ , jiffies); IRDA_ASSERT(self != NULL, return;); @@ -651,7 +640,7 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self) /* Check for underrun! 
*/ if (inb(iobase+AUDR) & AUDR_UNDR) { - IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ ); + pr_debug("%s(), Transmit underrun!\n", __func__); self->netdev->stats.tx_errors++; self->netdev->stats.tx_fifo_errors++; @@ -692,7 +681,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self) #endif IRDA_ASSERT(self != NULL, return -1;); - IRDA_DEBUG(4, "%s\n", __func__ ); + pr_debug("%s\n", __func__); iobase= self->io.fir_base; @@ -763,7 +752,7 @@ static int w83977af_dma_receive_complete(struct w83977af_ir *self) __u8 set; __u8 status; - IRDA_DEBUG(4, "%s\n", __func__ ); + pr_debug("%s\n", __func__); st_fifo = &self->st_fifo; @@ -880,8 +869,6 @@ static void w83977af_pio_receive(struct w83977af_ir *self) __u8 byte = 0x00; int iobase; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); iobase = self->io.fir_base; @@ -907,7 +894,7 @@ static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr) __u8 set; int iobase; - IRDA_DEBUG(4, "%s(), isr=%#x\n", __func__ , isr); + pr_debug("%s(), isr=%#x\n", __func__ , isr); iobase = self->io.fir_base; /* Transmit FIFO low on data */ @@ -943,8 +930,7 @@ static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr) if (isr & ISR_TXEMP_I) { /* Check if we need to change the speed? */ if (self->new_speed) { - IRDA_DEBUG(2, - "%s(), Changing speed!\n", __func__ ); + pr_debug("%s(), Changing speed!\n", __func__); w83977af_change_speed(self, self->new_speed); self->new_speed = 0; } @@ -1126,7 +1112,6 @@ static int w83977af_net_open(struct net_device *dev) char hwname[32]; __u8 set; - IRDA_DEBUG(0, "%s()\n", __func__ ); IRDA_ASSERT(dev != NULL, return -1;); self = netdev_priv(dev); @@ -1189,8 +1174,6 @@ static int w83977af_net_close(struct net_device *dev) int iobase; __u8 set; - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(dev != NULL, return -1;); self = netdev_priv(dev); @@ -1244,7 +1227,7 @@ static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) IRDA_ASSERT(self != NULL, return -1;); - IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd); + pr_debug("%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd); spin_lock_irqsave(&self->lock, flags); diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index bfb0b6ec8c56e26d67fa94814fe460af34c95fed..612e0731142d29aef8cca9e882ef66e237fb4960 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -742,11 +742,12 @@ static struct lock_class_key macvlan_netdev_xmit_lock_key; static struct lock_class_key macvlan_netdev_addr_lock_key; #define ALWAYS_ON_FEATURES \ - (NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX) + (NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX | \ + NETIF_F_GSO_ROBUST) #define MACVLAN_FEATURES \ (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ - NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ + NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_LRO | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) @@ -783,6 +784,7 @@ static int macvlan_init(struct net_device *dev) (lowerdev->state & MACVLAN_STATE_MASK); dev->features = lowerdev->features & MACVLAN_FEATURES; dev->features |= ALWAYS_ON_FEATURES; + dev->hw_features |= NETIF_F_LRO; dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES; dev->gso_max_size = lowerdev->gso_max_size; dev->iflink = lowerdev->ifindex; @@ -872,7 +874,7 @@ static int macvlan_vlan_rx_kill_vid(struct net_device 
*dev, static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr, + const unsigned char *addr, u16 vid, u16 flags) { struct macvlan_dev *vlan = netdev_priv(dev); @@ -897,7 +899,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr) + const unsigned char *addr, u16 vid) { struct macvlan_dev *vlan = netdev_priv(dev); int err = -EINVAL; @@ -935,15 +937,15 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev, netdev_features_t features) { struct macvlan_dev *vlan = netdev_priv(dev); + netdev_features_t lowerdev_features = vlan->lowerdev->features; netdev_features_t mask; features |= NETIF_F_ALL_FOR_ALL; features &= (vlan->set_features | ~MACVLAN_FEATURES); mask = features; - features = netdev_increment_features(vlan->lowerdev->features, - features, - mask); + lowerdev_features &= (features | ~NETIF_F_LRO); + features = netdev_increment_features(lowerdev_features, features, mask); features |= ALWAYS_ON_FEATURES; features &= ~NETIF_F_NETNS_LOCAL; @@ -1055,6 +1057,9 @@ static int macvlan_port_create(struct net_device *dev) if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) return -EINVAL; + if (netif_is_ipvlan_port(dev)) + return -EBUSY; + port = kzalloc(sizeof(*port), GFP_KERNEL); if (port == NULL) return -ENOMEM; diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index af90ab5e57680e65b131355c1068eb37d23e7135..60f7ee5fafbe004a766fdd262d2f074e04070bcb 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -656,12 +657,12 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q, /* Get packet from user space buffer */ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, - const struct iovec *iv, unsigned long total_len, - size_t count, int noblock) + struct iov_iter *from, int noblock) { int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN); struct sk_buff *skb; struct macvlan_dev *vlan; + unsigned long total_len = iov_iter_count(from); unsigned long len = total_len; int err; struct virtio_net_hdr vnet_hdr = { 0 }; @@ -669,6 +670,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, int copylen = 0; bool zerocopy = false; size_t linear; + ssize_t n; if (q->flags & IFF_VNET_HDR) { vnet_hdr_len = q->vnet_hdr_sz; @@ -678,10 +680,11 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, goto err; len -= vnet_hdr_len; - err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0, - sizeof(vnet_hdr)); - if (err < 0) + err = -EFAULT; + n = copy_from_iter(&vnet_hdr, sizeof(vnet_hdr), from); + if (n != sizeof(vnet_hdr)) goto err; + iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr)); if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && macvtap16_to_cpu(q, vnet_hdr.csum_start) + macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2 > @@ -698,18 +701,17 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, if (unlikely(len < ETH_HLEN)) goto err; - err = -EMSGSIZE; - if (unlikely(count > UIO_MAXIOV)) - goto err; - if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) { + struct iov_iter i; + copylen = vnet_hdr.hdr_len ? 
 			macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
 		if (copylen > good_linear)
 			copylen = good_linear;
 		linear = copylen;
-		if (iov_pages(iv, vnet_hdr_len + copylen, count)
-		    <= MAX_SKB_FRAGS)
+		i = *from;
+		iov_iter_advance(&i, copylen);
+		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
 			zerocopy = true;
 	}
@@ -727,10 +729,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 		goto err;
 
 	if (zerocopy)
-		err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
+		err = zerocopy_sg_from_iter(skb, from);
 	else {
-		err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
-						   len);
+		err = skb_copy_datagram_from_iter(skb, 0, from, len);
 		if (!err && m && m->msg_control) {
 			struct ubuf_info *uarg = m->msg_control;
 			uarg->callback(uarg, false);
@@ -783,46 +784,42 @@ err:
 	return err;
 }
 
-static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
-				 unsigned long count, loff_t pos)
+static ssize_t macvtap_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file *file = iocb->ki_filp;
-	ssize_t result = -ENOLINK;
 	struct macvtap_queue *q = file->private_data;
 
-	result = macvtap_get_user(q, NULL, iv, iov_length(iv, count), count,
-				  file->f_flags & O_NONBLOCK);
-	return result;
+	return macvtap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
 }
 
 /* Put packet to the user space buffer */
 static ssize_t macvtap_put_user(struct macvtap_queue *q,
 				const struct sk_buff *skb,
-				const struct iovec *iv, int len)
+				struct iov_iter *iter)
 {
 	int ret;
 	int vnet_hdr_len = 0;
 	int vlan_offset = 0;
-	int copied, total;
+	int total;
 
 	if (q->flags & IFF_VNET_HDR) {
 		struct virtio_net_hdr vnet_hdr;
 		vnet_hdr_len = q->vnet_hdr_sz;
-		if ((len -= vnet_hdr_len) < 0)
+		if (iov_iter_count(iter) < vnet_hdr_len)
 			return -EINVAL;
 
 		macvtap_skb_to_vnet_hdr(q, skb, &vnet_hdr);
 
-		if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
+		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
+		    sizeof(vnet_hdr))
 			return -EFAULT;
+
+		iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
 	}
-	total = copied = vnet_hdr_len;
+	total = vnet_hdr_len;
 	total += skb->len;
 
-	if (!vlan_tx_tag_present(skb))
-		len = min_t(int, skb->len, len);
-	else {
-		int copy;
+	if (vlan_tx_tag_present(skb)) {
 		struct {
 			__be16 h_vlan_proto;
 			__be16 h_vlan_TCI;
 		} veth;
@@ -831,86 +828,77 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 		veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
 
 		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
-		len = min_t(int, skb->len + VLAN_HLEN, len);
 		total += VLAN_HLEN;
 
-		copy = min_t(int, vlan_offset, len);
-		ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
-		len -= copy;
-		copied += copy;
-		if (ret || !len)
+		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
+		if (ret || !iov_iter_count(iter))
 			goto done;
 
-		copy = min_t(int, sizeof(veth), len);
-		ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
-		len -= copy;
-		copied += copy;
-		if (ret || !len)
+		ret = copy_to_iter(&veth, sizeof(veth), iter);
+		if (ret != sizeof(veth) || !iov_iter_count(iter))
 			goto done;
 	}
 
-	ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
+	ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
+				     skb->len - vlan_offset);
 
 done:
 	return ret ? ret : total;
 }
 
 static ssize_t macvtap_do_read(struct macvtap_queue *q,
-			       const struct iovec *iv, unsigned long len,
+			       struct iov_iter *to,
 			       int noblock)
 {
 	DEFINE_WAIT(wait);
 	struct sk_buff *skb;
 	ssize_t ret = 0;
 
-	while (len) {
+	if (!iov_iter_count(to))
+		return 0;
+
+	while (1) {
 		if (!noblock)
 			prepare_to_wait(sk_sleep(&q->sk), &wait,
 					TASK_INTERRUPTIBLE);
 
 		/* Read frames from the queue */
 		skb = skb_dequeue(&q->sk.sk_receive_queue);
-		if (!skb) {
-			if (noblock) {
-				ret = -EAGAIN;
-				break;
-			}
-			if (signal_pending(current)) {
-				ret = -ERESTARTSYS;
-				break;
-			}
-			/* Nothing to read, let's sleep */
-			schedule();
-			continue;
+		if (skb)
+			break;
+		if (noblock) {
+			ret = -EAGAIN;
+			break;
 		}
-		ret = macvtap_put_user(q, skb, iv, len);
-		kfree_skb(skb);
-		break;
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+		/* Nothing to read, let's sleep */
+		schedule();
+	}
+	if (skb) {
+		ret = macvtap_put_user(q, skb, to);
+		if (unlikely(ret < 0))
+			kfree_skb(skb);
+		else
+			consume_skb(skb);
 	}
-
 	if (!noblock)
 		finish_wait(sk_sleep(&q->sk), &wait);
 	return ret;
 }
 
-static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
-				unsigned long count, loff_t pos)
+static ssize_t macvtap_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
 	struct file *file = iocb->ki_filp;
 	struct macvtap_queue *q = file->private_data;
-	ssize_t len, ret = 0;
+	ssize_t len = iov_iter_count(to), ret;
 
-	len = iov_length(iv, count);
-	if (len < 0) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	ret = macvtap_do_read(q, iv, len, file->f_flags & O_NONBLOCK);
+	ret = macvtap_do_read(q, to, file->f_flags & O_NONBLOCK);
 	ret = min_t(ssize_t, ret, len);
 	if (ret > 0)
 		iocb->ki_pos = ret;
-out:
 	return ret;
 }
@@ -1109,8 +1097,10 @@ static const struct file_operations macvtap_fops = {
 	.owner		= THIS_MODULE,
 	.open		= macvtap_open,
 	.release	= macvtap_release,
-	.aio_read	= macvtap_aio_read,
-	.aio_write	= macvtap_aio_write,
+	.read		= new_sync_read,
+	.write		= new_sync_write,
+	.read_iter	= macvtap_read_iter,
+	.write_iter	= macvtap_write_iter,
 	.poll		= macvtap_poll,
 	.llseek		= no_llseek,
 	.unlocked_ioctl	= macvtap_ioctl,
@@ -1123,8 +1113,7 @@ static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
 			   struct msghdr *m, size_t total_len)
 {
 	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
-	return macvtap_get_user(q, m, m->msg_iov, total_len, m->msg_iovlen,
-				m->msg_flags & MSG_DONTWAIT);
+	return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT);
 }
 
@@ -1135,8 +1124,7 @@ static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
 	int ret;
 	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
 		return -EINVAL;
-	ret = macvtap_do_read(q, m->msg_iov, total_len,
-			      flags & MSG_DONTWAIT);
+	ret = macvtap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT);
 	if (ret > total_len) {
 		m->msg_flags |= MSG_TRUNC;
 		ret = flags & MSG_TRUNC ? ret : total_len;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 75472cf734de14f53e57daa1e541701e71643c40..b4b0f804e84c937df436e68332b9f289ff16a75d 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -26,7 +26,7 @@ config AMD_PHY
 
 config AMD_XGBE_PHY
 	tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
-	depends on OF
+	depends on OF && HAS_IOMEM
 	---help---
 	  Currently supports the AMD 10GbE PHY
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index c456559f6e7f2ea3ae713e98c93e58148aaa7776..903dc3dc9ea703dfae71dc4917b432e413735ee4 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -992,7 +992,8 @@ static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
 	if (ret & MDIO_CTRL1_RESET)
 		return -ETIMEDOUT;
 
-	return 0;
+	/* Make sure the XPCS and SerDes are in compatible states */
+	return amd_xgbe_phy_xgmii_mode(phydev);
 }
 
 static int amd_xgbe_phy_config_init(struct phy_device *phydev)
@@ -1467,20 +1468,7 @@ static struct phy_driver amd_xgbe_phy_driver[] = {
 	},
 };
 
-static int __init amd_xgbe_phy_init(void)
-{
-	return phy_drivers_register(amd_xgbe_phy_driver,
-				    ARRAY_SIZE(amd_xgbe_phy_driver));
-}
-
-static void __exit amd_xgbe_phy_exit(void)
-{
-	phy_drivers_unregister(amd_xgbe_phy_driver,
-			       ARRAY_SIZE(amd_xgbe_phy_driver));
-}
-
-module_init(amd_xgbe_phy_init);
-module_exit(amd_xgbe_phy_exit);
+module_phy_driver(amd_xgbe_phy_driver);
 
 static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids[] = {
 	{ XGBE_PHY_ID, XGBE_PHY_MASK },
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index a3fb5ceb6487f98722f10928ef5ad801684f5169..65a488f82eb8c806e703f52abd9295171b6c096c 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -61,7 +61,7 @@ static int am79c_config_intr(struct phy_device *phydev)
 	return err;
 }
 
-static struct phy_driver am79c_driver = {
+static struct phy_driver am79c_driver[] = { {
 	.phy_id		= PHY_ID_AM79C874,
 	.name		= "AM79C874",
 	.phy_id_mask	= 0xfffffff0,
@@ -73,20 +73,9 @@ static int am79c_config_intr(struct phy_device *phydev)
 	.ack_interrupt	= am79c_ack_interrupt,
 	.config_intr	= am79c_config_intr,
 	.driver		= { .owner = THIS_MODULE,},
-};
-
-static int __init am79c_init(void)
-{
-	return phy_driver_register(&am79c_driver);
-}
-
-static void __exit am79c_exit(void)
-{
-	phy_driver_unregister(&am79c_driver);
-}
+} };
 
-module_init(am79c_init);
-module_exit(am79c_exit);
+module_phy_driver(am79c_driver);
 
 static struct mdio_device_id __maybe_unused amd_tbl[] = {
 	{ PHY_ID_AM79C874, 0xfffffff0 },
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index fdc1b418fa6a82a434fa305c8785546bc6584ad9..f80e19ac67041a0e75778761373ec3ce2a982d95 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -352,19 +352,7 @@ static struct phy_driver at803x_driver[] = {
 	},
 } };
 
-static int __init atheros_init(void)
-{
-	return phy_drivers_register(at803x_driver,
-				    ARRAY_SIZE(at803x_driver));
-}
-
-static void __exit atheros_exit(void)
-{
-	phy_drivers_unregister(at803x_driver, ARRAY_SIZE(at803x_driver));
-}
-
-module_init(atheros_init);
-module_exit(atheros_exit);
+module_phy_driver(at803x_driver);
 
 static struct mdio_device_id __maybe_unused atheros_tbl[] = {
 	{ ATH8030_PHY_ID, 0xffffffef },
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index ac55b08078534e8170416630dced41dba150564c..830ec31f952f290c0b4f3b5e58186ce5924ce55a 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -100,20 +100,7 @@ static struct phy_driver bcm63xx_driver[] = {
 	.driver		= { .owner = THIS_MODULE },
 } };
 
-static int __init bcm63xx_phy_init(void)
-{
-	return phy_drivers_register(bcm63xx_driver,
-				    ARRAY_SIZE(bcm63xx_driver));
-}
-
-static void __exit bcm63xx_phy_exit(void)
-{
-	phy_drivers_unregister(bcm63xx_driver,
-			       ARRAY_SIZE(bcm63xx_driver));
-}
-
-module_init(bcm63xx_phy_init);
-module_exit(bcm63xx_phy_exit);
+module_phy_driver(bcm63xx_driver);
 
 static struct mdio_device_id __maybe_unused bcm63xx_tbl[] = {
 	{ 0x00406000, 0xfffffc00 },
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 1d211d36903965a7f2fc461190763f2b2aadd705..974ec45152697a8dc6dfc93c0bb40b7852bae41c 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -39,45 +39,15 @@
 #define AFE_RXCONFIG_0			MISC_ADDR(0x38, 0)
 #define AFE_RXCONFIG_1			MISC_ADDR(0x38, 1)
+#define AFE_RXCONFIG_2			MISC_ADDR(0x38, 2)
 #define AFE_RX_LP_COUNTER		MISC_ADDR(0x38, 3)
 #define AFE_TX_CONFIG			MISC_ADDR(0x39, 0)
+#define AFE_VDCA_ICTRL_0		MISC_ADDR(0x39, 1)
+#define AFE_VDAC_OTHERS_0		MISC_ADDR(0x39, 3)
 #define AFE_HPF_TRIM_OTHERS		MISC_ADDR(0x3a, 0)
 
 #define CORE_EXPB0			0xb0
 
-static int bcm7445_config_init(struct phy_device *phydev)
-{
-	int ret;
-	const struct bcm7445_regs {
-		int reg;
-		u16 value;
-	} bcm7445_regs_cfg[] = {
-		/* increases ADC latency by 24ns */
-		{ MII_BCM54XX_EXP_SEL, 0x0038 },
-		{ MII_BCM54XX_EXP_DATA, 0xAB95 },
-		/* increases internal 1V LDO voltage by 5% */
-		{ MII_BCM54XX_EXP_SEL, 0x2038 },
-		{ MII_BCM54XX_EXP_DATA, 0xBB22 },
-		/* reduce RX low pass filter corner frequency */
-		{ MII_BCM54XX_EXP_SEL, 0x6038 },
-		{ MII_BCM54XX_EXP_DATA, 0xFFC5 },
-		/* reduce RX high pass filter corner frequency */
-		{ MII_BCM54XX_EXP_SEL, 0x003a },
-		{ MII_BCM54XX_EXP_DATA, 0x2002 },
-	};
-	unsigned int i;
-
-	for (i = 0; i < ARRAY_SIZE(bcm7445_regs_cfg); i++) {
-		ret = phy_write(phydev,
-				bcm7445_regs_cfg[i].reg,
-				bcm7445_regs_cfg[i].value);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
 static void phy_write_exp(struct phy_device *phydev,
 			  u16 reg, u16 value)
 {
@@ -102,7 +72,16 @@ static void phy_write_misc(struct phy_device *phydev,
 	phy_write(phydev, MII_BCM54XX_EXP_DATA, value);
 }
 
-static int bcm7xxx_28nm_afe_config_init(struct phy_device *phydev)
+static void r_rc_cal_reset(struct phy_device *phydev)
+{
+	/* Reset R_CAL/RC_CAL Engine */
+	phy_write_exp(phydev, 0x00b0, 0x0010);
+
+	/* Disable Reset R_AL/RC_CAL Engine */
+	phy_write_exp(phydev, 0x00b0, 0x0000);
+}
+
+static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev)
 {
 	/* Increase VCO range to prevent unlocking problem of PLL at low
 	 * temp
@@ -123,11 +102,7 @@ static int bcm7xxx_28nm_afe_config_init(struct phy_device *phydev)
 	/* Switch to CORE_BASE1E */
 	phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0xd);
 
-	/* Reset R_CAL/RC_CAL Engine */
-	phy_write_exp(phydev, CORE_EXPB0, 0x0010);
-
-	/* Disable Reset R_CAL/RC_CAL Engine */
-	phy_write_exp(phydev, CORE_EXPB0, 0x0000);
+	r_rc_cal_reset(phydev);
 
 	/* write AFE_RXCONFIG_0 */
 	phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19);
@@ -147,6 +122,71 @@ static int bcm7xxx_28nm_afe_config_init(struct phy_device *phydev)
 	return 0;
 }
 
+static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev)
+{
+	/* AFE_RXCONFIG_0 */
+	phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb15);
+
+	/* AFE_RXCONFIG_1 */
+	phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f);
+
+	/* AFE_RXCONFIG_2, set rCal offset for HT=0 code and LT=-2 code */
+	phy_write_misc(phydev, AFE_RXCONFIG_2, 0x2003);
+
+	/* AFE_RX_LP_COUNTER, set RX bandwidth to maximum */
+	phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
+
+	/* AFE_TX_CONFIG, set 1000BT Cfeed=110 for all ports */
+	phy_write_misc(phydev, AFE_TX_CONFIG, 0x0061);
+
+	/* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */
+	phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
+
+	/* AFE_VDAC_OTHERS_0, set 1000BT Cidac=010 for all ports */
+	phy_write_misc(phydev, AFE_VDAC_OTHERS_0, 0xa020);
+
+	/* AFE_HPF_TRIM_OTHERS, set 100Tx/10BT to -4.5% swing and set rCal
+	 * offset for HT=0 code
+	 */
+	phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x00e3);
+
+	/* CORE_BASE1E, force trim to overwrite and set I_ext trim to 0000 */
+	phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0x0010);
+
+	/* DSP_TAP10, adjust bias current trim (+0% swing, +0 tick) */
+	phy_write_misc(phydev, DSP_TAP10, 0x011b);
+
+	/* Reset R_CAL/RC_CAL engine */
+	r_rc_cal_reset(phydev);
+
+	return 0;
+}
+
+static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev)
+{
+	/* AFE_RXCONFIG_1, provide more margin for INL/DNL measurement */
+	phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f);
+
+	/* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */
+	phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
+
+	/* AFE_HPF_TRIM_OTHERS, set 100Tx/10BT to -4.5% swing and set rCal
+	 * offset for HT=0 code
+	 */
+	phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x00e3);
+
+	/* CORE_BASE1E, force trim to overwrite and set I_ext trim to 0000 */
+	phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0x0010);
+
+	/* DSP_TAP10, adjust bias current trim (+0% swing, +0 tick) */
+	phy_write_misc(phydev, DSP_TAP10, 0x011b);
+
+	/* Reset R_CAL/RC_CAL engine */
+	r_rc_cal_reset(phydev);
+
+	return 0;
+}
+
 static int bcm7xxx_apd_enable(struct phy_device *phydev)
 {
 	int val;
@@ -200,15 +240,23 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
 	u8 patch = PHY_BRCM_7XXX_PATCH(phydev->dev_flags);
 	int ret = 0;
 
-	dev_info(&phydev->dev, "PHY revision: 0x%02x, patch: %d\n", rev, patch);
+	pr_info_once("%s: %s PHY revision: 0x%02x, patch: %d\n",
+		     dev_name(&phydev->dev), phydev->drv->name, rev, patch);
 
 	switch (rev) {
-	case 0xa0:
 	case 0xb0:
-		ret = bcm7445_config_init(phydev);
+		ret = bcm7xxx_28nm_b0_afe_config_init(phydev);
+		break;
+	case 0xd0:
+		ret = bcm7xxx_28nm_d0_afe_config_init(phydev);
+		break;
+	case 0xe0:
+	case 0xf0:
+	/* Rev G0 introduces a roll over */
+	case 0x10:
+		ret = bcm7xxx_28nm_e0_plus_afe_config_init(phydev);
 		break;
 	default:
-		ret = bcm7xxx_28nm_afe_config_init(phydev);
 		break;
 	}
@@ -336,7 +384,7 @@ static int bcm7xxx_dummy_config_init(struct phy_device *phydev)
 	.features	= PHY_GBIT_FEATURES |				\
 			  SUPPORTED_Pause | SUPPORTED_Asym_Pause,	\
 	.flags		= PHY_IS_INTERNAL,				\
-	.config_init	= bcm7xxx_28nm_afe_config_init,			\
+	.config_init	= bcm7xxx_28nm_config_init,			\
 	.config_aneg	= genphy_config_aneg,				\
 	.read_status	= genphy_read_status,				\
 	.resume		= bcm7xxx_28nm_resume,				\
@@ -416,20 +464,7 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
 	{ }
 };
 
-static int __init bcm7xxx_phy_init(void)
-{
-	return phy_drivers_register(bcm7xxx_driver,
-				    ARRAY_SIZE(bcm7xxx_driver));
-}
-
-static void __exit bcm7xxx_phy_exit(void)
-{
-	phy_drivers_unregister(bcm7xxx_driver,
-			       ARRAY_SIZE(bcm7xxx_driver));
-}
-
-module_init(bcm7xxx_phy_init);
-module_exit(bcm7xxx_phy_exit);
+module_phy_driver(bcm7xxx_driver);
 
 MODULE_DEVICE_TABLE(mdio, bcm7xxx_tbl);
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index 799789518e873edf273c95be6ec1c3b59ff4aa80..1eca20452f031b3154765e252d2cac1be1ef87f7 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -216,18 +216,6 @@ static struct phy_driver bcm87xx_driver[] = {
 	.driver		= { .owner = THIS_MODULE },
 } };
 
-static int __init bcm87xx_init(void)
-{
-	return phy_drivers_register(bcm87xx_driver,
-				    ARRAY_SIZE(bcm87xx_driver));
-}
-module_init(bcm87xx_init);
-
-static void __exit bcm87xx_exit(void)
-{
-	phy_drivers_unregister(bcm87xx_driver,
-			       ARRAY_SIZE(bcm87xx_driver));
-}
-module_exit(bcm87xx_exit);
+module_phy_driver(bcm87xx_driver);
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 854f2c9a7b2b5224f1dab808d16d8f514cdd486a..a52afb26421b8e975f3742d6ab77a2eddc133d8c 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -654,20 +654,7 @@ static struct phy_driver broadcom_drivers[] = {
 	.driver		= { .owner = THIS_MODULE },
 } };
 
-static int __init broadcom_init(void)
-{
-	return phy_drivers_register(broadcom_drivers,
-				    ARRAY_SIZE(broadcom_drivers));
-}
-
-static void __exit broadcom_exit(void)
-{
-	phy_drivers_unregister(broadcom_drivers,
-			       ARRAY_SIZE(broadcom_drivers));
-}
-
-module_init(broadcom_init);
-module_exit(broadcom_exit);
+module_phy_driver(broadcom_drivers);
 
 static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
 	{ PHY_ID_BCM5411, 0xfffffff0 },
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index b57ce0cc9657fb9657f54fd05ed5632bf82bd4fc..27f5464899d47e2ba9fa21eca7c9d436f14156af 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -129,20 +129,7 @@ static struct phy_driver cis820x_driver[] = {
 	.driver		= { .owner = THIS_MODULE,},
 } };
 
-static int __init cicada_init(void)
-{
-	return phy_drivers_register(cis820x_driver,
-				    ARRAY_SIZE(cis820x_driver));
-}
-
-static void __exit cicada_exit(void)
-{
-	phy_drivers_unregister(cis820x_driver,
-			       ARRAY_SIZE(cis820x_driver));
-}
-
-module_init(cicada_init);
-module_exit(cicada_exit);
+module_phy_driver(cis820x_driver);
 
 static struct mdio_device_id __maybe_unused cicada_tbl[] = {
 	{ 0x000fc410, 0x000ffff0 },
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index d2c08f625a41a3d33a2ef1c6ab1fffdd08a5bc4b..0d16c7d9e1bf66c35890dfa119091a5b50375f59 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -182,20 +182,7 @@ static struct phy_driver dm91xx_driver[] = {
 	.driver		= { .owner = THIS_MODULE,},
 } };
 
-static int __init davicom_init(void)
-{
-	return phy_drivers_register(dm91xx_driver,
-				    ARRAY_SIZE(dm91xx_driver));
-}
-
-static void __exit davicom_exit(void)
-{
-	phy_drivers_unregister(dm91xx_driver,
-			       ARRAY_SIZE(dm91xx_driver));
-}
-
-module_init(davicom_init);
-module_exit(davicom_exit);
+module_phy_driver(dm91xx_driver);
 
 static struct mdio_device_id __maybe_unused davicom_tbl[] = {
 	{ 0x0181b880, 0x0ffffff0 },
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index a8eb19ec3183b5606e300c27061701298aaa13bf..a907743816a84d2b78de3b2778af1f1dc9adcbe9 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -87,7 +87,7 @@ static int et1011c_read_status(struct phy_device *phydev)
 	return ret;
 }
 
-static struct phy_driver et1011c_driver = {
+static struct phy_driver et1011c_driver[] = { {
 	.phy_id		= 0x0282f014,
 	.name		= "ET1011C",
 	.phy_id_mask	= 0xfffffff0,
@@ -96,20 +96,9 @@ static int et1011c_read_status(struct phy_device *phydev)
 	.config_aneg	= et1011c_config_aneg,
 	.read_status	= et1011c_read_status,
 	.driver 	= { .owner = THIS_MODULE,},
-};
-
-static int __init et1011c_init(void)
-{
-	return phy_driver_register(&et1011c_driver);
-}
-
-static void __exit et1011c_exit(void)
-{
-	phy_driver_unregister(&et1011c_driver);
-}
+} };
 
-module_init(et1011c_init);
-module_exit(et1011c_exit);
+module_phy_driver(et1011c_driver);
 
 static struct mdio_device_id __maybe_unused et1011c_tbl[] = {
 	{ 0x0282f014, 0xfffffff0 },
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 97bf58bf4939a43188de0446deda4a2c366c7029..8644f039d92274399fe9d3c1389474c07fe2df1c 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -253,20 +253,7 @@ static struct phy_driver icplus_driver[] = {
 	.driver		= { .owner = THIS_MODULE,},
 } };
 
-static int __init icplus_init(void)
-{
-	return phy_drivers_register(icplus_driver,
-				    ARRAY_SIZE(icplus_driver));
-}
-
-static void __exit icplus_exit(void)
-{
-	phy_drivers_unregister(icplus_driver,
-			       ARRAY_SIZE(icplus_driver));
-}
-
-module_init(icplus_init);
-module_exit(icplus_exit);
+module_phy_driver(icplus_driver);
 
 static struct mdio_device_id __maybe_unused icplus_tbl[] = {
 	{ 0x02430d80, 0x0ffffff0 },
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 9108f319170147ed6dda42f58552b16fa05ab4c4..a3a5a703635b100703c728a4f19fffa8b33d809b 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -312,20 +312,7 @@ static struct phy_driver lxt97x_driver[] = {
 	.driver		= { .owner = THIS_MODULE,},
 } };
 
-static int __init lxt_init(void)
-{
-	return phy_drivers_register(lxt97x_driver,
-				    ARRAY_SIZE(lxt97x_driver));
-}
-
-static void __exit lxt_exit(void)
-{
-	phy_drivers_unregister(lxt97x_driver,
-			       ARRAY_SIZE(lxt97x_driver));
-}
-
-module_init(lxt_init);
-module_exit(lxt_exit);
+module_phy_driver(lxt97x_driver);
 
 static struct mdio_device_id __maybe_unused lxt_tbl[] = {
 	{ 0x78100000, 0xfffffff0 },
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 225c033b08f3fbc4217fc238ec7048b611b8d87b..1b1698f98818219c623038c8781dc2e806a3070d 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -54,6 +54,9 @@
 #define MII_M1145_PHY_EXT_CR		0x14
 #define MII_M1145_RGMII_RX_DELAY	0x0080
 #define MII_M1145_RGMII_TX_DELAY	0x0002
+#define MII_M1145_HWCFG_MODE_SGMII_NO_CLK	0x4
+#define MII_M1145_HWCFG_MODE_MASK		0xf
+#define MII_M1145_HWCFG_FIBER_COPPER_AUTO	0x8000
 
 #define MII_M1145_HWCFG_MODE_SGMII_NO_CLK	0x4
 #define MII_M1145_HWCFG_MODE_MASK		0xf
@@ -123,6 +126,9 @@
 
 #define MII_M1116R_CONTROL_REG_MAC	21
 
+#define MII_88E3016_PHY_SPEC_CTRL	0x10
+#define MII_88E3016_DISABLE_SCRAMBLER	0x0200
+#define MII_88E3016_AUTO_MDIX_CROSSOVER	0x0030
 
 MODULE_DESCRIPTION("Marvell PHY driver");
 MODULE_AUTHOR("Andy Fleming");
@@ -439,6 +445,25 @@ static int m88e1116r_config_init(struct phy_device *phydev)
 	return 0;
 }
 
+static int m88e3016_config_init(struct phy_device *phydev)
+{
+	int reg;
+
+	/* Enable Scrambler and Auto-Crossover */
+	reg = phy_read(phydev, MII_88E3016_PHY_SPEC_CTRL);
+	if (reg < 0)
+		return reg;
+
+	reg &= ~MII_88E3016_DISABLE_SCRAMBLER;
+	reg |= MII_88E3016_AUTO_MDIX_CROSSOVER;
+
+	reg = phy_write(phydev, MII_88E3016_PHY_SPEC_CTRL, reg);
+	if (reg < 0)
+		return reg;
+
+	return 0;
+}
+
 static int m88e1111_config_init(struct phy_device *phydev)
 {
 	int err;
@@ -625,6 +650,7 @@ static int m88e1149_config_init(struct phy_device *phydev)
 static int m88e1145_config_init(struct phy_device *phydev)
 {
 	int err;
+	int temp;
 
 	/* Take care of errata E0 & E1 */
 	err = phy_write(phydev, 0x1d, 0x001b);
@@ -682,7 +708,7 @@ static int m88e1145_config_init(struct phy_device *phydev)
 	}
 
 	if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
-		int temp = phy_read(phydev, MII_M1145_PHY_EXT_SR);
+		temp = phy_read(phydev, MII_M1145_PHY_EXT_SR);
 
 		if (temp < 0)
 			return temp;
@@ -789,6 +815,12 @@ static int marvell_read_status(struct phy_device *phydev)
 	return 0;
 }
 
+static int marvell_aneg_done(struct phy_device *phydev)
+{
+	int retval = phy_read(phydev, MII_M1011_PHY_STATUS);
+	return (retval < 0) ? retval : (retval & MII_M1011_PHY_STATUS_RESOLVED);
+}
+
 static int m88e1121_did_interrupt(struct phy_device *phydev)
 {
 	int imask;
@@ -1069,22 +1101,26 @@ static struct phy_driver marvell_drivers[] = {
 		.suspend = &genphy_suspend,
 		.driver = { .owner = THIS_MODULE },
 	},
+	{
+		.phy_id = MARVELL_PHY_ID_88E3016,
+		.phy_id_mask = MARVELL_PHY_ID_MASK,
+		.name = "Marvell 88E3016",
+		.features = PHY_BASIC_FEATURES,
+		.flags = PHY_HAS_INTERRUPT,
+		.config_aneg = &genphy_config_aneg,
+		.config_init = &m88e3016_config_init,
+		.aneg_done = &marvell_aneg_done,
+		.read_status = &marvell_read_status,
+		.ack_interrupt = &marvell_ack_interrupt,
+		.config_intr = &marvell_config_intr,
+		.did_interrupt = &m88e1121_did_interrupt,
+		.resume = &genphy_resume,
+		.suspend = &genphy_suspend,
+		.driver = { .owner = THIS_MODULE },
+	},
 };
 
-static int __init marvell_init(void)
-{
-	return phy_drivers_register(marvell_drivers,
-				    ARRAY_SIZE(marvell_drivers));
-}
-
-static void __exit marvell_exit(void)
-{
-	phy_drivers_unregister(marvell_drivers,
-			       ARRAY_SIZE(marvell_drivers));
-}
-
-module_init(marvell_init);
-module_exit(marvell_exit);
+module_phy_driver(marvell_drivers);
 
 static struct mdio_device_id __maybe_unused marvell_tbl[] = {
 	{ MARVELL_PHY_ID_88E1101, MARVELL_PHY_ID_MASK },
@@ -1098,6 +1134,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
 	{ MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK },
 	{ MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK },
 	{ MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK },
+	{ MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
 	{ }
 };
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 8c2a29a9bd7f2efc53e85945a2860ff4687b3bed..c530de1e63f5d5eb5bf0ee6e12a7cb25cded50bb 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -6,6 +6,7 @@
  * Author: David J. Choi
  *
  * Copyright (c) 2010-2013 Micrel, Inc.
+ * Copyright (c) 2014 Johan Hovold
  *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
@@ -30,30 +31,32 @@
 
 /* Operation Mode Strap Override */
 #define MII_KSZPHY_OMSO				0x16
-#define KSZPHY_OMSO_B_CAST_OFF			(1 << 9)
-#define KSZPHY_OMSO_RMII_OVERRIDE		(1 << 1)
-#define KSZPHY_OMSO_MII_OVERRIDE		(1 << 0)
+#define KSZPHY_OMSO_B_CAST_OFF			BIT(9)
+#define KSZPHY_OMSO_RMII_OVERRIDE		BIT(1)
+#define KSZPHY_OMSO_MII_OVERRIDE		BIT(0)
 
 /* general Interrupt control/status reg in vendor specific block. */
 #define MII_KSZPHY_INTCS			0x1B
-#define KSZPHY_INTCS_JABBER			(1 << 15)
-#define KSZPHY_INTCS_RECEIVE_ERR		(1 << 14)
-#define KSZPHY_INTCS_PAGE_RECEIVE		(1 << 13)
-#define KSZPHY_INTCS_PARELLEL			(1 << 12)
-#define KSZPHY_INTCS_LINK_PARTNER_ACK		(1 << 11)
-#define KSZPHY_INTCS_LINK_DOWN			(1 << 10)
-#define KSZPHY_INTCS_REMOTE_FAULT		(1 << 9)
-#define KSZPHY_INTCS_LINK_UP			(1 << 8)
+#define KSZPHY_INTCS_JABBER			BIT(15)
+#define KSZPHY_INTCS_RECEIVE_ERR		BIT(14)
+#define KSZPHY_INTCS_PAGE_RECEIVE		BIT(13)
+#define KSZPHY_INTCS_PARELLEL			BIT(12)
+#define KSZPHY_INTCS_LINK_PARTNER_ACK		BIT(11)
+#define KSZPHY_INTCS_LINK_DOWN			BIT(10)
+#define KSZPHY_INTCS_REMOTE_FAULT		BIT(9)
+#define KSZPHY_INTCS_LINK_UP			BIT(8)
 #define KSZPHY_INTCS_ALL			(KSZPHY_INTCS_LINK_UP |\
 						KSZPHY_INTCS_LINK_DOWN)
 
-/* general PHY control reg in vendor specific block. */
-#define	MII_KSZPHY_CTRL			0x1F
+/* PHY Control 1 */
+#define	MII_KSZPHY_CTRL_1			0x1e
+
+/* PHY Control 2 / PHY Control (if no PHY Control 1) */
+#define	MII_KSZPHY_CTRL_2			0x1f
+#define	MII_KSZPHY_CTRL				MII_KSZPHY_CTRL_2
 /* bitmap of PHY register to set interrupt mode */
-#define KSZPHY_CTRL_INT_ACTIVE_HIGH		(1 << 9)
-#define KSZ9021_CTRL_INT_ACTIVE_HIGH		(1 << 14)
-#define KS8737_CTRL_INT_ACTIVE_HIGH		(1 << 14)
-#define KSZ8051_RMII_50MHZ_CLK			(1 << 7)
+#define KSZPHY_CTRL_INT_ACTIVE_HIGH		BIT(9)
+#define KSZPHY_RMII_REF_CLK_SEL			BIT(7)
 
 /* Write/read to/from extended registers */
 #define MII_KSZPHY_EXTREG			0x0b
@@ -69,20 +72,46 @@
 
 #define PS_TO_REG				200
 
-static int ksz_config_flags(struct phy_device *phydev)
-{
-	int regval;
+struct kszphy_type {
+	u32 led_mode_reg;
+	u16 interrupt_level_mask;
+	bool has_broadcast_disable;
+	bool has_rmii_ref_clk_sel;
+};
 
-	if (phydev->dev_flags & (MICREL_PHY_50MHZ_CLK | MICREL_PHY_25MHZ_CLK)) {
-		regval = phy_read(phydev, MII_KSZPHY_CTRL);
-		if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK)
-			regval |= KSZ8051_RMII_50MHZ_CLK;
-		else
-			regval &= ~KSZ8051_RMII_50MHZ_CLK;
-		return phy_write(phydev, MII_KSZPHY_CTRL, regval);
-	}
-	return 0;
-}
+struct kszphy_priv {
+	const struct kszphy_type *type;
+	int led_mode;
+	bool rmii_ref_clk_sel;
+	bool rmii_ref_clk_sel_val;
+};
+
+static const struct kszphy_type ksz8021_type = {
+	.led_mode_reg		= MII_KSZPHY_CTRL_2,
+	.has_rmii_ref_clk_sel	= true,
+};
+
+static const struct kszphy_type ksz8041_type = {
+	.led_mode_reg		= MII_KSZPHY_CTRL_1,
+};
+
+static const struct kszphy_type ksz8051_type = {
+	.led_mode_reg		= MII_KSZPHY_CTRL_2,
+};
+
+static const struct kszphy_type ksz8081_type = {
+	.led_mode_reg		= MII_KSZPHY_CTRL_2,
+	.has_broadcast_disable	= true,
+	.has_rmii_ref_clk_sel	= true,
+};
+
+static const struct kszphy_type ks8737_type = {
+	.interrupt_level_mask	= BIT(14),
+};
+
+static const struct kszphy_type ksz9021_type = {
+	.interrupt_level_mask	= BIT(14),
+};
 
 static int kszphy_extended_write(struct phy_device *phydev,
 				 u32 regnum, u16 val)
@@ -108,112 +137,137 @@ static int kszphy_ack_interrupt(struct phy_device *phydev)
 	return (rc < 0) ? rc : 0;
 }
 
-static int kszphy_set_interrupt(struct phy_device *phydev)
+static int kszphy_config_intr(struct phy_device *phydev)
 {
+	const struct kszphy_type *type = phydev->drv->driver_data;
 	int temp;
-	temp = (PHY_INTERRUPT_ENABLED == phydev->interrupts) ?
- KSZPHY_INTCS_ALL : 0; - return phy_write(phydev, MII_KSZPHY_INTCS, temp); -} + u16 mask; -static int kszphy_config_intr(struct phy_device *phydev) -{ - int temp, rc; + if (type && type->interrupt_level_mask) + mask = type->interrupt_level_mask; + else + mask = KSZPHY_CTRL_INT_ACTIVE_HIGH; /* set the interrupt pin active low */ temp = phy_read(phydev, MII_KSZPHY_CTRL); - temp &= ~KSZPHY_CTRL_INT_ACTIVE_HIGH; + if (temp < 0) + return temp; + temp &= ~mask; phy_write(phydev, MII_KSZPHY_CTRL, temp); - rc = kszphy_set_interrupt(phydev); - return rc < 0 ? rc : 0; -} -static int ksz9021_config_intr(struct phy_device *phydev) -{ - int temp, rc; + /* enable / disable interrupts */ + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + temp = KSZPHY_INTCS_ALL; + else + temp = 0; - /* set the interrupt pin active low */ - temp = phy_read(phydev, MII_KSZPHY_CTRL); - temp &= ~KSZ9021_CTRL_INT_ACTIVE_HIGH; - phy_write(phydev, MII_KSZPHY_CTRL, temp); - rc = kszphy_set_interrupt(phydev); - return rc < 0 ? rc : 0; + return phy_write(phydev, MII_KSZPHY_INTCS, temp); } -static int ks8737_config_intr(struct phy_device *phydev) +static int kszphy_rmii_clk_sel(struct phy_device *phydev, bool val) { - int temp, rc; + int ctrl; - /* set the interrupt pin active low */ - temp = phy_read(phydev, MII_KSZPHY_CTRL); - temp &= ~KS8737_CTRL_INT_ACTIVE_HIGH; - phy_write(phydev, MII_KSZPHY_CTRL, temp); - rc = kszphy_set_interrupt(phydev); - return rc < 0 ? rc : 0; -} - -static int kszphy_setup_led(struct phy_device *phydev, - unsigned int reg, unsigned int shift) -{ + ctrl = phy_read(phydev, MII_KSZPHY_CTRL); + if (ctrl < 0) + return ctrl; - struct device *dev = &phydev->dev; - struct device_node *of_node = dev->of_node; - int rc, temp; - u32 val; + if (val) + ctrl |= KSZPHY_RMII_REF_CLK_SEL; + else + ctrl &= ~KSZPHY_RMII_REF_CLK_SEL; - if (!of_node && dev->parent->of_node) - of_node = dev->parent->of_node; + return phy_write(phydev, MII_KSZPHY_CTRL, ctrl); +} - if (of_property_read_u32(of_node, "micrel,led-mode", &val)) - return 0; +static int kszphy_setup_led(struct phy_device *phydev, u32 reg, int val) +{ + int rc, temp, shift; + + switch (reg) { + case MII_KSZPHY_CTRL_1: + shift = 14; + break; + case MII_KSZPHY_CTRL_2: + shift = 4; + break; + default: + return -EINVAL; + } temp = phy_read(phydev, reg); - if (temp < 0) - return temp; + if (temp < 0) { + rc = temp; + goto out; + } temp &= ~(3 << shift); temp |= val << shift; rc = phy_write(phydev, reg, temp); +out: + if (rc < 0) + dev_err(&phydev->dev, "failed to set led mode\n"); - return rc < 0 ? rc : 0; + return rc; } -static int kszphy_config_init(struct phy_device *phydev) +/* Disable PHY address 0 as the broadcast address, so that it can be used as a + * unique (non-broadcast) address on a shared bus. 
+ */ +static int kszphy_broadcast_disable(struct phy_device *phydev) { - return 0; -} + int ret; -static int kszphy_config_init_led8041(struct phy_device *phydev) -{ - /* single led control, register 0x1e bits 15..14 */ - return kszphy_setup_led(phydev, 0x1e, 14); + ret = phy_read(phydev, MII_KSZPHY_OMSO); + if (ret < 0) + goto out; + + ret = phy_write(phydev, MII_KSZPHY_OMSO, ret | KSZPHY_OMSO_B_CAST_OFF); +out: + if (ret) + dev_err(&phydev->dev, "failed to disable broadcast address\n"); + + return ret; } -static int ksz8021_config_init(struct phy_device *phydev) +static int kszphy_config_init(struct phy_device *phydev) { - const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE; - int rc; + struct kszphy_priv *priv = phydev->priv; + const struct kszphy_type *type; + int ret; - rc = kszphy_setup_led(phydev, 0x1f, 4); - if (rc) - dev_err(&phydev->dev, "failed to set led mode\n"); + if (!priv) + return 0; - rc = ksz_config_flags(phydev); - if (rc < 0) - return rc; - rc = phy_write(phydev, MII_KSZPHY_OMSO, val); - return rc < 0 ? rc : 0; + type = priv->type; + + if (type->has_broadcast_disable) + kszphy_broadcast_disable(phydev); + + if (priv->rmii_ref_clk_sel) { + ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val); + if (ret) { + dev_err(&phydev->dev, "failed to set rmii reference clock\n"); + return ret; + } + } + + if (priv->led_mode >= 0) + kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode); + + return 0; } -static int ks8051_config_init(struct phy_device *phydev) +static int ksz8021_config_init(struct phy_device *phydev) { int rc; - rc = kszphy_setup_led(phydev, 0x1f, 4); + rc = kszphy_config_init(phydev); if (rc) - dev_err(&phydev->dev, "failed to set led mode\n"); + return rc; + + rc = kszphy_broadcast_disable(phydev); - rc = ksz_config_flags(phydev); return rc < 0 ? 
rc : 0; } @@ -394,8 +448,8 @@ static int ksz9031_config_init(struct phy_device *phydev) } #define KSZ8873MLL_GLOBAL_CONTROL_4 0x06 -#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX (1 << 6) -#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED (1 << 4) +#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX BIT(6) +#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED BIT(4) static int ksz8873mll_read_status(struct phy_device *phydev) { int regval; @@ -446,24 +500,62 @@ ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum, { } -static int ksz8021_probe(struct phy_device *phydev) +static int kszphy_probe(struct phy_device *phydev) { + const struct kszphy_type *type = phydev->drv->driver_data; + struct device_node *np = phydev->dev.of_node; + struct kszphy_priv *priv; struct clk *clk; + int ret; + + priv = devm_kzalloc(&phydev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + phydev->priv = priv; + + priv->type = type; + + if (type->led_mode_reg) { + ret = of_property_read_u32(np, "micrel,led-mode", + &priv->led_mode); + if (ret) + priv->led_mode = -1; + + if (priv->led_mode > 3) { + dev_err(&phydev->dev, "invalid led mode: 0x%02x\n", + priv->led_mode); + priv->led_mode = -1; + } + } else { + priv->led_mode = -1; + } clk = devm_clk_get(&phydev->dev, "rmii-ref"); if (!IS_ERR(clk)) { unsigned long rate = clk_get_rate(clk); + bool rmii_ref_clk_sel_25_mhz; + + priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel; + rmii_ref_clk_sel_25_mhz = of_property_read_bool(np, + "micrel,rmii-reference-clock-select-25-mhz"); if (rate > 24500000 && rate < 25500000) { - phydev->dev_flags |= MICREL_PHY_25MHZ_CLK; + priv->rmii_ref_clk_sel_val = rmii_ref_clk_sel_25_mhz; } else if (rate > 49500000 && rate < 50500000) { - phydev->dev_flags |= MICREL_PHY_50MHZ_CLK; + priv->rmii_ref_clk_sel_val = !rmii_ref_clk_sel_25_mhz; } else { dev_err(&phydev->dev, "Clock rate out of range: %ld\n", rate); return -EINVAL; } } + /* Support legacy board-file configuration */ + if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) { + priv->rmii_ref_clk_sel = true; + priv->rmii_ref_clk_sel_val = true; + } + return 0; } @@ -474,11 +566,12 @@ static struct phy_driver ksphy_driver[] = { .name = "Micrel KS8737", .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .driver_data = &ks8737_type, .config_init = kszphy_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, - .config_intr = ks8737_config_intr, + .config_intr = kszphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, .driver = { .owner = THIS_MODULE,}, @@ -489,7 +582,8 @@ static struct phy_driver ksphy_driver[] = { .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, - .probe = ksz8021_probe, + .driver_data = &ksz8021_type, + .probe = kszphy_probe, .config_init = ksz8021_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, @@ -505,7 +599,8 @@ static struct phy_driver ksphy_driver[] = { .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, - .probe = ksz8021_probe, + .driver_data = &ksz8021_type, + .probe = kszphy_probe, .config_init = ksz8021_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, @@ -521,7 +616,9 @@ static struct phy_driver ksphy_driver[] = { .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause), .flags = PHY_HAS_MAGICANEG | 
PHY_HAS_INTERRUPT, - .config_init = kszphy_config_init_led8041, + .driver_data = &ksz8041_type, + .probe = kszphy_probe, + .config_init = kszphy_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, @@ -536,7 +633,9 @@ static struct phy_driver ksphy_driver[] = { .features = PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause, .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, - .config_init = kszphy_config_init_led8041, + .driver_data = &ksz8041_type, + .probe = kszphy_probe, + .config_init = kszphy_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, @@ -551,7 +650,9 @@ static struct phy_driver ksphy_driver[] = { .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, - .config_init = ks8051_config_init, + .driver_data = &ksz8051_type, + .probe = kszphy_probe, + .config_init = kszphy_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, @@ -565,7 +666,9 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = 0x00ffffff, .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, - .config_init = kszphy_config_init_led8041, + .driver_data = &ksz8041_type, + .probe = kszphy_probe, + .config_init = kszphy_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, @@ -579,6 +682,8 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = 0x00fffff0, .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .driver_data = &ksz8081_type, + .probe = kszphy_probe, .config_init = kszphy_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, @@ -607,11 +712,12 @@ static struct phy_driver ksphy_driver[] = { .name = "Micrel KSZ9021 Gigabit PHY", .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .driver_data = &ksz9021_type, .config_init = ksz9021_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, - .config_intr = ksz9021_config_intr, + .config_intr = kszphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, .read_mmd_indirect = ksz9021_rd_mmd_phyreg, @@ -623,11 +729,12 @@ static struct phy_driver ksphy_driver[] = { .name = "Micrel KSZ9031 Gigabit PHY", .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .driver_data = &ksz9021_type, .config_init = ksz9031_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, - .config_intr = ksz9021_config_intr, + .config_intr = kszphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, .driver = { .owner = THIS_MODULE, }, @@ -657,20 +764,7 @@ static struct phy_driver ksphy_driver[] = { .driver = { .owner = THIS_MODULE, }, } }; -static int __init ksphy_init(void) -{ - return phy_drivers_register(ksphy_driver, - ARRAY_SIZE(ksphy_driver)); -} - -static void __exit ksphy_exit(void) -{ - phy_drivers_unregister(ksphy_driver, - ARRAY_SIZE(ksphy_driver)); -} - -module_init(ksphy_init); -module_exit(ksphy_exit); +module_phy_driver(ksphy_driver); MODULE_DESCRIPTION("Micrel PHY driver"); MODULE_AUTHOR("David J. 
Choi"); diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c index 9a5f234d95b05189cfed229a03164dba2adbc486..0a7b9c7f09a299d51f1388cdd3342e8e7fe120ad 100644 --- a/drivers/net/phy/national.c +++ b/drivers/net/phy/national.c @@ -129,7 +129,7 @@ static int ns_config_init(struct phy_device *phydev) return ns_ack_interrupt(phydev); } -static struct phy_driver dp83865_driver = { +static struct phy_driver dp83865_driver[] = { { .phy_id = DP83865_PHY_ID, .phy_id_mask = 0xfffffff0, .name = "NatSemi DP83865", @@ -141,25 +141,14 @@ static struct phy_driver dp83865_driver = { .ack_interrupt = ns_ack_interrupt, .config_intr = ns_config_intr, .driver = {.owner = THIS_MODULE,} -}; +} }; -static int __init ns_init(void) -{ - return phy_driver_register(&dp83865_driver); -} - -static void __exit ns_exit(void) -{ - phy_driver_unregister(&dp83865_driver); -} +module_phy_driver(dp83865_driver); MODULE_DESCRIPTION("NatSemi PHY driver"); MODULE_AUTHOR("Stuart Menefy"); MODULE_LICENSE("GPL"); -module_init(ns_init); -module_exit(ns_exit); - static struct mdio_device_id __maybe_unused ns_tbl[] = { { DP83865_PHY_ID, 0xfffffff0 }, { } diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c index fe0d0a15d5e1498801b2a0d5415da1a5a1c1edeb..be4c6f7c3645fc69c4d7d2bce02366d23e17eaf1 100644 --- a/drivers/net/phy/qsemi.c +++ b/drivers/net/phy/qsemi.c @@ -111,7 +111,7 @@ static int qs6612_config_intr(struct phy_device *phydev) } -static struct phy_driver qs6612_driver = { +static struct phy_driver qs6612_driver[] = { { .phy_id = 0x00181440, .name = "QS6612", .phy_id_mask = 0xfffffff0, @@ -123,20 +123,9 @@ static struct phy_driver qs6612_driver = { .ack_interrupt = qs6612_ack_interrupt, .config_intr = qs6612_config_intr, .driver = { .owner = THIS_MODULE,}, -}; - -static int __init qs6612_init(void) -{ - return phy_driver_register(&qs6612_driver); -} - -static void __exit qs6612_exit(void) -{ - phy_driver_unregister(&qs6612_driver); -} +} }; -module_init(qs6612_init); -module_exit(qs6612_exit); +module_phy_driver(qs6612_driver); static struct mdio_device_id __maybe_unused qs6612_tbl[] = { { 0x00181440, 0xfffffff0 }, diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 45483fdfbe063647c620f3b46db9772c0c17bd7a..96a0f0fab3caca1cf9b315b35d6f0e75cd3f13a3 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -101,18 +101,7 @@ static struct phy_driver realtek_drvs[] = { }, }; -static int __init realtek_init(void) -{ - return phy_drivers_register(realtek_drvs, ARRAY_SIZE(realtek_drvs)); -} - -static void __exit realtek_exit(void) -{ - phy_drivers_unregister(realtek_drvs, ARRAY_SIZE(realtek_drvs)); -} - -module_init(realtek_init); -module_exit(realtek_exit); +module_phy_driver(realtek_drvs); static struct mdio_device_id __maybe_unused realtek_tbl[] = { { 0x001cc912, 0x001fffff }, diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index a4b08198fb9f28363ef1a49a5b84ec64edeabcf9..c0f6479e19d48e51fd76c3bbce161ffdd7846842 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -250,24 +250,12 @@ static struct phy_driver smsc_phy_driver[] = { .driver = { .owner = THIS_MODULE, } } }; -static int __init smsc_init(void) -{ - return phy_drivers_register(smsc_phy_driver, - ARRAY_SIZE(smsc_phy_driver)); -} - -static void __exit smsc_exit(void) -{ - phy_drivers_unregister(smsc_phy_driver, ARRAY_SIZE(smsc_phy_driver)); -} +module_phy_driver(smsc_phy_driver); MODULE_DESCRIPTION("SMSC PHY driver"); MODULE_AUTHOR("Herbert Valerio Riedel"); MODULE_LICENSE("GPL"); 
-module_init(smsc_init); -module_exit(smsc_exit); - static struct mdio_device_id __maybe_unused smsc_tbl[] = { { 0x0007c0a0, 0xfffffff0 }, { 0x0007c0b0, 0xfffffff0 }, diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c index eab57fc5b967c94bdaeeb7cb958ef884aca8086e..46530159256b3c8c09ad528caf38a5a7cfdb8295 100644 --- a/drivers/net/phy/spi_ks8995.c +++ b/drivers/net/phy/spi_ks8995.c @@ -353,7 +353,9 @@ static int ks8995_probe(struct spi_device *spi) static int ks8995_remove(struct spi_device *spi) { - sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr); + struct ks8995_switch *ks = spi_get_drvdata(spi); + + sysfs_remove_bin_file(&spi->dev.kobj, &ks->regs_attr); return 0; } diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c index 5e1eb138916f0c68cea34323ff5f146a60fbeaf8..3fc199b773e633badeb2aa4b1b1a70877280b4cf 100644 --- a/drivers/net/phy/ste10Xp.c +++ b/drivers/net/phy/ste10Xp.c @@ -112,20 +112,7 @@ static struct phy_driver ste10xp_pdriver[] = { .driver = {.owner = THIS_MODULE,} } }; -static int __init ste10Xp_init(void) -{ - return phy_drivers_register(ste10xp_pdriver, - ARRAY_SIZE(ste10xp_pdriver)); -} - -static void __exit ste10Xp_exit(void) -{ - phy_drivers_unregister(ste10xp_pdriver, - ARRAY_SIZE(ste10xp_pdriver)); -} - -module_init(ste10Xp_init); -module_exit(ste10Xp_exit); +module_phy_driver(ste10xp_pdriver); static struct mdio_device_id __maybe_unused ste10Xp_tbl[] = { { STE101P_PHY_ID, 0xfffffff0 }, diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index 5dc0935da99c52a07ba7c09da27503b8ee0b2695..76cad712ddb2c7c6bc2794389380e4e0a862d5f5 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -311,19 +311,7 @@ static struct phy_driver vsc82xx_driver[] = { .driver = { .owner = THIS_MODULE,}, } }; -static int __init vsc82xx_init(void) -{ - return phy_drivers_register(vsc82xx_driver, - ARRAY_SIZE(vsc82xx_driver)); -} - -static void __exit vsc82xx_exit(void) -{ - phy_drivers_unregister(vsc82xx_driver, ARRAY_SIZE(vsc82xx_driver)); -} - -module_init(vsc82xx_init); -module_exit(vsc82xx_exit); +module_phy_driver(vsc82xx_driver); static struct mdio_device_id __maybe_unused vitesse_tbl[] = { { PHY_ID_VSC8234, 0x000ffff0 }, diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 794a4732936883c662212e0769e62993e67d96b0..af034dba9bd62a693ba81dd04479cd11ffc7f10e 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -417,6 +417,7 @@ static ssize_t ppp_read(struct file *file, char __user *buf, ssize_t ret; struct sk_buff *skb = NULL; struct iovec iov; + struct iov_iter to; ret = count; @@ -462,7 +463,8 @@ static ssize_t ppp_read(struct file *file, char __user *buf, ret = -EFAULT; iov.iov_base = buf; iov.iov_len = count; - if (skb_copy_datagram_iovec(skb, 0, &iov, skb->len)) + iov_iter_init(&to, READ, &iov, 1, count); + if (skb_copy_datagram_iter(skb, 0, &to, skb->len)) goto outf; ret = skb->len; diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 6c9c16d76935f5db5db13bdb54b0e6530b15c8ed..d2408a5e43a6a1fdef0ba85f5fb19b10ffca4743 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -869,7 +869,7 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, ph = (struct pppoe_hdr *)skb_put(skb, total_len + sizeof(struct pppoe_hdr)); start = (char *)&ph->tag[0]; - error = memcpy_fromiovec(start, m->msg_iov, total_len); + error = memcpy_from_msg(start, m, total_len); if (error < 0) { kfree_skb(skb); goto end; @@ -981,7 +981,7 @@ 
static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, if (skb) { total_len = min_t(size_t, total_len, skb->len); - error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len); + error = skb_copy_datagram_msg(skb, 0, m, total_len); if (error == 0) { consume_skb(skb); return total_len; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 2368395d8ae5acb957e8a819f71dc08480c31ed1..93e224217e24b36b089102be11ada5921f62d83b 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1179,6 +1179,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev) goto err_enable_netpoll; } + if (!(dev->features & NETIF_F_LRO)) + dev_disable_lro(port_dev); + err = netdev_rx_handler_register(port_dev, team_handle_frame, port); if (err) { diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 798ce70e3d6144c408c52a829f21627b17ce0d78..a5cbf67517f09e08e728a44dd9917e2792d37d9d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -71,6 +71,7 @@ #include #include #include +#include #include @@ -837,7 +838,7 @@ drop: skb_tx_error(skb); kfree_skb(skb); rcu_read_unlock(); - return NETDEV_TX_OK; + return NET_XMIT_DROP; } static void tun_net_mclist(struct net_device *dev) @@ -1030,28 +1031,29 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, /* Get packet from user space buffer */ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, - void *msg_control, const struct iovec *iv, - size_t total_len, size_t count, int noblock) + void *msg_control, struct iov_iter *from, + int noblock) { struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; struct sk_buff *skb; + size_t total_len = iov_iter_count(from); size_t len = total_len, align = NET_SKB_PAD, linear; struct virtio_net_hdr gso = { 0 }; int good_linear; - int offset = 0; int copylen; bool zerocopy = false; int err; u32 rxhash; + ssize_t n; if (!(tun->flags & IFF_NO_PI)) { if (len < sizeof(pi)) return -EINVAL; len -= sizeof(pi); - if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi))) + n = copy_from_iter(&pi, sizeof(pi), from); + if (n != sizeof(pi)) return -EFAULT; - offset += sizeof(pi); } if (tun->flags & IFF_VNET_HDR) { @@ -1059,7 +1061,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, return -EINVAL; len -= tun->vnet_hdr_sz; - if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) + n = copy_from_iter(&gso, sizeof(gso), from); + if (n != sizeof(gso)) return -EFAULT; if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && @@ -1068,7 +1071,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, if (tun16_to_cpu(tun, gso.hdr_len) > len) return -EINVAL; - offset += tun->vnet_hdr_sz; + iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso)); } if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { @@ -1081,6 +1084,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, good_linear = SKB_MAX_HEAD(align); if (msg_control) { + struct iov_iter i = *from; + /* There are 256 bytes to be copied in skb, so there is * enough room for skb expand head in case it is used. * The rest of the buffer is mapped from userspace. 
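The hunks below convert tun_get_user()/tun_put_user() from raw iovec arrays with hand-maintained offsets to struct iov_iter, which records its own position as data is consumed. A minimal sketch of the write-side idiom, assuming <linux/uio.h>, <linux/if_tun.h> and <linux/skbuff.h>; the function and its pad parameter are made-up illustrations, not driver code:

/* Hedged sketch of the iov_iter pattern used in this conversion. */
static ssize_t demo_get_user(void __user *ubuf, size_t len, size_t pad)
{
	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
	struct iov_iter from;
	struct tun_pi pi;

	/* WRITE: data flows from user space into the kernel */
	iov_iter_init(&from, WRITE, &iov, 1, len);

	/* copy_from_iter() advances the iterator; no manual offset */
	if (copy_from_iter(&pi, sizeof(pi), &from) != sizeof(pi))
		return -EFAULT;

	/* skip trailing header padding without copying anything */
	iov_iter_advance(&from, pad);

	/* the zerocopy test becomes a page count on the remainder */
	if (iov_iter_npages(&from, INT_MAX) <= MAX_SKB_FRAGS)
		return 1;	/* would take the zerocopy path */

	return 0;
}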
@@ -1089,7 +1094,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, if (copylen > good_linear) copylen = good_linear; linear = copylen; - if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS) + iov_iter_advance(&i, copylen); + if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) zerocopy = true; } @@ -1109,9 +1115,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, } if (zerocopy) - err = zerocopy_sg_from_iovec(skb, iv, offset, count); + err = zerocopy_sg_from_iter(skb, from); else { - err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len); + err = skb_copy_datagram_from_iter(skb, 0, from, len); if (!err && msg_control) { struct ubuf_info *uarg = msg_control; uarg->callback(uarg, false); @@ -1225,8 +1231,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, return total_len; } -static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv, - unsigned long count, loff_t pos) +static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct tun_struct *tun = tun_get(file); @@ -1236,10 +1241,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv, if (!tun) return -EBADFD; - tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count); - - result = tun_get_user(tun, tfile, NULL, iv, iov_length(iv, count), - count, file->f_flags & O_NONBLOCK); + result = tun_get_user(tun, tfile, NULL, from, file->f_flags & O_NONBLOCK); tun_put(tun); return result; @@ -1249,11 +1251,11 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv, static ssize_t tun_put_user(struct tun_struct *tun, struct tun_file *tfile, struct sk_buff *skb, - const struct iovec *iv, int len) + struct iov_iter *iter) { struct tun_pi pi = { 0, skb->protocol }; - ssize_t total = 0; - int vlan_offset = 0, copied; + ssize_t total; + int vlan_offset = 0; int vlan_hlen = 0; int vnet_hdr_sz = 0; @@ -1263,23 +1265,25 @@ static ssize_t tun_put_user(struct tun_struct *tun, if (tun->flags & IFF_VNET_HDR) vnet_hdr_sz = tun->vnet_hdr_sz; + total = skb->len + vlan_hlen + vnet_hdr_sz; + if (!(tun->flags & IFF_NO_PI)) { - if ((len -= sizeof(pi)) < 0) + if (iov_iter_count(iter) < sizeof(pi)) return -EINVAL; - if (len < skb->len + vlan_hlen + vnet_hdr_sz) { + total += sizeof(pi); + if (iov_iter_count(iter) < total) { /* Packet will be stripped */ pi.flags |= TUN_PKT_STRIP; } - if (memcpy_toiovecend(iv, (void *) &pi, 0, sizeof(pi))) + if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi)) return -EFAULT; - total += sizeof(pi); } if (vnet_hdr_sz) { struct virtio_net_hdr gso = { 0 }; /* no info leak */ - if ((len -= vnet_hdr_sz) < 0) + if (iov_iter_count(iter) < vnet_hdr_sz) return -EINVAL; if (skb_is_gso(skb)) { @@ -1318,17 +1322,14 @@ static ssize_t tun_put_user(struct tun_struct *tun, gso.flags = VIRTIO_NET_HDR_F_DATA_VALID; } /* else everything is zero */ - if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total, - sizeof(gso)))) + if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) return -EFAULT; - total += vnet_hdr_sz; + + iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); } - copied = total; - len = min_t(int, skb->len + vlan_hlen, len); - total += skb->len + vlan_hlen; if (vlan_hlen) { - int copy, ret; + int ret; struct { __be16 h_vlan_proto; __be16 h_vlan_TCI; @@ -1339,41 +1340,36 @@ static ssize_t tun_put_user(struct tun_struct *tun, vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); - copy = min_t(int, vlan_offset, len); -
ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy); - len -= copy; - copied += copy; - if (ret || !len) + ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); + if (ret || !iov_iter_count(iter)) goto done; - copy = min_t(int, sizeof(veth), len); - ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy); - len -= copy; - copied += copy; - if (ret || !len) + ret = copy_to_iter(&veth, sizeof(veth), iter); + if (ret != sizeof(veth) || !iov_iter_count(iter)) goto done; } - skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len); + skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); done: tun->dev->stats.tx_packets++; - tun->dev->stats.tx_bytes += len; + tun->dev->stats.tx_bytes += skb->len + vlan_hlen; return total; } static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, - const struct iovec *iv, ssize_t len, int noblock) + struct iov_iter *to, + int noblock) { struct sk_buff *skb; - ssize_t ret = 0; + ssize_t ret; int peeked, err, off = 0; tun_debug(KERN_INFO, tun, "tun_do_read\n"); - if (!len) - return ret; + if (!iov_iter_count(to)) + return 0; if (tun->dev->reg_state != NETREG_REGISTERED) return -EIO; @@ -1381,37 +1377,31 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, /* Read frames from queue */ skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0, &peeked, &off, &err); - if (skb) { - ret = tun_put_user(tun, tfile, skb, iv, len); + if (!skb) + return 0; + + ret = tun_put_user(tun, tfile, skb, to); + if (unlikely(ret < 0)) kfree_skb(skb); - } else - ret = err; + else + consume_skb(skb); return ret; } -static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, - unsigned long count, loff_t pos) +static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *file = iocb->ki_filp; struct tun_file *tfile = file->private_data; struct tun_struct *tun = __tun_get(tfile); - ssize_t len, ret; + ssize_t len = iov_iter_count(to), ret; if (!tun) return -EBADFD; - len = iov_length(iv, count); - if (len < 0) { - ret = -EINVAL; - goto out; - } - - ret = tun_do_read(tun, tfile, iv, len, - file->f_flags & O_NONBLOCK); + ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK); ret = min_t(ssize_t, ret, len); if (ret > 0) iocb->ki_pos = ret; -out: tun_put(tun); return ret; } @@ -1481,8 +1471,9 @@ static int tun_sendmsg(struct kiocb *iocb, struct socket *sock, if (!tun) return -EBADFD; - ret = tun_get_user(tun, tfile, m->msg_control, m->msg_iov, total_len, - m->msg_iovlen, m->msg_flags & MSG_DONTWAIT); + + ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, + m->msg_flags & MSG_DONTWAIT); tun_put(tun); return ret; } @@ -1507,8 +1498,7 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock, SOL_PACKET, TUN_TX_TIMESTAMP); goto out; } - ret = tun_do_read(tun, tfile, m->msg_iov, total_len, - flags & MSG_DONTWAIT); + ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT); if (ret > total_len) { m->msg_flags |= MSG_TRUNC; ret = flags & MSG_TRUNC ? 
ret : total_len; @@ -2219,10 +2209,10 @@ static void tun_chr_show_fdinfo(struct seq_file *m, struct file *f) static const struct file_operations tun_fops = { .owner = THIS_MODULE, .llseek = no_llseek, - .read = do_sync_read, - .aio_read = tun_chr_aio_read, - .write = do_sync_write, - .aio_write = tun_chr_aio_write, + .read = new_sync_read, + .write = new_sync_write, + .read_iter = tun_chr_read_iter, + .write_iter = tun_chr_write_iter, .poll = tun_chr_poll, .unlocked_ioctl = tun_chr_ioctl, #ifdef CONFIG_COMPAT diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 816d511e34d33065a80cd171bab45ec25b8af3db..bf49792062a2b40c2f1bd2f5a06e6eff8954ab90 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -487,8 +487,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf) { - if (dev->driver_priv) - kfree(dev->driver_priv); + kfree(dev->driver_priv); } static const struct ethtool_ops ax88178_ethtool_ops = { diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 2ec1500d0077529c0fd32d74c1abeb9d5f373aa8..415ce8b882c615f08d7368cdfa7bec68bef01fa7 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -130,7 +130,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags) struct page *page; int err; - page = __skb_alloc_page(gfp_flags | __GFP_NOMEMALLOC, NULL); + page = __dev_alloc_page(gfp_flags | __GFP_NOMEMALLOC); if (!page) return -ENOMEM; @@ -212,7 +212,7 @@ resubmit: if (page) put_page(page); if (req) - rx_submit(pnd, req, GFP_ATOMIC | __GFP_COLD); + rx_submit(pnd, req, GFP_ATOMIC); } static int usbpn_close(struct net_device *dev); @@ -231,7 +231,7 @@ static int usbpn_open(struct net_device *dev) for (i = 0; i < rxq_size; i++) { struct urb *req = usb_alloc_urb(0, GFP_KERNEL); - if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) { + if (!req || rx_submit(pnd, req, GFP_KERNEL)) { usb_free_urb(req); usbpn_close(dev); return -ENOMEM; diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index d3920b54a92ce3de23a581d3cf80e99aa7d61e72..9311a08565bed17cf5082a21b9c6dca7ab02dfe1 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -71,18 +71,19 @@ static void usbnet_cdc_update_filter(struct usbnet *dev) { struct cdc_state *info = (void *) &dev->data; struct usb_interface *intf = info->control; + struct net_device *net = dev->net; - u16 cdc_filter = - USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED | - USB_CDC_PACKET_TYPE_BROADCAST; + u16 cdc_filter = USB_CDC_PACKET_TYPE_DIRECTED + | USB_CDC_PACKET_TYPE_BROADCAST; - if (dev->net->flags & IFF_PROMISC) - cdc_filter |= USB_CDC_PACKET_TYPE_PROMISCUOUS; - - /* FIXME cdc-ether has some multicast code too, though it complains - * in routine cases. info->ether describes the multicast support. - * Implement that here, manipulating the cdc filter as needed. 
+ /* filtering on the device is an optional feature and not worth + * the hassle so we just roughly care about snooping and if any + * multicast is requested, we take every multicast */ + if (net->flags & IFF_PROMISC) + cdc_filter |= USB_CDC_PACKET_TYPE_PROMISCUOUS; + if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) + cdc_filter |= USB_CDC_PACKET_TYPE_ALL_MULTICAST; usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 5ee7a1dbc023833a5907a6db48695600bb728fd9..96fc8a5bde8416a471ed4cd07e657025bc222fab 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -402,7 +402,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_ /* map MBIM session to VLAN */ if (tci) - vlan_put_tag(skb, htons(ETH_P_8021Q), tci); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci); err: return skb; } diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index babda7d8693ebcd058d232d9c1e986fbdf925ddf..9c5aa922a9f4bdb404588b74962769510be13887 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -2746,8 +2746,7 @@ exit: tty_unregister_device(tty_drv, serial->minor); kfree(serial); } - if (hso_dev) - kfree(hso_dev); + kfree(hso_dev); return NULL; } diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index c6554c7a8147045450ec8385797038f1ceba955f..2d1c77e81836c617364d668eecf064d0256063f5 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -24,6 +24,7 @@ #include #include #include +#include /* Version Information */ #define DRIVER_VERSION "v1.07.0 (2014/10/09)" @@ -461,18 +462,11 @@ enum rtl8152_flags { /* Define these values to match your device */ #define VENDOR_ID_REALTEK 0x0bda -#define PRODUCT_ID_RTL8152 0x8152 -#define PRODUCT_ID_RTL8153 0x8153 - #define VENDOR_ID_SAMSUNG 0x04e8 -#define PRODUCT_ID_SAMSUNG 0xa101 #define MCU_TYPE_PLA 0x0100 #define MCU_TYPE_USB 0x0000 -#define REALTEK_USB_DEVICE(vend, prod) \ - USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC) - struct tally_counter { __le64 tx_packets; __le64 rx_packets; @@ -486,7 +480,7 @@ struct tally_counter { __le64 rx_broadcast; __le32 rx_multicast; __le16 tx_aborted; - __le16 tx_underun; + __le16 tx_underrun; }; struct rx_desc { @@ -690,6 +684,9 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size, } } + if (ret == -ENODEV) + set_bit(RTL8152_UNPLUG, &tp->flags); + return ret; } @@ -757,6 +754,9 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen, } error1: + if (ret == -ENODEV) + set_bit(RTL8152_UNPLUG, &tp->flags); + return ret; } @@ -1030,7 +1030,6 @@ static void read_bulk_callback(struct urb *urb) int status = urb->status; struct rx_agg *agg; struct r8152 *tp; - int result; agg = urb->context; if (!agg) @@ -1081,15 +1080,7 @@ static void read_bulk_callback(struct urb *urb) break; } - result = r8152_submit_rx(tp, agg, GFP_ATOMIC); - if (result == -ENODEV) { - netif_device_detach(tp->netdev); - } else if (result) { - spin_lock(&tp->rx_lock); - list_add_tail(&agg->list, &tp->rx_done); - spin_unlock(&tp->rx_lock); - tasklet_schedule(&tp->tl); - } + r8152_submit_rx(tp, agg, GFP_ATOMIC); } static void write_bulk_callback(struct urb *urb) @@ -1190,11 +1181,13 @@ static void intr_callback(struct urb *urb) resubmit: res = usb_submit_urb(urb, GFP_ATOMIC); - if (res == -ENODEV) + if (res == -ENODEV) { + set_bit(RTL8152_UNPLUG, &tp->flags); netif_device_detach(tp->netdev); - else if (res) + } else if (res) { netif_err(tp, intr, 
tp->netdev, "can't resubmit intr, status %d\n", res); + } } static inline void *rx_agg_align(void *data) @@ -1250,7 +1243,6 @@ static int alloc_all_mem(struct r8152 *tp) spin_lock_init(&tp->rx_lock); spin_lock_init(&tp->tx_lock); - INIT_LIST_HEAD(&tp->rx_done); INIT_LIST_HEAD(&tp->tx_free); skb_queue_head_init(&tp->tx_queue); @@ -1676,7 +1668,6 @@ static void rx_bottom(struct r8152 *tp) int len_used = 0; struct urb *urb; u8 *rx_data; - int ret; list_del_init(cursor); @@ -1729,13 +1720,7 @@ find_next_rx: } submit: - ret = r8152_submit_rx(tp, agg, GFP_ATOMIC); - if (ret && ret != -ENODEV) { - spin_lock_irqsave(&tp->rx_lock, flags); - list_add_tail(&agg->list, &tp->rx_done); - spin_unlock_irqrestore(&tp->rx_lock, flags); - tasklet_schedule(&tp->tl); - } + r8152_submit_rx(tp, agg, GFP_ATOMIC); } } @@ -1758,6 +1743,7 @@ static void tx_bottom(struct r8152 *tp) struct net_device *netdev = tp->netdev; if (res == -ENODEV) { + set_bit(RTL8152_UNPLUG, &tp->flags); netif_device_detach(netdev); } else { struct net_device_stats *stats = &netdev->stats; @@ -1792,6 +1778,8 @@ static void bottom_half(unsigned long data) if (!netif_carrier_ok(tp->netdev)) return; + clear_bit(SCHEDULE_TASKLET, &tp->flags); + rx_bottom(tp); tx_bottom(tp); } @@ -1799,11 +1787,28 @@ static void bottom_half(unsigned long data) static int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags) { + int ret; + usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1), agg->head, agg_buf_sz, (usb_complete_t)read_bulk_callback, agg); - return usb_submit_urb(agg->urb, mem_flags); + ret = usb_submit_urb(agg->urb, mem_flags); + if (ret == -ENODEV) { + set_bit(RTL8152_UNPLUG, &tp->flags); + netif_device_detach(tp->netdev); + } else if (ret) { + struct urb *urb = agg->urb; + unsigned long flags; + + urb->actual_length = 0; + spin_lock_irqsave(&tp->rx_lock, flags); + list_add_tail(&agg->list, &tp->rx_done); + spin_unlock_irqrestore(&tp->rx_lock, flags); + tasklet_schedule(&tp->tl); + } + + return ret; } static void rtl_drop_queued_tx(struct r8152 *tp) @@ -1994,6 +1999,25 @@ static int rtl_start_rx(struct r8152 *tp) break; } + if (ret && ++i < RTL8152_MAX_RX) { + struct list_head rx_queue; + unsigned long flags; + + INIT_LIST_HEAD(&rx_queue); + + do { + struct rx_agg *agg = &tp->rx_info[i++]; + struct urb *urb = agg->urb; + + urb->actual_length = 0; + list_add_tail(&agg->list, &rx_queue); + } while (i < RTL8152_MAX_RX); + + spin_lock_irqsave(&tp->rx_lock, flags); + list_splice_tail(&rx_queue, &tp->rx_done); + spin_unlock_irqrestore(&tp->rx_lock, flags); + } + return ret; } @@ -2850,15 +2874,18 @@ static void rtl_work_func_t(struct work_struct *work) { struct r8152 *tp = container_of(work, struct r8152, schedule.work); + /* If the device is unplugged or !netif_running(), the workqueue + * doesn't need to wake the device, and could return directly. 
+ */ + if (test_bit(RTL8152_UNPLUG, &tp->flags) || !netif_running(tp->netdev)) + return; + if (usb_autopm_get_interface(tp->intf) < 0) return; if (!test_bit(WORK_ENABLE, &tp->flags)) goto out1; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) - goto out1; - if (!mutex_trylock(&tp->control)) { schedule_delayed_work(&tp->schedule, 0); goto out1; @@ -2933,6 +2960,8 @@ static int rtl8152_open(struct net_device *netdev) netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n", res); free_all_mem(tp); + } else { + tasklet_enable(&tp->tl); } mutex_unlock(&tp->control); @@ -2948,6 +2977,7 @@ static int rtl8152_close(struct net_device *netdev) struct r8152 *tp = netdev_priv(netdev); int res = 0; + tasklet_disable(&tp->tl); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); @@ -2965,9 +2995,7 @@ static int rtl8152_close(struct net_device *netdev) */ rtl_runtime_suspend_enable(tp, false); - tasklet_disable(&tp->tl); tp->rtl_ops.down(tp); - tasklet_enable(&tp->tl); mutex_unlock(&tp->control); @@ -3427,7 +3455,7 @@ static void rtl8152_get_ethtool_stats(struct net_device *dev, data[9] = le64_to_cpu(tally.rx_broadcast); data[10] = le32_to_cpu(tally.rx_multicast); data[11] = le16_to_cpu(tally.tx_aborted); - data[12] = le16_to_cpu(tally.tx_underun); + data[12] = le16_to_cpu(tally.tx_underrun); } static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -3565,11 +3593,33 @@ out: return ret; } +static int rtl8152_nway_reset(struct net_device *dev) +{ + struct r8152 *tp = netdev_priv(dev); + int ret; + + ret = usb_autopm_get_interface(tp->intf); + if (ret < 0) + goto out; + + mutex_lock(&tp->control); + + ret = mii_nway_restart(&tp->mii); + + mutex_unlock(&tp->control); + + usb_autopm_put_interface(tp->intf); + +out: + return ret; +} + static struct ethtool_ops ops = { .get_drvinfo = rtl8152_get_drvinfo, .get_settings = rtl8152_get_settings, .set_settings = rtl8152_set_settings, .get_link = ethtool_op_get_link, + .nway_reset = rtl8152_nway_reset, .get_msglevel = rtl8152_get_msglevel, .set_msglevel = rtl8152_set_msglevel, .get_wol = rtl8152_get_wol, @@ -3709,66 +3759,43 @@ static void rtl8153_unload(struct r8152 *tp) r8153_power_cut_en(tp, false); } -static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) +static int rtl_ops_init(struct r8152 *tp) { struct rtl_ops *ops = &tp->rtl_ops; - int ret = -ENODEV; - - switch (id->idVendor) { - case VENDOR_ID_REALTEK: - switch (id->idProduct) { - case PRODUCT_ID_RTL8152: - ops->init = r8152b_init; - ops->enable = rtl8152_enable; - ops->disable = rtl8152_disable; - ops->up = rtl8152_up; - ops->down = rtl8152_down; - ops->unload = rtl8152_unload; - ops->eee_get = r8152_get_eee; - ops->eee_set = r8152_set_eee; - ret = 0; - break; - case PRODUCT_ID_RTL8153: - ops->init = r8153_init; - ops->enable = rtl8153_enable; - ops->disable = rtl8153_disable; - ops->up = rtl8153_up; - ops->down = rtl8153_down; - ops->unload = rtl8153_unload; - ops->eee_get = r8153_get_eee; - ops->eee_set = r8153_set_eee; - ret = 0; - break; - default: - break; - } + int ret = 0; + + switch (tp->version) { + case RTL_VER_01: + case RTL_VER_02: + ops->init = r8152b_init; + ops->enable = rtl8152_enable; + ops->disable = rtl8152_disable; + ops->up = rtl8152_up; + ops->down = rtl8152_down; + ops->unload = rtl8152_unload; + ops->eee_get = r8152_get_eee; + ops->eee_set = r8152_set_eee; break; - case VENDOR_ID_SAMSUNG: - switch (id->idProduct) { - case PRODUCT_ID_SAMSUNG: - ops->init = r8153_init; - ops->enable = 
rtl8153_enable; - ops->disable = rtl8153_disable; - ops->up = rtl8153_up; - ops->down = rtl8153_down; - ops->unload = rtl8153_unload; - ops->eee_get = r8153_get_eee; - ops->eee_set = r8153_set_eee; - ret = 0; - break; - default: - break; - } + case RTL_VER_03: + case RTL_VER_04: + case RTL_VER_05: + ops->init = r8153_init; + ops->enable = rtl8153_enable; + ops->disable = rtl8153_disable; + ops->up = rtl8153_up; + ops->down = rtl8153_down; + ops->unload = rtl8153_unload; + ops->eee_get = r8153_get_eee; + ops->eee_set = r8153_set_eee; break; default: + ret = -ENODEV; + netif_err(tp, probe, tp->netdev, "Unknown Device\n"); break; } - if (ret) - netif_err(tp, probe, tp->netdev, "Unknown Device\n"); - return ret; } @@ -3800,7 +3827,8 @@ static int rtl8152_probe(struct usb_interface *intf, tp->netdev = netdev; tp->intf = intf; - ret = rtl_ops_init(tp, id); + r8152b_get_version(tp); + ret = rtl_ops_init(tp); if (ret) goto out; @@ -3833,11 +3861,9 @@ static int rtl8152_probe(struct usb_interface *intf, tp->mii.phy_id_mask = 0x3f; tp->mii.reg_num_mask = 0x1f; tp->mii.phy_id = R8152_PHY_ID; - tp->mii.supports_gmii = 0; intf->needs_remote_wakeup = 1; - r8152b_get_version(tp); tp->rtl_ops.init(tp); set_ethernet_addr(tp); @@ -3855,12 +3881,15 @@ static int rtl8152_probe(struct usb_interface *intf, else device_set_wakeup_enable(&udev->dev, false); + tasklet_disable(&tp->tl); + netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION); return 0; out1: usb_set_intfdata(intf, NULL); + tasklet_kill(&tp->tl); out: free_netdev(netdev); return ret; @@ -3884,11 +3913,27 @@ static void rtl8152_disconnect(struct usb_interface *intf) } } +#define REALTEK_USB_DEVICE(vend, prod) \ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ + USB_DEVICE_ID_MATCH_INT_CLASS, \ + .idVendor = (vend), \ + .idProduct = (prod), \ + .bInterfaceClass = USB_CLASS_VENDOR_SPEC \ +}, \ +{ \ + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | \ + USB_DEVICE_ID_MATCH_DEVICE, \ + .idVendor = (vend), \ + .idProduct = (prod), \ + .bInterfaceClass = USB_CLASS_COMM, \ + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ + .bInterfaceProtocol = USB_CDC_PROTO_NONE + /* table of devices that work with this driver */ static struct usb_device_id rtl8152_table[] = { - {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)}, - {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)}, - {USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)}, + {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, + {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, + {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, {} }; diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 6e87e5710048cf952c1505950d954a493b6b8878..d37b7dce2d405af079e8067ad032f0acdbdbbf9b 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -753,14 +753,13 @@ static int rtl8150_open(struct net_device *netdev) static int rtl8150_close(struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); - int res = 0; netif_stop_queue(netdev); if (!test_bit(RTL8150_UNPLUG, &dev->flags)) disable_net_traffic(dev); unlink_all_urbs(dev); - return res; + return 0; } static void rtl8150_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index d07bf4cb893f878c2a2a7f40e5c5691304fbdbf5..26423adc35ee892c6a520cc243f8e71680a71eaf 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1670,12 +1670,14 @@ done: static int smsc95xx_resume(struct usb_interface *intf) { struct usbnet *dev = 
usb_get_intfdata(intf); - struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); - u8 suspend_flags = pdata->suspend_flags; + struct smsc95xx_priv *pdata; + u8 suspend_flags; int ret; u32 val; BUG_ON(!dev); + pdata = (struct smsc95xx_priv *)(dev->data[0]); + suspend_flags = pdata->suspend_flags; netdev_dbg(dev->net, "resume suspend_flags=0x%02x\n", suspend_flags); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 6dfcbf523936ef69527f5d23168dbf51ee638094..afd295348ddbe0eb968cd884f657b2b7c2857ebe 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -2199,13 +2199,6 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) if (adapter->rss) { struct UPT1_RSSConf *rssConf = adapter->rss_conf; - static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = { - 0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac, - 0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28, - 0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70, - 0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3, - 0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9, - }; devRead->misc.uptFeatures |= UPT1_F_RSS; devRead->misc.numRxQueues = adapter->num_rx_queues; @@ -2216,7 +2209,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ; rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE; rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE; - memcpy(rssConf->hashKey, rss_key, sizeof(rss_key)); + netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey)); for (i = 0; i < rssConf->indTableSize; i++) rssConf->indTable[i] = ethtool_rxfh_indir_default( diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index b725fd9e7803a4b41f8c548fb01855ee604a0bea..b7b53329d5751676b6a9daf4c9cff9bb4ce923ad 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -583,12 +583,16 @@ vmxnet3_get_rss_indir_size(struct net_device *netdev) } static int -vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key) +vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct UPT1_RSSConf *rssConf = adapter->rss_conf; unsigned int n = rssConf->indTableSize; + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!p) + return 0; while (n--) p[n] = rssConf->indTable[n]; return 0; @@ -596,13 +600,20 @@ vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key) } static int -vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key) +vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key, + const u8 hfunc) { unsigned int i; unsigned long flags; struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct UPT1_RSSConf *rssConf = adapter->rss_conf; + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!p) + return 0; for (i = 0; i < rssConf->indTableSize; i++) rssConf->indTable[i] = p[i]; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index be4649a49c5e8bb2bec68aca08d27a82f5563d40..49d9f229199851c48f5a9e6f1b282b42cedc2a41 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -849,7 +849,7 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, /* Add static entry (via netlink) */ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr, u16 flags) + const unsigned char *addr, 
u16 vid, u16 flags) { struct vxlan_dev *vxlan = netdev_priv(dev); /* struct net *net = dev_net(vxlan->dev); */ @@ -885,7 +885,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], /* Delete entry (via netlink) */ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr) + const unsigned char *addr, u16 vid) { struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_fdb *f; @@ -1593,14 +1593,9 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs, if (unlikely(err)) return err; - if (vlan_tx_tag_present(skb)) { - if (WARN_ON(!__vlan_put_tag(skb, - skb->vlan_proto, - vlan_tx_tag_get(skb)))) - return -ENOMEM; - - skb->vlan_tci = 0; - } + skb = vlan_hwaccel_push_inside(skb); + if (WARN_ON(!skb)) + return -ENOMEM; vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); vxh->vx_flags = htonl(VXLAN_FLAGS); @@ -1637,14 +1632,9 @@ int vxlan_xmit_skb(struct vxlan_sock *vs, if (unlikely(err)) return err; - if (vlan_tx_tag_present(skb)) { - if (WARN_ON(!__vlan_put_tag(skb, - skb->vlan_proto, - vlan_tx_tag_get(skb)))) - return -ENOMEM; - - skb->vlan_tci = 0; - } + skb = vlan_hwaccel_push_inside(skb); + if (WARN_ON(!skb)) + return -ENOMEM; vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); vxh->vx_flags = htonl(VXLAN_FLAGS); @@ -1995,9 +1985,8 @@ static int vxlan_init(struct net_device *dev) spin_lock(&vn->sock_lock); vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET, vxlan->dst_port); - if (vs) { + if (vs && atomic_add_unless(&vs->refcnt, 1, 0)) { /* If we have a socket with same port already, reuse it */ - atomic_inc(&vs->refcnt); vxlan_vs_add_dev(vs, vxlan); } else { /* otherwise make new socket outside of RTNL */ @@ -2236,6 +2225,9 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 }, [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 }, [IFLA_VXLAN_PORT] = { .type = NLA_U16 }, + [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 }, + [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 }, + [IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 }, }; static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) @@ -2396,12 +2388,9 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, spin_lock(&vn->sock_lock); vs = vxlan_find_sock(net, ipv6 ? 
AF_INET6 : AF_INET, port); - if (vs) { - if (vs->rcv == rcv) - atomic_inc(&vs->refcnt); - else + if (vs && ((vs->rcv != rcv) || + !atomic_add_unless(&vs->refcnt, 1, 0))) vs = ERR_PTR(-EBUSY); - } spin_unlock(&vn->sock_lock); if (!vs) diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h index 86907e5ba6ca21a5e7e6d8ff8b6b20499e004cce..ccba4fea7269e35ecf2df7d95fb8b6af1aee507a 100644 --- a/drivers/net/wireless/ath/ath.h +++ b/drivers/net/wireless/ath/ath.h @@ -135,6 +135,11 @@ struct ath_ops { struct ath_common; struct ath_bus_ops; +struct ath_ps_ops { + void (*wakeup)(struct ath_common *common); + void (*restore)(struct ath_common *common); +}; + struct ath_common { void *ah; void *priv; @@ -148,7 +153,7 @@ struct ath_common { u16 cachelsz; u16 curaid; u8 macaddr[ETH_ALEN]; - u8 curbssid[ETH_ALEN]; + u8 curbssid[ETH_ALEN] __aligned(2); u8 bssidmask[ETH_ALEN]; u32 rx_bufsize; @@ -169,6 +174,7 @@ struct ath_common { struct ath_regulatory reg_world_copy; const struct ath_ops *ops; const struct ath_bus_ops *bus_ops; + const struct ath_ps_ops *ps_ops; bool btcoex_enabled; bool disable_ani; @@ -178,6 +184,11 @@ struct ath_common { struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; }; +static inline const struct ath_ps_ops *ath_ps_ops(struct ath_common *common) +{ + return common->ps_ops; +} + struct sk_buff *ath_rxbuf_alloc(struct ath_common *common, u32 len, gfp_t gfp_mask); diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 101cadb6e4ba8c3659200093f5da739c4a501236..a156e6e48708f96bada8aec2aa17f43d4e052690 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -443,12 +443,12 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) * Guts of ath10k_ce_completed_recv_next. * The caller takes responsibility for any necessary locking. */ -static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, - void **per_transfer_contextp, - u32 *bufferp, - unsigned int *nbytesp, - unsigned int *transfer_idp, - unsigned int *flagsp) +int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + u32 *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + unsigned int *flagsp) { struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; unsigned int nentries_mask = dest_ring->nentries_mask; @@ -558,6 +558,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, /* sanity */ dest_ring->per_transfer_context[sw_index] = NULL; + desc->nbytes = 0; /* Update sw_index */ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); @@ -576,11 +577,11 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, * Guts of ath10k_ce_completed_send_next. * The caller takes responsibility for any necessary locking. 
*/ -static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, - void **per_transfer_contextp, - u32 *bufferp, - unsigned int *nbytesp, - unsigned int *transfer_idp) +int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + u32 *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp) { struct ath10k_ce_ring *src_ring = ce_state->src_ring; u32 ctrl_addr = ce_state->ctrl_addr; @@ -817,7 +818,10 @@ void ath10k_ce_enable_interrupts(struct ath10k *ar) struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ce_id; - for (ce_id = 0; ce_id < CE_COUNT; ce_id++) + /* Skip the last copy engine, CE7 the diagnostic window, as that + * uses polling and isn't initialized for interrupts. + */ + for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]); } @@ -832,8 +836,8 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar, nentries = roundup_pow_of_two(attr->src_nentries); - memset(src_ring->per_transfer_context, 0, - nentries * sizeof(*src_ring->per_transfer_context)); + memset(src_ring->base_addr_owner_space, 0, + nentries * sizeof(struct ce_desc)); src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); src_ring->sw_index &= src_ring->nentries_mask; @@ -869,8 +873,8 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, nentries = roundup_pow_of_two(attr->dest_nentries); - memset(dest_ring->per_transfer_context, 0, - nentries * sizeof(*dest_ring->per_transfer_context)); + memset(dest_ring->base_addr_owner_space, 0, + nentries * sizeof(struct ce_desc)); dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr); dest_ring->sw_index &= dest_ring->nentries_mask; @@ -1020,37 +1024,10 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id, * initialized by software/firmware. */ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id, - const struct ce_attr *attr, - void (*send_cb)(struct ath10k_ce_pipe *), - void (*recv_cb)(struct ath10k_ce_pipe *)) + const struct ce_attr *attr) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; int ret; - /* - * Make sure there's enough CE ringbuffer entries for HTT TX to avoid - * additional TX locking checks. - * - * For the lack of a better place do the check here. - */ - BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC > - (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); - BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC > - (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); - - spin_lock_bh(&ar_pci->ce_lock); - ce_state->ar = ar; - ce_state->id = ce_id; - ce_state->ctrl_addr = ath10k_ce_base_address(ce_id); - ce_state->attr_flags = attr->flags; - ce_state->src_sz_max = attr->src_sz_max; - if (attr->src_nentries) - ce_state->send_cb = send_cb; - if (attr->dest_nentries) - ce_state->recv_cb = recv_cb; - spin_unlock_bh(&ar_pci->ce_lock); - if (attr->src_nentries) { ret = ath10k_ce_init_src_ring(ar, ce_id, attr); if (ret) { @@ -1098,12 +1075,37 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id) } int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, - const struct ce_attr *attr) + const struct ce_attr *attr, + void (*send_cb)(struct ath10k_ce_pipe *), + void (*recv_cb)(struct ath10k_ce_pipe *)) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; int ret; + /* + * Make sure there's enough CE ringbuffer entries for HTT TX to avoid + * additional TX locking checks. 
+ * + * For the lack of a better place do the check here. + */ + BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC > + (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); + BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC > + (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); + + ce_state->ar = ar; + ce_state->id = ce_id; + ce_state->ctrl_addr = ath10k_ce_base_address(ce_id); + ce_state->attr_flags = attr->flags; + ce_state->src_sz_max = attr->src_sz_max; + + if (attr->src_nentries) + ce_state->send_cb = send_cb; + + if (attr->dest_nentries) + ce_state->recv_cb = recv_cb; + if (attr->src_nentries) { ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr); if (IS_ERR(ce_state->src_ring)) { diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 329b7340fa72cc5157dec176686f4f19a5099629..617a151e8ce4e7f9217d836d1dcfc0483dc966d0 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -192,15 +192,21 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state, unsigned int *nbytesp, unsigned int *transfer_idp); +int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + u32 *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp); + /*==================CE Engine Initialization=======================*/ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id, - const struct ce_attr *attr, - void (*send_cb)(struct ath10k_ce_pipe *), - void (*recv_cb)(struct ath10k_ce_pipe *)); + const struct ce_attr *attr); void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id); int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, - const struct ce_attr *attr); + const struct ce_attr *attr, + void (*send_cb)(struct ath10k_ce_pipe *), + void (*recv_cb)(struct ath10k_ce_pipe *)); void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id); /*==================CE Engine Shutdown=======================*/ @@ -213,6 +219,13 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, u32 *bufferp); +int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + u32 *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + unsigned int *flagsp); + /* * Support clean shutdown by allowing the caller to cancel * pending sends. 
Target DMA must be stopped before using diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index cee18c89d7f23ef77ad7a8a2265ca8efa887f9dd..7762061a194458b4982326d7edaea90700a57d8b 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -31,12 +31,17 @@ unsigned int ath10k_debug_mask; static bool uart_print; static unsigned int ath10k_p2p; +static bool skip_otp; + module_param_named(debug_mask, ath10k_debug_mask, uint, 0644); module_param(uart_print, bool, 0644); module_param_named(p2p, ath10k_p2p, uint, 0644); +module_param(skip_otp, bool, 0644); + MODULE_PARM_DESC(debug_mask, "Debugging mask"); MODULE_PARM_DESC(uart_print, "Uart target debugging"); MODULE_PARM_DESC(p2p, "Enable ath10k P2P support"); +MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode"); static const struct ath10k_hw_params ath10k_hw_params_list[] = { { @@ -138,7 +143,8 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar, return fw; } -static int ath10k_push_board_ext_data(struct ath10k *ar) +static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data, + size_t data_len) { u32 board_data_size = QCA988X_BOARD_DATA_SZ; u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ; @@ -159,14 +165,14 @@ static int ath10k_push_board_ext_data(struct ath10k *ar) if (board_ext_data_addr == 0) return 0; - if (ar->board_len != (board_data_size + board_ext_data_size)) { + if (data_len != (board_data_size + board_ext_data_size)) { ath10k_err(ar, "invalid board (ext) data sizes %zu != %d+%d\n", - ar->board_len, board_data_size, board_ext_data_size); + data_len, board_data_size, board_ext_data_size); return -EINVAL; } ret = ath10k_bmi_write_memory(ar, board_ext_data_addr, - ar->board_data + board_data_size, + data + board_data_size, board_ext_data_size); if (ret) { ath10k_err(ar, "could not write board ext data (%d)\n", ret); @@ -184,13 +190,14 @@ static int ath10k_push_board_ext_data(struct ath10k *ar) return 0; } -static int ath10k_download_board_data(struct ath10k *ar) +static int ath10k_download_board_data(struct ath10k *ar, const void *data, + size_t data_len) { u32 board_data_size = QCA988X_BOARD_DATA_SZ; u32 address; int ret; - ret = ath10k_push_board_ext_data(ar); + ret = ath10k_push_board_ext_data(ar, data, data_len); if (ret) { ath10k_err(ar, "could not push board ext data (%d)\n", ret); goto exit; @@ -202,9 +209,9 @@ static int ath10k_download_board_data(struct ath10k *ar) goto exit; } - ret = ath10k_bmi_write_memory(ar, address, ar->board_data, + ret = ath10k_bmi_write_memory(ar, address, data, min_t(u32, board_data_size, - ar->board_len)); + data_len)); if (ret) { ath10k_err(ar, "could not write board data (%d)\n", ret); goto exit; @@ -220,11 +227,39 @@ exit: return ret; } +static int ath10k_download_cal_file(struct ath10k *ar) +{ + int ret; + + if (!ar->cal_file) + return -ENOENT; + + if (IS_ERR(ar->cal_file)) + return PTR_ERR(ar->cal_file); + + ret = ath10k_download_board_data(ar, ar->cal_file->data, + ar->cal_file->size); + if (ret) { + ath10k_err(ar, "failed to download cal_file data: %d\n", ret); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cal file downloaded\n"); + + return 0; +} + static int ath10k_download_and_run_otp(struct ath10k *ar) { u32 result, address = ar->hw_params.patch_load_addr; int ret; + ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len); + if (ret) { + ath10k_err(ar, "failed to download board data: %d\n", ret); + return ret; + } + /* OTP 
is optional */ if (!ar->otp_data || !ar->otp_len) { @@ -250,7 +285,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result); - if (result != 0) { + if (!skip_otp && result != 0) { ath10k_err(ar, "otp calibration failed: %d", result); return -EINVAL; } @@ -308,6 +343,9 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar) if (ar->firmware && !IS_ERR(ar->firmware)) release_firmware(ar->firmware); + if (ar->cal_file && !IS_ERR(ar->cal_file)) + release_firmware(ar->cal_file); + ar->board = NULL; ar->board_data = NULL; ar->board_len = 0; @@ -319,6 +357,27 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar) ar->firmware = NULL; ar->firmware_data = NULL; ar->firmware_len = 0; + + ar->cal_file = NULL; +} + +static int ath10k_fetch_cal_file(struct ath10k *ar) +{ + char filename[100]; + + /* cal-<bus>-<id>.bin */ + scnprintf(filename, sizeof(filename), "cal-%s-%s.bin", + ath10k_bus_str(ar->hif.bus), dev_name(ar->dev)); + + ar->cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename); + if (IS_ERR(ar->cal_file)) + /* calibration file is optional, don't print any warnings */ + return PTR_ERR(ar->cal_file); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n", + ATH10K_FW_DIR, filename); + + return 0; } static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar) @@ -562,6 +621,9 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar) { int ret; + /* calibration file is optional, don't check for any errors */ + ath10k_fetch_cal_file(ar); + ar->fw_api = 3; ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); @@ -589,30 +651,32 @@ success: return 0; } -static int ath10k_init_download_firmware(struct ath10k *ar, - enum ath10k_firmware_mode mode) +static int ath10k_download_cal_data(struct ath10k *ar) { int ret; - ret = ath10k_download_board_data(ar); - if (ret) { - ath10k_err(ar, "failed to download board data: %d\n", ret); - return ret; + ret = ath10k_download_cal_file(ar); + if (ret == 0) { + ar->cal_mode = ATH10K_CAL_MODE_FILE; + goto done; } + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot did not find a calibration file, try OTP next: %d\n", + ret); + ret = ath10k_download_and_run_otp(ar); if (ret) { ath10k_err(ar, "failed to run otp: %d\n", ret); return ret; } - ret = ath10k_download_fw(ar, mode); - if (ret) { - ath10k_err(ar, "failed to download firmware: %d\n", ret); - return ret; - } + ar->cal_mode = ATH10K_CAL_MODE_OTP; - return ret; +done: + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n", + ath10k_cal_mode_str(ar->cal_mode)); + return 0; } static int ath10k_init_uart(struct ath10k *ar) @@ -685,6 +749,25 @@ static void ath10k_core_restart(struct work_struct *work) { struct ath10k *ar = container_of(work, struct ath10k, restart_work); + set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags); + + /* Place a barrier to make sure the compiler doesn't reorder + * CRASH_FLUSH and calling other functions.
+ */ + barrier(); + + ieee80211_stop_queues(ar->hw); + ath10k_drain_tx(ar); + complete_all(&ar->scan.started); + complete_all(&ar->scan.completed); + complete_all(&ar->scan.on_channel); + complete_all(&ar->offchan_tx_completed); + complete_all(&ar->install_key_done); + complete_all(&ar->vdev_setup_done); + wake_up(&ar->htt.empty_tx_wq); + wake_up(&ar->wmi.tx_credits_wq); + wake_up(&ar->peer_mapping_wq); + mutex_lock(&ar->conf_mutex); switch (ar->state) { @@ -716,12 +799,25 @@ static void ath10k_core_restart(struct work_struct *work) mutex_unlock(&ar->conf_mutex); } +static void ath10k_core_init_max_sta_count(struct ath10k *ar) +{ + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + ar->max_num_peers = TARGET_10X_NUM_PEERS; + ar->max_num_stations = TARGET_10X_NUM_STATIONS; + } else { + ar->max_num_peers = TARGET_NUM_PEERS; + ar->max_num_stations = TARGET_NUM_STATIONS; + } +} + int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) { int status; lockdep_assert_held(&ar->conf_mutex); + clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags); + ath10k_bmi_start(ar); if (ath10k_init_configure_target(ar)) { @@ -729,7 +825,11 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) goto err; } - status = ath10k_init_download_firmware(ar, mode); + status = ath10k_download_cal_data(ar); + if (status) + goto err; + + status = ath10k_download_fw(ar, mode); if (status) goto err; @@ -846,9 +946,9 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) goto err_hif_stop; if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) - ar->free_vdev_map = (1 << TARGET_10X_NUM_VDEVS) - 1; + ar->free_vdev_map = (1LL << TARGET_10X_NUM_VDEVS) - 1; else - ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1; + ar->free_vdev_map = (1LL << TARGET_NUM_VDEVS) - 1; INIT_LIST_HEAD(&ar->arvifs); @@ -946,6 +1046,8 @@ static int ath10k_core_probe_fw(struct ath10k *ar) return ret; } + ath10k_core_init_max_sta_count(ar); + mutex_lock(&ar->conf_mutex); ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL); @@ -1084,6 +1186,7 @@ void ath10k_core_unregister(struct ath10k *ar) EXPORT_SYMBOL(ath10k_core_unregister); struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, + enum ath10k_bus bus, const struct ath10k_hif_ops *hif_ops) { struct ath10k *ar; @@ -1100,6 +1203,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, ar->dev = dev; ar->hif.ops = hif_ops; + ar->hif.bus = bus; init_completion(&ar->scan.started); init_completion(&ar->scan.completed); @@ -1120,6 +1224,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, INIT_LIST_HEAD(&ar->peers); init_waitqueue_head(&ar->peer_mapping_wq); + init_waitqueue_head(&ar->htt.empty_tx_wq); + init_waitqueue_head(&ar->wmi.tx_credits_wq); init_completion(&ar->offchan_tx_completed); INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index fe531ea6926cce4fc2076dd132455440cb647454..514c219263a5905a11288f842789133c01f49350 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -63,12 +63,28 @@ struct ath10k; +enum ath10k_bus { + ATH10K_BUS_PCI, +}; + +static inline const char *ath10k_bus_str(enum ath10k_bus bus) +{ + switch (bus) { + case ATH10K_BUS_PCI: + return "pci"; + } + + return "unknown"; +} + struct ath10k_skb_cb { dma_addr_t paddr; + u8 eid; u8 vdev_id; struct { u8 tid; + u16 freq; bool is_offchan; struct 
ath10k_htt_txbuf *txbuf; u32 txbuf_paddr; @@ -96,8 +112,6 @@ struct ath10k_bmi { bool done_sent; }; -#define ATH10K_MAX_MEM_REQS 16 - struct ath10k_mem_chunk { void *vaddr; dma_addr_t paddr; @@ -110,22 +124,27 @@ struct ath10k_wmi { struct completion service_ready; struct completion unified_ready; wait_queue_head_t tx_credits_wq; + DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX); struct wmi_cmd_map *cmd; struct wmi_vdev_param_map *vdev_param; struct wmi_pdev_param_map *pdev_param; u32 num_mem_chunks; - struct ath10k_mem_chunk mem_chunks[ATH10K_MAX_MEM_REQS]; + struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS]; }; -struct ath10k_peer_stat { +struct ath10k_fw_stats_peer { + struct list_head list; + u8 peer_macaddr[ETH_ALEN]; u32 peer_rssi; u32 peer_tx_rate; u32 peer_rx_rate; /* 10x only */ }; -struct ath10k_target_stats { +struct ath10k_fw_stats_pdev { + struct list_head list; + /* PDEV stats */ s32 ch_noise_floor; u32 tx_frame_count; @@ -180,15 +199,11 @@ struct ath10k_target_stats { s32 phy_errs; s32 phy_err_drop; s32 mpdu_errs; +}; - /* VDEV STATS */ - - /* PEER STATS */ - u8 peers; - struct ath10k_peer_stat peer_stat[TARGET_NUM_PEERS]; - - /* TODO: Beacon filter stats */ - +struct ath10k_fw_stats { + struct list_head pdevs; + struct list_head peers; }; struct ath10k_dfs_stats { @@ -206,6 +221,8 @@ struct ath10k_peer { int vdev_id; u8 addr[ETH_ALEN]; DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS); + + /* protected by ar->data_lock */ struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1]; }; @@ -234,6 +251,8 @@ struct ath10k_vif { struct sk_buff *beacon; /* protected by data_lock */ bool beacon_sent; + void *beacon_buf; + dma_addr_t beacon_paddr; struct ath10k *ar; struct ieee80211_vif *vif; @@ -273,6 +292,7 @@ struct ath10k_vif { u8 force_sgi; bool use_cts_prot; int num_legacy_stations; + int txpower; }; struct ath10k_vif_iter { @@ -292,17 +312,19 @@ struct ath10k_fw_crash_data { struct ath10k_debug { struct dentry *debugfs_phy; - struct ath10k_target_stats target_stats; - DECLARE_BITMAP(wmi_service_bitmap, WMI_SERVICE_MAX); - - struct completion event_stats_compl; + struct ath10k_fw_stats fw_stats; + struct completion fw_stats_complete; + bool fw_stats_done; unsigned long htt_stats_mask; struct delayed_work htt_stats_dwork; struct ath10k_dfs_stats dfs_stats; struct ath_dfs_pool_stats dfs_pool_stats; + /* protected by conf_mutex */ u32 fw_dbglog_mask; + u32 pktlog_filter; + u32 reg_addr; u8 htt_max_amsdu; u8 htt_max_ampdu; @@ -321,7 +343,7 @@ enum ath10k_state { * stopped in ath10k_core_restart() work holding conf_mutex. The state * RESTARTED means that the device is up and mac80211 has started hw * reconfiguration. Once mac80211 is done with the reconfiguration we - * set the state to STATE_ON in restart_complete(). */ + * set the state to STATE_ON in reconfig_complete(). */ ATH10K_STATE_RESTARTING, ATH10K_STATE_RESTARTED, @@ -369,8 +391,30 @@ enum ath10k_dev_flags { /* Indicates that ath10k device is during CAC phase of DFS */ ATH10K_CAC_RUNNING, ATH10K_FLAG_CORE_REGISTERED, + + /* Device has crashed and needs to restart. This indicates any pending + * waiters should immediately cancel instead of waiting for a time out. 
+ */ + ATH10K_FLAG_CRASH_FLUSH, }; +enum ath10k_cal_mode { + ATH10K_CAL_MODE_FILE, + ATH10K_CAL_MODE_OTP, +}; + +static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode) +{ + switch (mode) { + case ATH10K_CAL_MODE_FILE: + return "file"; + case ATH10K_CAL_MODE_OTP: + return "otp"; + } + + return "unknown"; +} + enum ath10k_scan_state { ATH10K_SCAN_IDLE, ATH10K_SCAN_STARTING, @@ -421,6 +465,7 @@ struct ath10k { bool p2p; struct { + enum ath10k_bus bus; const struct ath10k_hif_ops *ops; } hif; @@ -456,7 +501,10 @@ struct ath10k { const void *firmware_data; size_t firmware_len; + const struct firmware *cal_file; + int fw_api; + enum ath10k_cal_mode cal_mode; struct { struct completion started; @@ -482,7 +530,7 @@ struct ath10k { /* current operating channel definition */ struct cfg80211_chan_def chandef; - int free_vdev_map; + unsigned long long free_vdev_map; bool monitor; int monitor_vdev_id; bool monitor_started; @@ -517,8 +565,12 @@ struct ath10k { struct list_head peers; wait_queue_head_t peer_mapping_wq; - /* number of created peers; protected by data_lock */ + /* protected by conf_mutex */ int num_peers; + int num_stations; + + int max_num_peers; + int max_num_stations; struct work_struct offchan_tx_work; struct sk_buff_head offchan_tx_queue; @@ -563,11 +615,19 @@ struct ath10k { bool utf_monitor; } testmode; + struct { + /* protected by data_lock */ + u32 fw_crash_counter; + u32 fw_warm_reset_counter; + u32 fw_cold_reset_counter; + } stats; + /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); }; struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, + enum ath10k_bus bus, const struct ath10k_hif_ops *hif_ops); void ath10k_core_destroy(struct ath10k *ar); diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 3756feba32231cffcddd112575163d1a7cf6cc67..a716758f14b038b4052ebb2aa6a67f3cad56db67 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -17,12 +17,12 @@ #include #include -#include -#include #include +#include #include "core.h" #include "debug.h" +#include "hif.h" /* ms */ #define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000 @@ -106,34 +106,37 @@ struct ath10k_dump_file_data { u8 data[0]; } __packed; -int ath10k_info(struct ath10k *ar, const char *fmt, ...) +void ath10k_info(struct ath10k *ar, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args; - int ret; va_start(args, fmt); vaf.va = &args; - ret = dev_info(ar->dev, "%pV", &vaf); + dev_info(ar->dev, "%pV", &vaf); trace_ath10k_log_info(ar, &vaf); va_end(args); - - return ret; } EXPORT_SYMBOL(ath10k_info); void ath10k_print_driver_info(struct ath10k *ar) { - ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n", + ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d wmi %d.%d.%d.%d cal %s max_sta %d\n", ar->hw_params.name, ar->target_version, ar->chip_id, ar->hw->wiphy->fw_version, ar->fw_api, ar->htt.target_version_major, - ar->htt.target_version_minor); + ar->htt.target_version_minor, + ar->fw_version_major, + ar->fw_version_minor, + ar->fw_version_release, + ar->fw_version_build, + ath10k_cal_mode_str(ar->cal_mode), + ar->max_num_stations); ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n", config_enabled(CONFIG_ATH10K_DEBUG), config_enabled(CONFIG_ATH10K_DEBUGFS), @@ -143,25 +146,22 @@ void ath10k_print_driver_info(struct ath10k *ar) } EXPORT_SYMBOL(ath10k_print_driver_info); -int ath10k_err(struct ath10k *ar, const char *fmt, ...) 
+void ath10k_err(struct ath10k *ar, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args; - int ret; va_start(args, fmt); vaf.va = &args; - ret = dev_err(ar->dev, "%pV", &vaf); + dev_err(ar->dev, "%pV", &vaf); trace_ath10k_log_err(ar, &vaf); va_end(args); - - return ret; } EXPORT_SYMBOL(ath10k_err); -int ath10k_warn(struct ath10k *ar, const char *fmt, ...) +void ath10k_warn(struct ath10k *ar, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, @@ -174,20 +174,11 @@ int ath10k_warn(struct ath10k *ar, const char *fmt, ...) trace_ath10k_log_warn(ar, &vaf); va_end(args); - - return 0; } EXPORT_SYMBOL(ath10k_warn); #ifdef CONFIG_ATH10K_DEBUGFS -void ath10k_debug_read_service_map(struct ath10k *ar, - void *service_map, - size_t map_size) -{ - memcpy(ar->debug.wmi_service_bitmap, service_map, map_size); -} - static ssize_t ath10k_read_wmi_services(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -209,8 +200,9 @@ static ssize_t ath10k_read_wmi_services(struct file *file, if (len > buf_len) len = buf_len; + spin_lock_bh(&ar->data_lock); for (i = 0; i < WMI_SERVICE_MAX; i++) { - enabled = test_bit(i, ar->debug.wmi_service_bitmap); + enabled = test_bit(i, ar->wmi.svc_map); name = wmi_service_name(i); if (!name) { @@ -226,6 +218,7 @@ static ssize_t ath10k_read_wmi_services(struct file *file, "%-40s %s\n", name, enabled ? "enabled" : "-"); } + spin_unlock_bh(&ar->data_lock); ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); @@ -242,169 +235,182 @@ static const struct file_operations fops_wmi_services = { .llseek = default_llseek, }; -void ath10k_debug_read_target_stats(struct ath10k *ar, - struct wmi_stats_event *ev) +static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head) { - u8 *tmp = ev->data; - struct ath10k_target_stats *stats; - int num_pdev_stats, num_vdev_stats, num_peer_stats; - struct wmi_pdev_stats_10x *ps; - int i; + struct ath10k_fw_stats_pdev *i, *tmp; + list_for_each_entry_safe(i, tmp, head, list) { + list_del(&i->list); + kfree(i); + } +} + +static void ath10k_debug_fw_stats_peers_free(struct list_head *head) +{ + struct ath10k_fw_stats_peer *i, *tmp; + + list_for_each_entry_safe(i, tmp, head, list) { + list_del(&i->list); + kfree(i); + } +} + +static void ath10k_debug_fw_stats_reset(struct ath10k *ar) +{ spin_lock_bh(&ar->data_lock); + ar->debug.fw_stats_done = false; + ath10k_debug_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs); + ath10k_debug_fw_stats_peers_free(&ar->debug.fw_stats.peers); + spin_unlock_bh(&ar->data_lock); +} - stats = &ar->debug.target_stats; - - num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); /* 0 or 1 */ - num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); /* 0 or max vdevs */ - num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */ - - if (num_pdev_stats) { - ps = (struct wmi_pdev_stats_10x *)tmp; - - stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf); - stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count); - stats->rx_frame_count = __le32_to_cpu(ps->rx_frame_count); - stats->rx_clear_count = __le32_to_cpu(ps->rx_clear_count); - stats->cycle_count = __le32_to_cpu(ps->cycle_count); - stats->phy_err_count = __le32_to_cpu(ps->phy_err_count); - stats->chan_tx_power = __le32_to_cpu(ps->chan_tx_pwr); - - stats->comp_queued = __le32_to_cpu(ps->wal.tx.comp_queued); - stats->comp_delivered = - __le32_to_cpu(ps->wal.tx.comp_delivered); - stats->msdu_enqued = __le32_to_cpu(ps->wal.tx.msdu_enqued); - stats->mpdu_enqued = __le32_to_cpu(ps->wal.tx.mpdu_enqued); - 
stats->wmm_drop = __le32_to_cpu(ps->wal.tx.wmm_drop); - stats->local_enqued = __le32_to_cpu(ps->wal.tx.local_enqued); - stats->local_freed = __le32_to_cpu(ps->wal.tx.local_freed); - stats->hw_queued = __le32_to_cpu(ps->wal.tx.hw_queued); - stats->hw_reaped = __le32_to_cpu(ps->wal.tx.hw_reaped); - stats->underrun = __le32_to_cpu(ps->wal.tx.underrun); - stats->tx_abort = __le32_to_cpu(ps->wal.tx.tx_abort); - stats->mpdus_requed = __le32_to_cpu(ps->wal.tx.mpdus_requed); - stats->tx_ko = __le32_to_cpu(ps->wal.tx.tx_ko); - stats->data_rc = __le32_to_cpu(ps->wal.tx.data_rc); - stats->self_triggers = __le32_to_cpu(ps->wal.tx.self_triggers); - stats->sw_retry_failure = - __le32_to_cpu(ps->wal.tx.sw_retry_failure); - stats->illgl_rate_phy_err = - __le32_to_cpu(ps->wal.tx.illgl_rate_phy_err); - stats->pdev_cont_xretry = - __le32_to_cpu(ps->wal.tx.pdev_cont_xretry); - stats->pdev_tx_timeout = - __le32_to_cpu(ps->wal.tx.pdev_tx_timeout); - stats->pdev_resets = __le32_to_cpu(ps->wal.tx.pdev_resets); - stats->phy_underrun = __le32_to_cpu(ps->wal.tx.phy_underrun); - stats->txop_ovf = __le32_to_cpu(ps->wal.tx.txop_ovf); - - stats->mid_ppdu_route_change = - __le32_to_cpu(ps->wal.rx.mid_ppdu_route_change); - stats->status_rcvd = __le32_to_cpu(ps->wal.rx.status_rcvd); - stats->r0_frags = __le32_to_cpu(ps->wal.rx.r0_frags); - stats->r1_frags = __le32_to_cpu(ps->wal.rx.r1_frags); - stats->r2_frags = __le32_to_cpu(ps->wal.rx.r2_frags); - stats->r3_frags = __le32_to_cpu(ps->wal.rx.r3_frags); - stats->htt_msdus = __le32_to_cpu(ps->wal.rx.htt_msdus); - stats->htt_mpdus = __le32_to_cpu(ps->wal.rx.htt_mpdus); - stats->loc_msdus = __le32_to_cpu(ps->wal.rx.loc_msdus); - stats->loc_mpdus = __le32_to_cpu(ps->wal.rx.loc_mpdus); - stats->oversize_amsdu = - __le32_to_cpu(ps->wal.rx.oversize_amsdu); - stats->phy_errs = __le32_to_cpu(ps->wal.rx.phy_errs); - stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop); - stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs); - - if (test_bit(ATH10K_FW_FEATURE_WMI_10X, - ar->fw_features)) { - stats->ack_rx_bad = __le32_to_cpu(ps->ack_rx_bad); - stats->rts_bad = __le32_to_cpu(ps->rts_bad); - stats->rts_good = __le32_to_cpu(ps->rts_good); - stats->fcs_bad = __le32_to_cpu(ps->fcs_bad); - stats->no_beacons = __le32_to_cpu(ps->no_beacons); - stats->mib_int_count = __le32_to_cpu(ps->mib_int_count); - tmp += sizeof(struct wmi_pdev_stats_10x); - } else { - tmp += sizeof(struct wmi_pdev_stats_old); - } +static size_t ath10k_debug_fw_stats_num_peers(struct list_head *head) +{ + struct ath10k_fw_stats_peer *i; + size_t num = 0; + + list_for_each_entry(i, head, list) + ++num; + + return num; +} + +void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) +{ + struct ath10k_fw_stats stats = {}; + bool is_start, is_started, is_end; + size_t num_peers; + int ret; + + INIT_LIST_HEAD(&stats.pdevs); + INIT_LIST_HEAD(&stats.peers); + + spin_lock_bh(&ar->data_lock); + ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats); + if (ret) { + ath10k_warn(ar, "failed to pull fw stats: %d\n", ret); + goto unlock; } - /* 0 or max vdevs */ - /* Currently firmware does not support VDEV stats */ - if (num_vdev_stats) { - struct wmi_vdev_stats *vdev_stats; + /* Stat data may exceed htc-wmi buffer limit. In such case firmware + * splits the stats data and delivers it in a ping-pong fashion of + * request cmd-update event. + * + * However there is no explicit end-of-data. Instead start-of-data is + * used as an implicit one. 
This works as follows:
+ * a) discard stat update events until one with pdev stats is
+ *    delivered - this skips a session started at the end of (b)
+ * b) consume stat update events until another one with pdev stats is
+ *    delivered, which is treated as end-of-data and is itself discarded
+ */

-	for (i = 0; i < num_vdev_stats; i++) {
-		vdev_stats = (struct wmi_vdev_stats *)tmp;
-		tmp += sizeof(struct wmi_vdev_stats);
-	}
+	if (ar->debug.fw_stats_done) {
+		ath10k_warn(ar, "received unsolicited stats update event\n");
+		goto free;
 	}

-	if (num_peer_stats) {
-		struct wmi_peer_stats_10x *peer_stats;
-		struct ath10k_peer_stat *s;
-
-		stats->peers = num_peer_stats;
-
-		for (i = 0; i < num_peer_stats; i++) {
-			peer_stats = (struct wmi_peer_stats_10x *)tmp;
-			s = &stats->peer_stat[i];
-
-			memcpy(s->peer_macaddr, &peer_stats->peer_macaddr.addr,
-			       ETH_ALEN);
-			s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
-			s->peer_tx_rate =
-				__le32_to_cpu(peer_stats->peer_tx_rate);
-			if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
-				     ar->fw_features)) {
-				s->peer_rx_rate =
-					__le32_to_cpu(peer_stats->peer_rx_rate);
-				tmp += sizeof(struct wmi_peer_stats_10x);
-
-			} else {
-				tmp += sizeof(struct wmi_peer_stats_old);
-			}
+	num_peers = ath10k_debug_fw_stats_num_peers(&ar->debug.fw_stats.peers);
+	is_start = (list_empty(&ar->debug.fw_stats.pdevs) &&
+		    !list_empty(&stats.pdevs));
+	is_end = (!list_empty(&ar->debug.fw_stats.pdevs) &&
+		  !list_empty(&stats.pdevs));
+
+	if (is_start)
+		list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs);
+
+	if (is_end)
+		ar->debug.fw_stats_done = true;
+
+	is_started = !list_empty(&ar->debug.fw_stats.pdevs);
+
+	if (is_started && !is_end) {
+		if (num_peers >= ATH10K_MAX_NUM_PEER_IDS) {
+			/* Although this is unlikely, impose a sane limit to
+			 * prevent firmware from DoS-ing the host.
+			 */
+			ath10k_warn(ar, "dropping fw peer stats\n");
+			goto free;
 		}
+
+		list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers);
 	}

+	complete(&ar->debug.fw_stats_complete);
+
+free:
+	/* In some cases lists have been spliced and cleared. Free up
+	 * resources if that is not the case.
+ */ + ath10k_debug_fw_stats_pdevs_free(&stats.pdevs); + ath10k_debug_fw_stats_peers_free(&stats.peers); + +unlock: spin_unlock_bh(&ar->data_lock); - complete(&ar->debug.event_stats_compl); } -static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) +static int ath10k_debug_fw_stats_request(struct ath10k *ar) { - struct ath10k *ar = file->private_data; - struct ath10k_target_stats *fw_stats; - char *buf = NULL; - unsigned int len = 0, buf_len = 8000; - ssize_t ret_cnt = 0; - long left; - int i; + unsigned long timeout; int ret; - fw_stats = &ar->debug.target_stats; + lockdep_assert_held(&ar->conf_mutex); - mutex_lock(&ar->conf_mutex); + timeout = jiffies + msecs_to_jiffies(1*HZ); - if (ar->state != ATH10K_STATE_ON) - goto exit; + ath10k_debug_fw_stats_reset(ar); - buf = kzalloc(buf_len, GFP_KERNEL); - if (!buf) - goto exit; + for (;;) { + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; - ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT); - if (ret) { - ath10k_warn(ar, "could not request stats (%d)\n", ret); - goto exit; + reinit_completion(&ar->debug.fw_stats_complete); + + ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT); + if (ret) { + ath10k_warn(ar, "could not request stats (%d)\n", ret); + return ret; + } + + ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete, + 1*HZ); + if (ret <= 0) + return -ETIMEDOUT; + + spin_lock_bh(&ar->data_lock); + if (ar->debug.fw_stats_done) { + spin_unlock_bh(&ar->data_lock); + break; + } + spin_unlock_bh(&ar->data_lock); } - left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ); - if (left <= 0) - goto exit; + return 0; +} + +/* FIXME: How to calculate the buffer size sanely? */ +#define ATH10K_FW_STATS_BUF_SIZE (1024*1024) + +static void ath10k_fw_stats_fill(struct ath10k *ar, + struct ath10k_fw_stats *fw_stats, + char *buf) +{ + unsigned int len = 0; + unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE; + const struct ath10k_fw_stats_pdev *pdev; + const struct ath10k_fw_stats_peer *peer; + size_t num_peers; spin_lock_bh(&ar->data_lock); + + pdev = list_first_entry_or_null(&fw_stats->pdevs, + struct ath10k_fw_stats_pdev, list); + if (!pdev) { + ath10k_warn(ar, "failed to get pdev stats\n"); + goto unlock; + } + + num_peers = ath10k_debug_fw_stats_num_peers(&fw_stats->peers); + len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "%30s\n", "ath10k PDEV stats"); @@ -412,29 +418,29 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf, "================="); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Channel noise floor", fw_stats->ch_noise_floor); + "Channel noise floor", pdev->ch_noise_floor); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "Channel TX power", fw_stats->chan_tx_power); + "Channel TX power", pdev->chan_tx_power); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "TX frame count", fw_stats->tx_frame_count); + "TX frame count", pdev->tx_frame_count); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "RX frame count", fw_stats->rx_frame_count); + "RX frame count", pdev->rx_frame_count); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "RX clear count", fw_stats->rx_clear_count); + "RX clear count", pdev->rx_clear_count); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "Cycle count", fw_stats->cycle_count); + "Cycle count", pdev->cycle_count); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "PHY 
error count", fw_stats->phy_err_count); + "PHY error count", pdev->phy_err_count); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "RTS bad count", fw_stats->rts_bad); + "RTS bad count", pdev->rts_bad); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "RTS good count", fw_stats->rts_good); + "RTS good count", pdev->rts_good); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "FCS bad count", fw_stats->fcs_bad); + "FCS bad count", pdev->fcs_bad); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "No beacon count", fw_stats->no_beacons); + "No beacon count", pdev->no_beacons); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "MIB int count", fw_stats->mib_int_count); + "MIB int count", pdev->mib_int_count); len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "%30s\n", @@ -443,51 +449,51 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf, "================="); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "HTT cookies queued", fw_stats->comp_queued); + "HTT cookies queued", pdev->comp_queued); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "HTT cookies disp.", fw_stats->comp_delivered); + "HTT cookies disp.", pdev->comp_delivered); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MSDU queued", fw_stats->msdu_enqued); + "MSDU queued", pdev->msdu_enqued); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDU queued", fw_stats->mpdu_enqued); + "MPDU queued", pdev->mpdu_enqued); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MSDUs dropped", fw_stats->wmm_drop); + "MSDUs dropped", pdev->wmm_drop); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Local enqued", fw_stats->local_enqued); + "Local enqued", pdev->local_enqued); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Local freed", fw_stats->local_freed); + "Local freed", pdev->local_freed); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "HW queued", fw_stats->hw_queued); + "HW queued", pdev->hw_queued); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PPDUs reaped", fw_stats->hw_reaped); + "PPDUs reaped", pdev->hw_reaped); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Num underruns", fw_stats->underrun); + "Num underruns", pdev->underrun); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PPDUs cleaned", fw_stats->tx_abort); + "PPDUs cleaned", pdev->tx_abort); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDUs requed", fw_stats->mpdus_requed); + "MPDUs requed", pdev->mpdus_requed); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Excessive retries", fw_stats->tx_ko); + "Excessive retries", pdev->tx_ko); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "HW rate", fw_stats->data_rc); + "HW rate", pdev->data_rc); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Sched self tiggers", fw_stats->self_triggers); + "Sched self tiggers", pdev->self_triggers); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Dropped due to SW retries", - fw_stats->sw_retry_failure); + pdev->sw_retry_failure); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Illegal rate phy errors", - fw_stats->illgl_rate_phy_err); + pdev->illgl_rate_phy_err); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Pdev continous xretry", fw_stats->pdev_cont_xretry); + "Pdev continous xretry", pdev->pdev_cont_xretry); len += scnprintf(buf + len, buf_len 
- len, "%30s %10d\n", - "TX timeout", fw_stats->pdev_tx_timeout); + "TX timeout", pdev->pdev_tx_timeout); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PDEV resets", fw_stats->pdev_resets); + "PDEV resets", pdev->pdev_resets); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PHY underrun", fw_stats->phy_underrun); + "PHY underrun", pdev->phy_underrun); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDU is more than txop limit", fw_stats->txop_ovf); + "MPDU is more than txop limit", pdev->txop_ovf); len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "%30s\n", @@ -497,70 +503,161 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf, len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Mid PPDU route change", - fw_stats->mid_ppdu_route_change); + pdev->mid_ppdu_route_change); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Tot. number of statuses", fw_stats->status_rcvd); + "Tot. number of statuses", pdev->status_rcvd); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Extra frags on rings 0", fw_stats->r0_frags); + "Extra frags on rings 0", pdev->r0_frags); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Extra frags on rings 1", fw_stats->r1_frags); + "Extra frags on rings 1", pdev->r1_frags); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Extra frags on rings 2", fw_stats->r2_frags); + "Extra frags on rings 2", pdev->r2_frags); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Extra frags on rings 3", fw_stats->r3_frags); + "Extra frags on rings 3", pdev->r3_frags); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MSDUs delivered to HTT", fw_stats->htt_msdus); + "MSDUs delivered to HTT", pdev->htt_msdus); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDUs delivered to HTT", fw_stats->htt_mpdus); + "MPDUs delivered to HTT", pdev->htt_mpdus); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MSDUs delivered to stack", fw_stats->loc_msdus); + "MSDUs delivered to stack", pdev->loc_msdus); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDUs delivered to stack", fw_stats->loc_mpdus); + "MPDUs delivered to stack", pdev->loc_mpdus); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Oversized AMSUs", fw_stats->oversize_amsdu); + "Oversized AMSUs", pdev->oversize_amsdu); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PHY errors", fw_stats->phy_errs); + "PHY errors", pdev->phy_errs); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PHY errors drops", fw_stats->phy_err_drop); + "PHY errors drops", pdev->phy_err_drop); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs); + "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs); len += scnprintf(buf + len, buf_len - len, "\n"); - len += scnprintf(buf + len, buf_len - len, "%30s (%d)\n", - "ath10k PEER stats", fw_stats->peers); + len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", + "ath10k PEER stats", num_peers); len += scnprintf(buf + len, buf_len - len, "%30s\n\n", "================="); - for (i = 0; i < fw_stats->peers; i++) { + list_for_each_entry(peer, &fw_stats->peers, list) { len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", - "Peer MAC address", - fw_stats->peer_stat[i].peer_macaddr); + "Peer MAC address", peer->peer_macaddr); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", - "Peer RSSI", 
fw_stats->peer_stat[i].peer_rssi); + "Peer RSSI", peer->peer_rssi); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", - "Peer TX rate", - fw_stats->peer_stat[i].peer_tx_rate); + "Peer TX rate", peer->peer_tx_rate); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", - "Peer RX rate", - fw_stats->peer_stat[i].peer_rx_rate); + "Peer RX rate", peer->peer_rx_rate); len += scnprintf(buf + len, buf_len - len, "\n"); } + +unlock: spin_unlock_bh(&ar->data_lock); - if (len > buf_len) - len = buf_len; + if (len >= buf_len) + buf[len - 1] = 0; + else + buf[len] = 0; +} - ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); +static int ath10k_fw_stats_open(struct inode *inode, struct file *file) +{ + struct ath10k *ar = inode->i_private; + void *buf = NULL; + int ret; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON) { + ret = -ENETDOWN; + goto err_unlock; + } + + buf = vmalloc(ATH10K_FW_STATS_BUF_SIZE); + if (!buf) { + ret = -ENOMEM; + goto err_unlock; + } + + ret = ath10k_debug_fw_stats_request(ar); + if (ret) { + ath10k_warn(ar, "failed to request fw stats: %d\n", ret); + goto err_free; + } + + ath10k_fw_stats_fill(ar, &ar->debug.fw_stats, buf); + file->private_data = buf; -exit: mutex_unlock(&ar->conf_mutex); - kfree(buf); - return ret_cnt; + return 0; + +err_free: + vfree(buf); + +err_unlock: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static int ath10k_fw_stats_release(struct inode *inode, struct file *file) +{ + vfree(file->private_data); + + return 0; +} + +static ssize_t ath10k_fw_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + const char *buf = file->private_data; + unsigned int len = strlen(buf); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_fw_stats = { - .read = ath10k_read_fw_stats, + .open = ath10k_fw_stats_open, + .release = ath10k_fw_stats_release, + .read = ath10k_fw_stats_read, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath10k_debug_fw_reset_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + int ret, len, buf_len; + char *buf; + + buf_len = 500; + buf = kmalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + spin_lock_bh(&ar->data_lock); + + len = 0; + len += scnprintf(buf + len, buf_len - len, + "fw_crash_counter\t\t%d\n", ar->stats.fw_crash_counter); + len += scnprintf(buf + len, buf_len - len, + "fw_warm_reset_counter\t\t%d\n", + ar->stats.fw_warm_reset_counter); + len += scnprintf(buf + len, buf_len - len, + "fw_cold_reset_counter\t\t%d\n", + ar->stats.fw_cold_reset_counter); + + spin_unlock_bh(&ar->data_lock); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + kfree(buf); + + return ret; +} + +static const struct file_operations fops_fw_reset_stats = { .open = simple_open, + .read = ath10k_debug_fw_reset_stats_read, .owner = THIS_MODULE, .llseek = default_llseek, }; @@ -593,7 +690,8 @@ static ssize_t ath10k_read_simulate_fw_crash(struct file *file, "To simulate firmware crash write one of the keywords to this file:\n" "`soft` - this will send WMI_FORCE_FW_HANG_ASSERT to firmware if FW supports that command.\n" "`hard` - this will send to firmware command with illegal parameters causing firmware crash.\n" - "`assert` - this will send special illegal parameter to firmware to cause assert failure and crash.\n"; + "`assert` - this will send special illegal parameter to firmware to 
cause assert failure and crash.\n";
+		"`assert` - this will send special illegal parameter to firmware to cause assert failure and crash.\n"
+		"`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n";

 	return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
 }
@@ -646,6 +744,10 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
 	} else if (!strcmp(buf, "assert")) {
 		ath10k_info(ar, "simulating firmware assert crash\n");
 		ret = ath10k_debug_fw_assert(ar);
+	} else if (!strcmp(buf, "hw-restart")) {
+		ath10k_info(ar, "user requested hw restart\n");
+		queue_work(ar->workqueue, &ar->restart_work);
+		ret = 0;
 	} else {
 		ret = -EINVAL;
 		goto exit;
@@ -759,8 +861,8 @@ static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar)
 	strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
 		sizeof(dump_data->fw_ver));

-	dump_data->kernel_ver_code = cpu_to_le32(LINUX_VERSION_CODE);
-	strlcpy(dump_data->kernel_ver, VERMAGIC_STRING,
+	dump_data->kernel_ver_code = 0;
+	strlcpy(dump_data->kernel_ver, init_utsname()->release,
 		sizeof(dump_data->kernel_ver));

 	dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
@@ -822,6 +924,236 @@ static const struct file_operations fops_fw_crash_dump = {
 	.llseek = default_llseek,
 };

+static ssize_t ath10k_reg_addr_read(struct file *file,
+				    char __user *user_buf,
+				    size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	u8 buf[32];
+	unsigned int len = 0;
+	u32 reg_addr;
+
+	mutex_lock(&ar->conf_mutex);
+	reg_addr = ar->debug.reg_addr;
+	mutex_unlock(&ar->conf_mutex);
+
+	len += scnprintf(buf + len, sizeof(buf) - len, "0x%x\n", reg_addr);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_reg_addr_write(struct file *file,
+				     const char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	u32 reg_addr;
+	int ret;
+
+	ret = kstrtou32_from_user(user_buf, count, 0, &reg_addr);
+	if (ret)
+		return ret;
+
+	if (!IS_ALIGNED(reg_addr, 4))
+		return -EFAULT;
+
+	mutex_lock(&ar->conf_mutex);
+	ar->debug.reg_addr = reg_addr;
+	mutex_unlock(&ar->conf_mutex);
+
+	return count;
+}
+
+static const struct file_operations fops_reg_addr = {
+	.read = ath10k_reg_addr_read,
+	.write = ath10k_reg_addr_write,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t ath10k_reg_value_read(struct file *file,
+				     char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	u8 buf[48];
+	unsigned int len;
+	u32 reg_addr, reg_val;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_ON &&
+	    ar->state != ATH10K_STATE_UTF) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	reg_addr = ar->debug.reg_addr;
+
+	reg_val = ath10k_hif_read32(ar, reg_addr);
+	len = scnprintf(buf, sizeof(buf), "0x%08x:0x%08x\n", reg_addr, reg_val);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static ssize_t ath10k_reg_value_write(struct file *file,
+				      const char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	u32 reg_addr, reg_val;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_ON &&
+	    ar->state != ATH10K_STATE_UTF) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	reg_addr = ar->debug.reg_addr;
+
+	ret = kstrtou32_from_user(user_buf, count, 0, &reg_val);
+	if (ret)
+		goto exit;
+
+	ath10k_hif_write32(ar, reg_addr, reg_val);
+
+	ret = count;
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static const struct file_operations fops_reg_value = {
+	.read = ath10k_reg_value_read,
+	.write = ath10k_reg_value_write,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t ath10k_mem_value_read(struct file *file,
+				     char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	u8 *buf;
+	int ret;
+
+	if (*ppos < 0)
+		return -EINVAL;
+
+	if (!count)
+		return 0;
+
+	mutex_lock(&ar->conf_mutex);
+
+	buf = vmalloc(count);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	if (ar->state != ATH10K_STATE_ON &&
+	    ar->state != ATH10K_STATE_UTF) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	ret = ath10k_hif_diag_read(ar, *ppos, buf, count);
+	if (ret) {
+		ath10k_warn(ar, "failed to read address 0x%08x via diagnose window from debugfs: %d\n",
+			    (u32)(*ppos), ret);
+		goto exit;
+	}
+
+	ret = copy_to_user(user_buf, buf, count);
+	if (ret) {
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	count -= ret;
+	*ppos += count;
+	ret = count;
+
+exit:
+	vfree(buf);
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static ssize_t ath10k_mem_value_write(struct file *file,
+				      const char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	u8 *buf;
+	int ret;
+
+	if (*ppos < 0)
+		return -EINVAL;
+
+	if (!count)
+		return 0;
+
+	mutex_lock(&ar->conf_mutex);
+
+	buf = vmalloc(count);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	if (ar->state != ATH10K_STATE_ON &&
+	    ar->state != ATH10K_STATE_UTF) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	ret = copy_from_user(buf, user_buf, count);
+	if (ret) {
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	ret = ath10k_hif_diag_write(ar, *ppos, buf, count);
+	if (ret) {
+		ath10k_warn(ar, "failed to write address 0x%08x via diagnose window from debugfs: %d\n",
+			    (u32)(*ppos), ret);
+		goto exit;
+	}
+
+	*ppos += count;
+	ret = count;
+
+exit:
+	vfree(buf);
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static const struct file_operations fops_mem_value = {
+	.read = ath10k_mem_value_read,
+	.write = ath10k_mem_value_write,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
 static int ath10k_debug_htt_stats_req(struct ath10k *ar)
 {
 	u64 cookie;
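The three files added above form a small peek/poke interface: reg_addr latches a target register address, reg_value reads or writes that register, and mem_value exposes target memory with the file offset acting as the address. A hypothetical userspace sketch, not part of the patch; the debugfs path and the register offset are assumptions, and reg_addr only accepts 4-byte aligned values:

    /* read one target register through the reg_addr/reg_value pair */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    #define ATH10K_DBG "/sys/kernel/debug/ieee80211/phy0/ath10k"

    int main(void)
    {
            char buf[64];
            ssize_t n;
            int fd;

            /* select which register the next reg_value access will touch */
            fd = open(ATH10K_DBG "/reg_addr", O_WRONLY);
            if (fd < 0 || write(fd, "0x0003a028", 10) < 0) {
                    perror("reg_addr");
                    return 1;
            }
            close(fd);

            /* reg_value returns a single "0xADDRESS:0xVALUE" line */
            fd = open(ATH10K_DBG "/reg_value", O_RDONLY);
            if (fd < 0) {
                    perror("reg_value");
                    return 1;
            }
            n = read(fd, buf, sizeof(buf) - 1);
            if (n > 0) {
                    buf[n] = '\0';
                    fputs(buf, stdout);
            }
            close(fd);

            return 0;
    }

For mem_value the same idea applies with pread()/pwrite(): the file offset is forwarded to ath10k_hif_diag_read()/ath10k_hif_diag_write() as the target address.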
@@ -1029,6 +1361,166 @@ exit:
 	return ret;
 }

+/* TODO: Would be nice to always support ethtool stats, would need to
+ * move the stats storage out of ath10k_debug, or always have ath10k_debug
+ * struct available.
+ */
+
+/* This generally corresponds to the debugfs fw_stats file */
+static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = {
+	"tx_pkts_nic",
+	"tx_bytes_nic",
+	"rx_pkts_nic",
+	"rx_bytes_nic",
+	"d_noise_floor",
+	"d_cycle_count",
+	"d_phy_error",
+	"d_rts_bad",
+	"d_rts_good",
+	"d_tx_power", /* in 0.5 dBm, I think */
+	"d_rx_crc_err", /* fcs_bad */
+	"d_no_beacon",
+	"d_tx_mpdus_queued",
+	"d_tx_msdu_queued",
+	"d_tx_msdu_dropped",
+	"d_local_enqued",
+	"d_local_freed",
+	"d_tx_ppdu_hw_queued",
+	"d_tx_ppdu_reaped",
+	"d_tx_fifo_underrun",
+	"d_tx_ppdu_abort",
+	"d_tx_mpdu_requed",
+	"d_tx_excessive_retries",
+	"d_tx_hw_rate",
+	"d_tx_dropped_sw_retries",
+	"d_tx_illegal_rate",
+	"d_tx_continuous_xretries",
+	"d_tx_timeout",
+	"d_tx_mpdu_txop_limit",
+	"d_pdev_resets",
+	"d_rx_mid_ppdu_route_change",
+	"d_rx_status",
+	"d_rx_extra_frags_ring0",
+	"d_rx_extra_frags_ring1",
+	"d_rx_extra_frags_ring2",
+	"d_rx_extra_frags_ring3",
+	"d_rx_msdu_htt",
+	"d_rx_mpdu_htt",
+	"d_rx_msdu_stack",
+	"d_rx_mpdu_stack",
+	"d_rx_phy_err",
+	"d_rx_phy_err_drops",
+	"d_rx_mpdu_errors", /* FCS, MIC, ENC */
+	"d_fw_crash_count",
+	"d_fw_warm_reset_count",
+	"d_fw_cold_reset_count",
+};
+
+#define ATH10K_SSTATS_LEN ARRAY_SIZE(ath10k_gstrings_stats)
+
+void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif,
+				 u32 sset, u8 *data)
+{
+	if (sset == ETH_SS_STATS)
+		memcpy(data, *ath10k_gstrings_stats,
+		       sizeof(ath10k_gstrings_stats));
+}
+
+int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif, int sset)
+{
+	if (sset == ETH_SS_STATS)
+		return ATH10K_SSTATS_LEN;
+
+	return 0;
+}
+
+void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif,
+			       struct ethtool_stats *stats, u64 *data)
+{
+	struct ath10k *ar = hw->priv;
+	static const struct ath10k_fw_stats_pdev zero_stats = {};
+	const struct ath10k_fw_stats_pdev *pdev_stats;
+	int i = 0, ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state == ATH10K_STATE_ON) {
+		ret = ath10k_debug_fw_stats_request(ar);
+		if (ret) {
+			/* just print a warning and try to use older results */
+			ath10k_warn(ar,
+				    "failed to get fw stats for ethtool: %d\n",
+				    ret);
+		}
+	}
+
+	pdev_stats = list_first_entry_or_null(&ar->debug.fw_stats.pdevs,
+					      struct ath10k_fw_stats_pdev,
+					      list);
+	if (!pdev_stats) {
+		/* no results available so just return zeroes */
+		pdev_stats = &zero_stats;
+	}
+
+	spin_lock_bh(&ar->data_lock);
+
+	data[i++] = pdev_stats->hw_reaped; /* ppdu reaped */
+	data[i++] = 0; /* tx bytes */
+	data[i++] = pdev_stats->htt_mpdus;
+	data[i++] = 0; /* rx bytes */
+	data[i++] = pdev_stats->ch_noise_floor;
+	data[i++] = pdev_stats->cycle_count;
+	data[i++] = pdev_stats->phy_err_count;
+	data[i++] = pdev_stats->rts_bad;
+	data[i++] = pdev_stats->rts_good;
+	data[i++] = pdev_stats->chan_tx_power;
+	data[i++] = pdev_stats->fcs_bad;
+	data[i++] = pdev_stats->no_beacons;
+	data[i++] = pdev_stats->mpdu_enqued;
+	data[i++] = pdev_stats->msdu_enqued;
+	data[i++] = pdev_stats->wmm_drop;
+	data[i++] = pdev_stats->local_enqued;
+	data[i++] = pdev_stats->local_freed;
+	data[i++] = pdev_stats->hw_queued;
+	data[i++] = pdev_stats->hw_reaped;
+	data[i++] = pdev_stats->underrun;
+	data[i++] = pdev_stats->tx_abort;
+	data[i++] = pdev_stats->mpdus_requed;
+	data[i++] = pdev_stats->tx_ko;
+	data[i++] = pdev_stats->data_rc;
+	data[i++] = pdev_stats->sw_retry_failure;
+	data[i++] = pdev_stats->illgl_rate_phy_err;
+	data[i++] = pdev_stats->pdev_cont_xretry;
+	data[i++] = pdev_stats->pdev_tx_timeout;
+	data[i++] = pdev_stats->txop_ovf;
+	data[i++] = pdev_stats->pdev_resets;
+	data[i++] = pdev_stats->mid_ppdu_route_change;
+	data[i++] = pdev_stats->status_rcvd;
+	data[i++] = pdev_stats->r0_frags;
+	data[i++] = pdev_stats->r1_frags;
+	data[i++] = pdev_stats->r2_frags;
+	data[i++] = pdev_stats->r3_frags;
+	data[i++] = pdev_stats->htt_msdus;
+	data[i++] = pdev_stats->htt_mpdus;
+	data[i++] = pdev_stats->loc_msdus;
+	data[i++] = pdev_stats->loc_mpdus;
+	data[i++] = pdev_stats->phy_errs;
+	data[i++] = pdev_stats->phy_err_drop;
+	data[i++] = pdev_stats->mpdu_errs;
+	data[i++] = ar->stats.fw_crash_counter;
+	data[i++] = ar->stats.fw_warm_reset_counter;
+	data[i++] = ar->stats.fw_cold_reset_counter;
+
+	spin_unlock_bh(&ar->data_lock);
+
+	mutex_unlock(&ar->conf_mutex);
+
+	WARN_ON(i != ATH10K_SSTATS_LEN);
+}
+
 static const struct file_operations fops_fw_dbglog = {
 	.read = ath10k_read_fw_dbglog,
 	.write = ath10k_write_fw_dbglog,
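Assuming these hooks get wired into the driver's ieee80211_ops (done outside the hunks shown here), the counters surface through the standard ethtool interface on the wlan netdev, i.e. what `ethtool -S wlan0` prints. A hedged userspace sketch of the same query via the SIOCETHTOOL ioctl; the interface name is an assumption and error handling is trimmed:

    /* dump driver ethtool stats: names via GSTRINGS, values via GSTATS */
    #include <linux/ethtool.h>
    #include <linux/sockios.h>
    #include <net/if.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>

    int main(void)
    {
            struct ethtool_drvinfo info = { .cmd = ETHTOOL_GDRVINFO };
            struct ethtool_gstrings *names;
            struct ethtool_stats *vals;
            struct ifreq ifr;
            unsigned int i, n;
            int fd;

            fd = socket(AF_INET, SOCK_DGRAM, 0);
            if (fd < 0)
                    return 1;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "wlan0", IFNAMSIZ - 1);

            /* GDRVINFO reports how many u64 counters the driver exposes */
            ifr.ifr_data = (void *)&info;
            if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                    return 1;
            n = info.n_stats;

            names = calloc(1, sizeof(*names) + n * ETH_GSTRING_LEN);
            vals = calloc(1, sizeof(*vals) + n * sizeof(__u64));
            if (!names || !vals)
                    return 1;

            names->cmd = ETHTOOL_GSTRINGS;
            names->string_set = ETH_SS_STATS;
            names->len = n;
            vals->cmd = ETHTOOL_GSTATS;
            vals->n_stats = n;

            ifr.ifr_data = (void *)names;
            if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                    return 1;
            ifr.ifr_data = (void *)vals;
            if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                    return 1;

            for (i = 0; i < n; i++)
                    printf("%-32.32s %llu\n",
                           (char *)names->data + i * ETH_GSTRING_LEN,
                           (unsigned long long)vals->data[i]);

            return 0;
    }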
@@ -1037,6 +1529,84 @@ static const struct file_operations fops_fw_dbglog = {
 	.llseek = default_llseek,
 };

+static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+{
+	struct ath10k *ar = inode->i_private;
+	void *buf;
+	u32 hi_addr;
+	__le32 addr;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_ON &&
+	    ar->state != ATH10K_STATE_UTF) {
+		ret = -ENETDOWN;
+		goto err;
+	}
+
+	buf = vmalloc(QCA988X_CAL_DATA_LEN);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
+
+	ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
+	if (ret) {
+		ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret);
+		goto err_vfree;
+	}
+
+	ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
+				   QCA988X_CAL_DATA_LEN);
+	if (ret) {
+		ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
+		goto err_vfree;
+	}
+
+	file->private_data = buf;
+
+	mutex_unlock(&ar->conf_mutex);
+
+	return 0;
+
+err_vfree:
+	vfree(buf);
+
+err:
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static ssize_t ath10k_debug_cal_data_read(struct file *file,
+					  char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	void *buf = file->private_data;
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+				       buf, QCA988X_CAL_DATA_LEN);
+}
+
+static int ath10k_debug_cal_data_release(struct inode *inode,
+					 struct file *file)
+{
+	vfree(file->private_data);
+
+	return 0;
+}
+
+static const struct file_operations fops_cal_data = {
+	.open = ath10k_debug_cal_data_open,
+	.read = ath10k_debug_cal_data_read,
+	.release = ath10k_debug_cal_data_release,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
 int ath10k_debug_start(struct ath10k *ar)
 {
 	int ret;
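cal_data snapshots the board data out of target memory at open() time, so userspace only needs a single read pass. A hypothetical sketch, not part of the patch; the debugfs path and the 2116-byte QCA988X_CAL_DATA_LEN value are assumptions:

    /* save the calibration data snapshot to a local file */
    #include <stdio.h>

    int main(void)
    {
            char buf[2116];
            FILE *in, *out;
            size_t n;

            in = fopen("/sys/kernel/debug/ieee80211/phy0/ath10k/cal_data", "rb");
            out = fopen("cal_data.bin", "wb");
            if (!in || !out)
                    return 1;

            /* the snapshot was taken at open time; one read suffices */
            n = fread(buf, 1, sizeof(buf), in);
            fwrite(buf, 1, n, out);

            fclose(out);
            fclose(in);
            return 0;
    }

This pairs naturally with the core.c change earlier in the patch that loads cal-<bus>-<dev>.bin at boot: a snapshot captured here can be fed back as an external calibration file.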
@@ -1057,7 +1627,22 @@ int ath10k_debug_start(struct ath10k *ar)
 			    ret);
 	}

-	return 0;
+	if (ar->debug.pktlog_filter) {
+		ret = ath10k_wmi_pdev_pktlog_enable(ar,
+						    ar->debug.pktlog_filter);
+		if (ret)
+			/* not serious */
+			ath10k_warn(ar,
+				    "failed to enable pktlog filter %x: %d\n",
+				    ar->debug.pktlog_filter, ret);
+	} else {
+		ret = ath10k_wmi_pdev_pktlog_disable(ar);
+		if (ret)
+			/* not serious */
+			ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
+	}
+
+	return ret;
 }

 void ath10k_debug_stop(struct ath10k *ar)
@@ -1072,6 +1657,8 @@ void ath10k_debug_stop(struct ath10k *ar)

 	ar->debug.htt_max_amsdu = 0;
 	ar->debug.htt_max_ampdu = 0;
+
+	ath10k_wmi_pdev_pktlog_disable(ar);
 }

 static ssize_t ath10k_write_simulate_radar(struct file *file,
@@ -1154,12 +1741,78 @@ static const struct file_operations fops_dfs_stats = {
 	.llseek = default_llseek,
 };

+static ssize_t ath10k_write_pktlog_filter(struct file *file,
+					  const char __user *ubuf,
+					  size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	u32 filter;
+	int ret;
+
+	if (kstrtouint_from_user(ubuf, count, 0, &filter))
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_ON) {
+		ar->debug.pktlog_filter = filter;
+		ret = count;
+		goto out;
+	}
+
+	if (filter && (filter != ar->debug.pktlog_filter)) {
+		ret = ath10k_wmi_pdev_pktlog_enable(ar, filter);
+		if (ret) {
+			ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n",
+				    ar->debug.pktlog_filter, ret);
+			goto out;
+		}
+	} else {
+		ret = ath10k_wmi_pdev_pktlog_disable(ar);
+		if (ret) {
+			ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
+			goto out;
+		}
+	}
+
+	ar->debug.pktlog_filter = filter;
+	ret = count;
+
+out:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static ssize_t ath10k_read_pktlog_filter(struct file *file, char __user *ubuf,
+					 size_t count, loff_t *ppos)
+{
+	char buf[32];
+	struct ath10k *ar = file->private_data;
+	int len = 0;
+
+	mutex_lock(&ar->conf_mutex);
+	len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
+			ar->debug.pktlog_filter);
+	mutex_unlock(&ar->conf_mutex);
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pktlog_filter = {
+	.read = ath10k_read_pktlog_filter,
+	.write = ath10k_write_pktlog_filter,
+	.open = simple_open
+};
+
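The value written here is a bitmask built from enum ath10k_pktlog_filter, added to debug.h further down in this patch; writing 0 disables packet logging, and ath10k_debug_start() re-applies the stored filter on the next device start. A hypothetical userspace sketch, not part of the patch; the debugfs path is an assumption:

    /* enable every packet log class (0x1f == ATH10K_PKTLOG_ANY) */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd;

            fd = open("/sys/kernel/debug/ieee80211/phy0/ath10k/pktlog_filter",
                      O_WRONLY);
            if (fd < 0) {
                    perror("pktlog_filter");
                    return 1;
            }

            /* RX | TX | RCFIND | RCUPDATE | DBG_PRINT */
            if (write(fd, "0x1f", 4) < 0)
                    perror("write");

            close(fd);
            return 0;
    }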
 int ath10k_debug_create(struct ath10k *ar)
 {
 	ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
 	if (!ar->debug.fw_crash_data)
 		return -ENOMEM;

+	INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
+	INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
+
 	return 0;
 }

@@ -1167,6 +1820,8 @@ void ath10k_debug_destroy(struct ath10k *ar)
 {
 	vfree(ar->debug.fw_crash_data);
 	ar->debug.fw_crash_data = NULL;
+
+	ath10k_debug_fw_stats_reset(ar);
 }

 int ath10k_debug_register(struct ath10k *ar)
@@ -1183,11 +1838,14 @@ int ath10k_debug_register(struct ath10k *ar)
 	INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
 			  ath10k_debug_htt_stats_dwork);

-	init_completion(&ar->debug.event_stats_compl);
+	init_completion(&ar->debug.fw_stats_complete);

 	debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
 			    &fops_fw_stats);

+	debugfs_create_file("fw_reset_stats", S_IRUSR, ar->debug.debugfs_phy,
+			    ar, &fops_fw_reset_stats);
+
 	debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
 			    &fops_wmi_services);

@@ -1197,6 +1855,15 @@ int ath10k_debug_register(struct ath10k *ar)
 	debugfs_create_file("fw_crash_dump", S_IRUSR, ar->debug.debugfs_phy,
 			    ar, &fops_fw_crash_dump);

+	debugfs_create_file("reg_addr", S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_phy, ar, &fops_reg_addr);
+
+	debugfs_create_file("reg_value", S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_phy, ar, &fops_reg_value);
+
+	debugfs_create_file("mem_value", S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_phy, ar, &fops_mem_value);
+
 	debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
 			    ar, &fops_chip_id);

@@ -1210,6 +1877,9 @@ int ath10k_debug_register(struct ath10k *ar)
 	debugfs_create_file("fw_dbglog", S_IRUSR, ar->debug.debugfs_phy,
 			    ar, &fops_fw_dbglog);

+	debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
+			    ar, &fops_cal_data);
+
 	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
 		debugfs_create_file("dfs_simulate_radar", S_IWUSR,
 				    ar->debug.debugfs_phy, ar,
@@ -1224,6 +1894,9 @@ int ath10k_debug_register(struct ath10k *ar)
 			    &fops_dfs_stats);
 	}

+	debugfs_create_file("pktlog_filter", S_IRUGO | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_pktlog_filter); + return 0; } @@ -1260,11 +1933,26 @@ void ath10k_dbg_dump(struct ath10k *ar, const char *msg, const char *prefix, const void *buf, size_t len) { + char linebuf[256]; + unsigned int linebuflen; + const void *ptr; + if (ath10k_debug_mask & mask) { if (msg) ath10k_dbg(ar, mask, "%s\n", msg); - print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len); + for (ptr = buf; (ptr - buf) < len; ptr += 16) { + linebuflen = 0; + linebuflen += scnprintf(linebuf + linebuflen, + sizeof(linebuf) - linebuflen, + "%s%08x: ", + (prefix ? prefix : ""), + (unsigned int)(ptr - buf)); + hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1, + linebuf + linebuflen, + sizeof(linebuf) - linebuflen, true); + dev_printk(KERN_DEBUG, ar->dev, "%s\n", linebuf); + } } /* tracing code doesn't like null strings :/ */ diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index b3774f7f492c480d941a300a81b15a2c8a6cb234..1b87a5dbec53c3bd8875774ca8806525d3476efd 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -35,14 +35,24 @@ enum ath10k_debug_mask { ATH10K_DBG_BMI = 0x00000400, ATH10K_DBG_REGULATORY = 0x00000800, ATH10K_DBG_TESTMODE = 0x00001000, + ATH10K_DBG_WMI_PRINT = 0x00002000, ATH10K_DBG_ANY = 0xffffffff, }; +enum ath10k_pktlog_filter { + ATH10K_PKTLOG_RX = 0x000000001, + ATH10K_PKTLOG_TX = 0x000000002, + ATH10K_PKTLOG_RCFIND = 0x000000004, + ATH10K_PKTLOG_RCUPDATE = 0x000000008, + ATH10K_PKTLOG_DBG_PRINT = 0x000000010, + ATH10K_PKTLOG_ANY = 0x00000001f, +}; + extern unsigned int ath10k_debug_mask; -__printf(2, 3) int ath10k_info(struct ath10k *ar, const char *fmt, ...); -__printf(2, 3) int ath10k_err(struct ath10k *ar, const char *fmt, ...); -__printf(2, 3) int ath10k_warn(struct ath10k *ar, const char *fmt, ...); +__printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...); +__printf(2, 3) void ath10k_err(struct ath10k *ar, const char *fmt, ...); +__printf(2, 3) void ath10k_warn(struct ath10k *ar, const char *fmt, ...); void ath10k_print_driver_info(struct ath10k *ar); #ifdef CONFIG_ATH10K_DEBUGFS @@ -52,18 +62,22 @@ int ath10k_debug_create(struct ath10k *ar); void ath10k_debug_destroy(struct ath10k *ar); int ath10k_debug_register(struct ath10k *ar); void ath10k_debug_unregister(struct ath10k *ar); -void ath10k_debug_read_service_map(struct ath10k *ar, - void *service_map, - size_t map_size); -void ath10k_debug_read_target_stats(struct ath10k *ar, - struct wmi_stats_event *ev); +void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb); struct ath10k_fw_crash_data * ath10k_debug_get_new_fw_crash_data(struct ath10k *ar); void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len); - #define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++) +void ath10k_debug_get_et_strings(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u32 sset, u8 *data); +int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int sset); +void ath10k_debug_get_et_stats(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ethtool_stats *stats, u64 *data); + #else static inline int ath10k_debug_start(struct ath10k *ar) { @@ -92,14 +106,8 @@ static inline void ath10k_debug_unregister(struct ath10k *ar) { } -static inline void ath10k_debug_read_service_map(struct ath10k *ar, - void *service_map, - size_t map_size) -{ -} - -static inline void ath10k_debug_read_target_stats(struct ath10k *ar, - struct wmi_stats_event *ev) 
+static inline void ath10k_debug_fw_stats_process(struct ath10k *ar, + struct sk_buff *skb) { } @@ -116,6 +124,10 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar) #define ATH10K_DFS_STAT_INC(ar, c) do { } while (0) +#define ath10k_debug_get_et_strings NULL +#define ath10k_debug_get_et_sset_count NULL +#define ath10k_debug_get_et_stats NULL + #endif /* CONFIG_ATH10K_DEBUGFS */ #ifdef CONFIG_ATH10K_DEBUG diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h index 62323fea27e136c9fdad3913dfcc36a712cd86ec..0c92e0251e84f64fd0e8b53679deda60803749f9 100644 --- a/drivers/net/wireless/ath/ath10k/hif.h +++ b/drivers/net/wireless/ath/ath10k/hif.h @@ -20,6 +20,7 @@ #include #include "core.h" +#include "debug.h" struct ath10k_hif_sg_item { u16 transfer_id; @@ -31,11 +32,9 @@ struct ath10k_hif_sg_item { struct ath10k_hif_cb { int (*tx_completion)(struct ath10k *ar, - struct sk_buff *wbuf, - unsigned transfer_id); + struct sk_buff *wbuf); int (*rx_completion)(struct ath10k *ar, - struct sk_buff *wbuf, - u8 pipe_id); + struct sk_buff *wbuf); }; struct ath10k_hif_ops { @@ -43,6 +42,12 @@ struct ath10k_hif_ops { int (*tx_sg)(struct ath10k *ar, u8 pipe_id, struct ath10k_hif_sg_item *items, int n_items); + /* read firmware memory through the diagnose interface */ + int (*diag_read)(struct ath10k *ar, u32 address, void *buf, + size_t buf_len); + + int (*diag_write)(struct ath10k *ar, u32 address, const void *data, + int nbytes); /* * API to handle HIF-specific BMI message exchanges, this API is * synchronous and only allowed to be called from a context that @@ -80,6 +85,10 @@ struct ath10k_hif_ops { u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id); + u32 (*read32)(struct ath10k *ar, u32 address); + + void (*write32)(struct ath10k *ar, u32 address, u32 value); + /* Power up the device and enter BMI transfer mode for FW download */ int (*power_up)(struct ath10k *ar); @@ -98,6 +107,21 @@ static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id, return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items); } +static inline int ath10k_hif_diag_read(struct ath10k *ar, u32 address, void *buf, + size_t buf_len) +{ + return ar->hif.ops->diag_read(ar, address, buf, buf_len); +} + +static inline int ath10k_hif_diag_write(struct ath10k *ar, u32 address, + const void *data, int nbytes) +{ + if (!ar->hif.ops->diag_write) + return -EOPNOTSUPP; + + return ar->hif.ops->diag_write(ar, address, data, nbytes); +} + static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar, void *request, u32 request_len, void *response, u32 *response_len) @@ -177,4 +201,25 @@ static inline int ath10k_hif_resume(struct ath10k *ar) return ar->hif.ops->resume(ar); } +static inline u32 ath10k_hif_read32(struct ath10k *ar, u32 address) +{ + if (!ar->hif.ops->read32) { + ath10k_warn(ar, "hif read32 not supported\n"); + return 0xdeaddead; + } + + return ar->hif.ops->read32(ar, address); +} + +static inline void ath10k_hif_write32(struct ath10k *ar, + u32 address, u32 data) +{ + if (!ar->hif.ops->write32) { + ath10k_warn(ar, "hif write32 not supported\n"); + return; + } + + ar->hif.ops->write32(ar, address, data); +} + #endif /* _HIF_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 676bd4ed969bd5d9f63817ea6e5229c41a7c644e..f1946a6be442cb1d031b91b0556df1b3ae275b32 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -160,6 +160,7 @@ int ath10k_htc_send(struct ath10k_htc *htc, 
ath10k_htc_prepare_tx_skb(ep, skb); + skb_cb->eid = eid; skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE); ret = dma_mapping_error(dev, skb_cb->paddr); if (ret) @@ -197,15 +198,18 @@ err_pull: } static int ath10k_htc_tx_completion_handler(struct ath10k *ar, - struct sk_buff *skb, - unsigned int eid) + struct sk_buff *skb) { struct ath10k_htc *htc = &ar->htc; - struct ath10k_htc_ep *ep = &htc->endpoint[eid]; + struct ath10k_skb_cb *skb_cb; + struct ath10k_htc_ep *ep; if (WARN_ON_ONCE(!skb)) return 0; + skb_cb = ATH10K_SKB_CB(skb); + ep = &htc->endpoint[skb_cb->eid]; + ath10k_htc_notify_tx_completion(ep, skb); /* the skb now belongs to the completion handler */ @@ -317,8 +321,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc, } static int ath10k_htc_rx_completion_handler(struct ath10k *ar, - struct sk_buff *skb, - u8 pipe_id) + struct sk_buff *skb) { int status = 0; struct ath10k_htc *htc = &ar->htc; diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 3b44217a6c190b70749182037e051583983b6c48..1bd5545af903975fcc5651628b46c3a989f25678 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -126,6 +126,7 @@ enum htt_data_tx_ext_tid { * (HL hosts manage queues on the host ) * more_in_batch: only for HL hosts. indicates if more packets are * pending. this allows target to wait and aggregate + * freq: 0 means home channel of given vdev. intended for offchannel */ struct htt_data_tx_desc { u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */ @@ -133,7 +134,8 @@ struct htt_data_tx_desc { __le16 len; __le16 id; __le32 frags_paddr; - __le32 peerid; + __le16 peerid; + __le16 freq; u8 prefetch[0]; /* start of frame, for FW classification engine */ } __packed; @@ -156,6 +158,9 @@ enum htt_rx_ring_flags { HTT_RX_RING_FLAGS_PHY_DATA_RX = 1 << 15 }; +#define HTT_RX_RING_SIZE_MIN 128 +#define HTT_RX_RING_SIZE_MAX 2048 + struct htt_rx_ring_setup_ring { __le32 fw_idx_shadow_reg_paddr; __le32 rx_ring_base_paddr; @@ -725,7 +730,7 @@ static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test) */ struct htt_pktlog_msg { u8 pad[3]; - __le32 payload[1 /* or more */]; + u8 payload[0]; } __packed; struct htt_dbg_stats_rx_reorder_stats { diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 60d40a04508b1d8189f41dd88d417d76d68ac50a..9c782a42665e1aaf43bfbca441631ee58da50c09 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -25,19 +25,8 @@ #include -/* slightly larger than one large A-MPDU */ -#define HTT_RX_RING_SIZE_MIN 128 - -/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */ -#define HTT_RX_RING_SIZE_MAX 2048 - -#define HTT_RX_AVG_FRM_BYTES 1000 - -/* ms, very conservative */ -#define HTT_RX_HOST_LATENCY_MAX_MS 20 - -/* ms, conservative */ -#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10 +#define HTT_RX_RING_SIZE 1024 +#define HTT_RX_RING_FILL_LEVEL 1000 /* when under memory pressure rx ring refill may fail and needs a retry */ #define HTT_RX_RING_REFILL_RETRY_MS 50 @@ -45,68 +34,6 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb); static void ath10k_htt_txrx_compl_task(unsigned long ptr); -static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt) -{ - int size; - - /* - * It is expected that the host CPU will typically be able to - * service the rx indication from one A-MPDU before the rx - * indication from the subsequent A-MPDU happens, roughly 1-2 ms - * later. 
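
The htc.c change above records the endpoint id in the skb control buffer at transmit time, so the HIF tx-completion callback can shrink to taking just the skb. A userspace sketch of that stash-in-the-buffer pattern (struct buf and BUF_CB are hypothetical stand-ins for sk_buff and ATH10K_SKB_CB):

#include <stdio.h>

/* Per-buffer scratch area, like skb->cb in the kernel. */
struct buf {
        unsigned char cb[48];
        /* ... payload would follow ... */
};

struct buf_cb {
        unsigned char eid; /* endpoint id recorded at send time */
};

#define BUF_CB(b) ((struct buf_cb *)(b)->cb)

static void send_buf(struct buf *b, unsigned char eid)
{
        BUF_CB(b)->eid = eid; /* completion no longer needs an eid argument */
}

static void tx_complete(struct buf *b)
{
        printf("completed on endpoint %u\n", BUF_CB(b)->eid);
}

int main(void)
{
        struct buf b;

        send_buf(&b, 2);
        tx_complete(&b);
        return 0;
}
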
However, the rx ring should be sized very conservatively, - * to accomodate the worst reasonable delay before the host CPU - * services a rx indication interrupt. - * - * The rx ring need not be kept full of empty buffers. In theory, - * the htt host SW can dynamically track the low-water mark in the - * rx ring, and dynamically adjust the level to which the rx ring - * is filled with empty buffers, to dynamically meet the desired - * low-water mark. - * - * In contrast, it's difficult to resize the rx ring itself, once - * it's in use. Thus, the ring itself should be sized very - * conservatively, while the degree to which the ring is filled - * with empty buffers should be sized moderately conservatively. - */ - - /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ - size = - htt->max_throughput_mbps + - 1000 / - (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS; - - if (size < HTT_RX_RING_SIZE_MIN) - size = HTT_RX_RING_SIZE_MIN; - - if (size > HTT_RX_RING_SIZE_MAX) - size = HTT_RX_RING_SIZE_MAX; - - size = roundup_pow_of_two(size); - - return size; -} - -static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt) -{ - int size; - - /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ - size = - htt->max_throughput_mbps * - 1000 / - (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS; - - /* - * Make sure the fill level is at least 1 less than the ring size. - * Leaving 1 element empty allows the SW to easily distinguish - * between a full ring vs. an empty ring. - */ - if (size >= htt->rx_ring.size) - size = htt->rx_ring.size - 1; - - return size; -} - static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt) { struct sk_buff *skb; @@ -291,50 +218,38 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) htt->rx_ring.sw_rd_idx.msdu_payld = idx; htt->rx_ring.fill_cnt--; - return msdu; -} + dma_unmap_single(htt->ar->dev, + ATH10K_SKB_CB(msdu)->paddr, + msdu->len + skb_tailroom(msdu), + DMA_FROM_DEVICE); + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", + msdu->data, msdu->len + skb_tailroom(msdu)); -static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb) -{ - struct sk_buff *next; - - while (skb) { - next = skb->next; - dev_kfree_skb_any(skb); - skb = next; - } + return msdu; } /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, u8 **fw_desc, int *fw_desc_len, - struct sk_buff **head_msdu, - struct sk_buff **tail_msdu, - u32 *attention) + struct sk_buff_head *amsdu) { struct ath10k *ar = htt->ar; int msdu_len, msdu_chaining = 0; - struct sk_buff *msdu, *next; + struct sk_buff *msdu; struct htt_rx_desc *rx_desc; lockdep_assert_held(&htt->rx_ring.lock); - if (htt->rx_confused) { - ath10k_warn(ar, "htt is confused. 
refusing rx\n"); - return -1; - } - - msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt); - while (msdu) { + for (;;) { int last_msdu, msdu_len_invalid, msdu_chained; - dma_unmap_single(htt->ar->dev, - ATH10K_SKB_CB(msdu)->paddr, - msdu->len + skb_tailroom(msdu), - DMA_FROM_DEVICE); + msdu = ath10k_htt_rx_netbuf_pop(htt); + if (!msdu) { + __skb_queue_purge(amsdu); + return -ENOENT; + } - ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ", - msdu->data, msdu->len + skb_tailroom(msdu)); + __skb_queue_tail(amsdu, msdu); rx_desc = (struct htt_rx_desc *)msdu->data; @@ -353,19 +268,10 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, */ if (!(__le32_to_cpu(rx_desc->attention.flags) & RX_ATTENTION_FLAGS_MSDU_DONE)) { - ath10k_htt_rx_free_msdu_chain(*head_msdu); - *head_msdu = NULL; - msdu = NULL; - ath10k_err(ar, "htt rx stopped. cannot recover\n"); - htt->rx_confused = true; - break; + __skb_queue_purge(amsdu); + return -EIO; } - *attention |= __le32_to_cpu(rx_desc->attention.flags) & - (RX_ATTENTION_FLAGS_TKIP_MIC_ERR | - RX_ATTENTION_FLAGS_DECRYPT_ERR | - RX_ATTENTION_FLAGS_FCS_ERR | - RX_ATTENTION_FLAGS_MGMT_TYPE); /* * Copy the FW rx descriptor for this MSDU from the rx * indication message into the MSDU's netbuf. HL uses the @@ -422,43 +328,32 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE)); msdu_len -= msdu->len; - /* FIXME: Do chained buffers include htt_rx_desc or not? */ + /* Note: Chained buffers do not contain rx descriptor */ while (msdu_chained--) { - struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt); - - dma_unmap_single(htt->ar->dev, - ATH10K_SKB_CB(next)->paddr, - next->len + skb_tailroom(next), - DMA_FROM_DEVICE); - - ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, - "htt rx chained: ", next->data, - next->len + skb_tailroom(next)); - - skb_trim(next, 0); - skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE)); - msdu_len -= next->len; + msdu = ath10k_htt_rx_netbuf_pop(htt); + if (!msdu) { + __skb_queue_purge(amsdu); + return -ENOENT; + } - msdu->next = next; - msdu = next; + __skb_queue_tail(amsdu, msdu); + skb_trim(msdu, 0); + skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE)); + msdu_len -= msdu->len; msdu_chaining = 1; } last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) & RX_MSDU_END_INFO0_LAST_MSDU; - if (last_msdu) { - msdu->next = NULL; - break; - } + trace_ath10k_htt_rx_desc(ar, &rx_desc->attention, + sizeof(*rx_desc) - sizeof(u32)); - next = ath10k_htt_rx_netbuf_pop(htt); - msdu->next = next; - msdu = next; + if (last_msdu) + break; } - *tail_msdu = msdu; - if (*head_msdu == NULL) + if (skb_queue_empty(amsdu)) msdu_chaining = -1; /* @@ -492,25 +387,20 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) size_t size; struct timer_list *timer = &htt->rx_ring.refill_retry_timer; - htt->rx_ring.size = ath10k_htt_rx_ring_size(htt); + htt->rx_confused = false; + + /* XXX: The fill level could be changed during runtime in response to + * the host processing latency. Is this really worth it? + */ + htt->rx_ring.size = HTT_RX_RING_SIZE; + htt->rx_ring.size_mask = htt->rx_ring.size - 1; + htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL; + if (!is_power_of_2(htt->rx_ring.size)) { ath10k_warn(ar, "htt rx ring size is not power of 2\n"); return -EINVAL; } - htt->rx_ring.size_mask = htt->rx_ring.size - 1; - - /* - * Set the initial value for the level to which the rx ring - * should be filled, based on the max throughput and the - * worst likely latency for the host to fill the rx ring - * with new buffers. 
In theory, this fill level can be - * dynamically adjusted from the initial value set here, to - * reflect the actual host latency rather than a - * conservative assumption about the host latency. - */ - htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt); - htt->rx_ring.netbufs_ring = kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *), GFP_KERNEL); @@ -581,73 +471,50 @@ static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar, enum htt_rx_mpdu_encrypt_type type) { switch (type) { + case HTT_RX_MPDU_ENCRYPT_NONE: + return 0; case HTT_RX_MPDU_ENCRYPT_WEP40: case HTT_RX_MPDU_ENCRYPT_WEP104: - return 4; + return IEEE80211_WEP_IV_LEN; case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: - case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */ case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: - case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */ + return IEEE80211_TKIP_IV_LEN; case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: - return 8; - case HTT_RX_MPDU_ENCRYPT_NONE: - return 0; + return IEEE80211_CCMP_HDR_LEN; + case HTT_RX_MPDU_ENCRYPT_WEP128: + case HTT_RX_MPDU_ENCRYPT_WAPI: + break; } - ath10k_warn(ar, "unknown encryption type %d\n", type); + ath10k_warn(ar, "unsupported encryption type %d\n", type); return 0; } +#define MICHAEL_MIC_LEN 8 + static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar, enum htt_rx_mpdu_encrypt_type type) { switch (type) { case HTT_RX_MPDU_ENCRYPT_NONE: + return 0; case HTT_RX_MPDU_ENCRYPT_WEP40: case HTT_RX_MPDU_ENCRYPT_WEP104: - case HTT_RX_MPDU_ENCRYPT_WEP128: - case HTT_RX_MPDU_ENCRYPT_WAPI: - return 0; + return IEEE80211_WEP_ICV_LEN; case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: - return 4; + return IEEE80211_TKIP_ICV_LEN; case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: - return 8; + return IEEE80211_CCMP_MIC_LEN; + case HTT_RX_MPDU_ENCRYPT_WEP128: + case HTT_RX_MPDU_ENCRYPT_WAPI: + break; } - ath10k_warn(ar, "unknown encryption type %d\n", type); + ath10k_warn(ar, "unsupported encryption type %d\n", type); return 0; } -/* Applies for first msdu in chain, before altering it. */ -static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb) -{ - struct htt_rx_desc *rxd; - enum rx_msdu_decap_format fmt; - - rxd = (void *)skb->data - sizeof(*rxd); - fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), - RX_MSDU_START_INFO1_DECAP_FORMAT); - - if (fmt == RX_MSDU_DECAP_RAW) - return (void *)skb->data; - - return (void *)skb->data - RX_HTT_HDR_STATUS_LEN; -} - -/* This function only applies for first msdu in an msdu chain */ -static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr) -{ - u8 *qc; - - if (ieee80211_is_data_qos(hdr->frame_control)) { - qc = ieee80211_get_qos_ctl(hdr); - if (qc[0] & 0x80) - return true; - } - return false; -} - struct rfc1042_hdr { u8 llc_dsap; u8 llc_ssap; @@ -682,23 +549,34 @@ static const u8 rx_legacy_rate_idx[] = { }; static void ath10k_htt_rx_h_rates(struct ath10k *ar, - enum ieee80211_band band, - u8 info0, u32 info1, u32 info2, - struct ieee80211_rx_status *status) + struct ieee80211_rx_status *status, + struct htt_rx_desc *rxd) { + enum ieee80211_band band; u8 cck, rate, rate_idx, bw, sgi, mcs, nss; u8 preamble = 0; + u32 info1, info2, info3; - /* Check if valid fields */ - if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID)) + /* Band value can't be set as undefined but freq can be 0 - use that to + * determine whether band is provided. + * + * FIXME: Perhaps this can go away if CCK rate reporting is a little + * reworked? 
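
The crypto_param_len/crypto_tail_len rewrite above replaces magic numbers with the mac80211 IEEE80211_*_LEN constants. A compilable sketch with those standard per-MPDU overheads spelled out (the enum and function names are hypothetical; the byte counts are the usual 802.11 cipher values):

#include <stdio.h>

enum cipher { CIPHER_NONE, CIPHER_WEP, CIPHER_TKIP, CIPHER_CCMP };

/* Bytes inserted between the 802.11 header and the payload. */
static int crypto_hdr_len(enum cipher c)
{
        switch (c) {
        case CIPHER_WEP:  return 4; /* IEEE80211_WEP_IV_LEN */
        case CIPHER_TKIP: return 8; /* IEEE80211_TKIP_IV_LEN */
        case CIPHER_CCMP: return 8; /* IEEE80211_CCMP_HDR_LEN */
        default:          return 0;
        }
}

/* Bytes appended after the payload (TKIP's 8-byte MIC is extra). */
static int crypto_tail_len(enum cipher c)
{
        switch (c) {
        case CIPHER_WEP:  return 4; /* IEEE80211_WEP_ICV_LEN */
        case CIPHER_TKIP: return 4; /* IEEE80211_TKIP_ICV_LEN */
        case CIPHER_CCMP: return 8; /* IEEE80211_CCMP_MIC_LEN */
        default:          return 0;
        }
}

int main(void)
{
        printf("ccmp: hdr %d tail %d\n",
               crypto_hdr_len(CIPHER_CCMP), crypto_tail_len(CIPHER_CCMP));
        return 0;
}
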
+ */ + if (!status->freq) return; - preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE); + band = status->band; + info1 = __le32_to_cpu(rxd->ppdu_start.info1); + info2 = __le32_to_cpu(rxd->ppdu_start.info2); + info3 = __le32_to_cpu(rxd->ppdu_start.info3); + + preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE); switch (preamble) { case HTT_RX_LEGACY: - cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK; - rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE); + cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT; + rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE); rate_idx = 0; if (rate < 0x08 || rate > 0x0F) @@ -725,11 +603,11 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, break; case HTT_RX_HT: case HTT_RX_HT_WITH_TXBF: - /* HT-SIG - Table 20-11 in info1 and info2 */ - mcs = info1 & 0x1F; + /* HT-SIG - Table 20-11 in info2 and info3 */ + mcs = info2 & 0x1F; nss = mcs >> 3; - bw = (info1 >> 7) & 1; - sgi = (info2 >> 7) & 1; + bw = (info2 >> 7) & 1; + sgi = (info3 >> 7) & 1; status->rate_idx = mcs; status->flag |= RX_FLAG_HT; @@ -740,12 +618,12 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, break; case HTT_RX_VHT: case HTT_RX_VHT_WITH_TXBF: - /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2 + /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3 TODO check this */ - mcs = (info2 >> 4) & 0x0F; - nss = ((info1 >> 10) & 0x07) + 1; - bw = info1 & 3; - sgi = info2 & 1; + mcs = (info3 >> 4) & 0x0F; + nss = ((info2 >> 10) & 0x07) + 1; + bw = info2 & 3; + sgi = info3 & 1; status->rate_idx = mcs; status->vht_nss = nss; @@ -773,41 +651,6 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, } } -static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt, - struct ieee80211_rx_status *rx_status, - struct sk_buff *skb, - enum htt_rx_mpdu_encrypt_type enctype, - enum rx_msdu_decap_format fmt, - bool dot11frag) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - - rx_status->flag &= ~(RX_FLAG_DECRYPTED | - RX_FLAG_IV_STRIPPED | - RX_FLAG_MMIC_STRIPPED); - - if (enctype == HTT_RX_MPDU_ENCRYPT_NONE) - return; - - /* - * There's no explicit rx descriptor flag to indicate whether a given - * frame has been decrypted or not. We're forced to use the decap - * format as an implicit indication. However fragmentation rx is always - * raw and it probably never reports undecrypted raws. - * - * This makes sure sniffed frames are reported as-is without stripping - * the protected flag. - */ - if (fmt == RX_MSDU_DECAP_RAW && !dot11frag) - return; - - rx_status->flag |= RX_FLAG_DECRYPTED | - RX_FLAG_IV_STRIPPED | - RX_FLAG_MMIC_STRIPPED; - hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) & - ~IEEE80211_FCTL_PROTECTED); -} - static bool ath10k_htt_rx_h_channel(struct ath10k *ar, struct ieee80211_rx_status *status) { @@ -828,6 +671,72 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar, return true; } +static void ath10k_htt_rx_h_signal(struct ath10k *ar, + struct ieee80211_rx_status *status, + struct htt_rx_desc *rxd) +{ + /* FIXME: Get real NF */ + status->signal = ATH10K_DEFAULT_NOISE_FLOOR + + rxd->ppdu_start.rssi_comb; + status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; +} + +static void ath10k_htt_rx_h_mactime(struct ath10k *ar, + struct ieee80211_rx_status *status, + struct htt_rx_desc *rxd) +{ + /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This + * means all prior MSDUs in a PPDU are reported to mac80211 without the + * TSF. Is it worth holding frames until end of PPDU is known? + * + * FIXME: Can we get/compute 64bit TSF? 
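
The PPDU helpers introduced around here latch the 32-bit TSF into the shared rx status only at the end of a PPDU and reset the per-PPDU fields when a new PPDU starts. A toy model of that lifecycle (stand-in types, userspace only; not the driver's struct):

#include <stdio.h>
#include <stdint.h>

struct rx_status {
        uint32_t freq;
        uint32_t mactime;
        int has_mactime;
};

static void h_ppdu(struct rx_status *st, int first, int last,
                   uint32_t freq, uint32_t tsf)
{
        if (first) {
                st->freq = freq;   /* refresh per-PPDU channel info */
                st->has_mactime = 0;
        }
        if (last) {
                st->mactime = tsf; /* TSF is only known at end of PPDU */
                st->has_mactime = 1;
        }
}

int main(void)
{
        struct rx_status st = { 0 };

        h_ppdu(&st, 1, 1, 2412, 123456);
        printf("freq %u tsf %u\n", st.freq, st.mactime);
        return 0;
}
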
+ */ + status->mactime = __le32_to_cpu(rxd->ppdu_end.tsf_timestamp); + status->flag |= RX_FLAG_MACTIME_END; +} + +static void ath10k_htt_rx_h_ppdu(struct ath10k *ar, + struct sk_buff_head *amsdu, + struct ieee80211_rx_status *status) +{ + struct sk_buff *first; + struct htt_rx_desc *rxd; + bool is_first_ppdu; + bool is_last_ppdu; + + if (skb_queue_empty(amsdu)) + return; + + first = skb_peek(amsdu); + rxd = (void *)first->data - sizeof(*rxd); + + is_first_ppdu = !!(rxd->attention.flags & + __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU)); + is_last_ppdu = !!(rxd->attention.flags & + __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU)); + + if (is_first_ppdu) { + /* New PPDU starts so clear out the old per-PPDU status. */ + status->freq = 0; + status->rate_idx = 0; + status->vht_nss = 0; + status->vht_flag &= ~RX_VHT_FLAG_80MHZ; + status->flag &= ~(RX_FLAG_HT | + RX_FLAG_VHT | + RX_FLAG_SHORT_GI | + RX_FLAG_40MHZ | + RX_FLAG_MACTIME_END); + status->flag |= RX_FLAG_NO_SIGNAL_VAL; + + ath10k_htt_rx_h_signal(ar, status, rxd); + ath10k_htt_rx_h_channel(ar, status); + ath10k_htt_rx_h_rates(ar, status, rxd); + } + + if (is_last_ppdu) + ath10k_htt_rx_h_mactime(ar, status, rxd); +} + static const char * const tid_to_ac[] = { "BE", "BK", @@ -892,6 +801,8 @@ static void ath10k_process_rx(struct ath10k *ar, !!(status->flag & RX_FLAG_AMSDU_MORE)); ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ", skb->data, skb->len); + trace_ath10k_rx_hdr(ar, skb->data, skb->len); + trace_ath10k_rx_payload(ar, skb->data, skb->len); ieee80211_rx(ar->hw, skb); } @@ -902,187 +813,263 @@ static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr) return round_up(ieee80211_hdrlen(hdr->frame_control), 4); } -static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt, - struct ieee80211_rx_status *rx_status, - struct sk_buff *skb_in) +static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, + struct sk_buff *msdu, + struct ieee80211_rx_status *status, + enum htt_rx_mpdu_encrypt_type enctype, + bool is_decrypted) { - struct ath10k *ar = htt->ar; + struct ieee80211_hdr *hdr; struct htt_rx_desc *rxd; - struct sk_buff *skb = skb_in; - struct sk_buff *first; - enum rx_msdu_decap_format fmt; - enum htt_rx_mpdu_encrypt_type enctype; + size_t hdr_len; + size_t crypto_len; + bool is_first; + bool is_last; + + rxd = (void *)msdu->data - sizeof(*rxd); + is_first = !!(rxd->msdu_end.info0 & + __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); + is_last = !!(rxd->msdu_end.info0 & + __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); + + /* Delivered decapped frame: + * [802.11 header] + * [crypto param] <-- can be trimmed if !fcs_err && + * !decrypt_err && !peer_idx_invalid + * [amsdu header] <-- only if A-MSDU + * [rfc1042/llc] + * [payload] + * [FCS] <-- at end, needs to be trimmed + */ + + /* This probably shouldn't happen but warn just in case */ + if (unlikely(WARN_ON_ONCE(!is_first))) + return; + + /* This probably shouldn't happen but warn just in case */ + if (unlikely(WARN_ON_ONCE(!(is_first && is_last)))) + return; + + skb_trim(msdu, msdu->len - FCS_LEN); + + /* In most cases this will be true for sniffed frames. It makes sense + * to deliver them as-is without stripping the crypto param. This would + * also make sense for software based decryption (which is not + * implemented in ath10k). + * + * If there's no error then the frame is decrypted. At least that is + * the case for frames that come in via fragmented rx indication. + */ + if (!is_decrypted) + return; + + /* The payload is decrypted so strip crypto params. 
Start from tail + * since hdr is used to compute some stuff. + */ + + hdr = (void *)msdu->data; + + /* Tail */ + skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype)); + + /* MMIC */ + if (!ieee80211_has_morefrags(hdr->frame_control) && + enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) + skb_trim(msdu, msdu->len - 8); + + /* Head */ + hdr_len = ieee80211_hdrlen(hdr->frame_control); + crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); + + memmove((void *)msdu->data + crypto_len, + (void *)msdu->data, hdr_len); + skb_pull(msdu, crypto_len); +} + +static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar, + struct sk_buff *msdu, + struct ieee80211_rx_status *status, + const u8 first_hdr[64]) +{ struct ieee80211_hdr *hdr; - u8 hdr_buf[64], da[ETH_ALEN], sa[ETH_ALEN], *qos; - unsigned int hdr_len; + size_t hdr_len; + u8 da[ETH_ALEN]; + u8 sa[ETH_ALEN]; - rxd = (void *)skb->data - sizeof(*rxd); - enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), - RX_MPDU_START_INFO0_ENCRYPT_TYPE); + /* Delivered decapped frame: + * [nwifi 802.11 header] <-- replaced with 802.11 hdr + * [rfc1042/llc] + * + * Note: The nwifi header doesn't have QoS Control and is + * (always?) a 3addr frame. + * + * Note2: There's no A-MSDU subframe header. Even if it's part + * of an A-MSDU. + */ + + /* pull decapped header and copy SA & DA */ + hdr = (struct ieee80211_hdr *)msdu->data; + hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr); + ether_addr_copy(da, ieee80211_get_DA(hdr)); + ether_addr_copy(sa, ieee80211_get_SA(hdr)); + skb_pull(msdu, hdr_len); - hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status; + /* push original 802.11 header */ + hdr = (struct ieee80211_hdr *)first_hdr; hdr_len = ieee80211_hdrlen(hdr->frame_control); - memcpy(hdr_buf, hdr, hdr_len); - hdr = (struct ieee80211_hdr *)hdr_buf; - - first = skb; - while (skb) { - void *decap_hdr; - int len; - - rxd = (void *)skb->data - sizeof(*rxd); - fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), - RX_MSDU_START_INFO1_DECAP_FORMAT); - decap_hdr = (void *)rxd->rx_hdr_status; - - skb->ip_summed = ath10k_htt_rx_get_csum_state(skb); - - /* First frame in an A-MSDU chain has more decapped data. 
*/ - if (skb == first) { - len = round_up(ieee80211_hdrlen(hdr->frame_control), 4); - len += round_up(ath10k_htt_rx_crypto_param_len(ar, - enctype), 4); - decap_hdr += len; - } + memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); - switch (fmt) { - case RX_MSDU_DECAP_RAW: - /* remove trailing FCS */ - skb_trim(skb, skb->len - FCS_LEN); - break; - case RX_MSDU_DECAP_NATIVE_WIFI: - /* pull decapped header and copy SA & DA */ - hdr = (struct ieee80211_hdr *)skb->data; - hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr); - ether_addr_copy(da, ieee80211_get_DA(hdr)); - ether_addr_copy(sa, ieee80211_get_SA(hdr)); - skb_pull(skb, hdr_len); - - /* push original 802.11 header */ - hdr = (struct ieee80211_hdr *)hdr_buf; - hdr_len = ieee80211_hdrlen(hdr->frame_control); - memcpy(skb_push(skb, hdr_len), hdr, hdr_len); - - /* original A-MSDU header has the bit set but we're - * not including A-MSDU subframe header */ - hdr = (struct ieee80211_hdr *)skb->data; - qos = ieee80211_get_qos_ctl(hdr); - qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; - - /* original 802.11 header has a different DA and in - * case of 4addr it may also have different SA - */ - ether_addr_copy(ieee80211_get_DA(hdr), da); - ether_addr_copy(ieee80211_get_SA(hdr), sa); - break; - case RX_MSDU_DECAP_ETHERNET2_DIX: - /* strip ethernet header and insert decapped 802.11 - * header, amsdu subframe header and rfc1042 header */ + /* original 802.11 header has a different DA and in + * case of 4addr it may also have different SA + */ + hdr = (struct ieee80211_hdr *)msdu->data; + ether_addr_copy(ieee80211_get_DA(hdr), da); + ether_addr_copy(ieee80211_get_SA(hdr), sa); +} - len = 0; - len += sizeof(struct rfc1042_hdr); - len += sizeof(struct amsdu_subframe_hdr); +static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar, + struct sk_buff *msdu, + enum htt_rx_mpdu_encrypt_type enctype) +{ + struct ieee80211_hdr *hdr; + struct htt_rx_desc *rxd; + size_t hdr_len, crypto_len; + void *rfc1042; + bool is_first, is_last, is_amsdu; - skb_pull(skb, sizeof(struct ethhdr)); - memcpy(skb_push(skb, len), decap_hdr, len); - memcpy(skb_push(skb, hdr_len), hdr, hdr_len); - break; - case RX_MSDU_DECAP_8023_SNAP_LLC: - /* insert decapped 802.11 header making a singly - * A-MSDU */ - memcpy(skb_push(skb, hdr_len), hdr, hdr_len); - break; - } + rxd = (void *)msdu->data - sizeof(*rxd); + hdr = (void *)rxd->rx_hdr_status; - skb_in = skb; - ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype, fmt, - false); - skb = skb->next; - skb_in->next = NULL; + is_first = !!(rxd->msdu_end.info0 & + __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); + is_last = !!(rxd->msdu_end.info0 & + __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); + is_amsdu = !(is_first && is_last); - if (skb) - rx_status->flag |= RX_FLAG_AMSDU_MORE; - else - rx_status->flag &= ~RX_FLAG_AMSDU_MORE; + rfc1042 = hdr; + + if (is_first) { + hdr_len = ieee80211_hdrlen(hdr->frame_control); + crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); - ath10k_process_rx(htt->ar, rx_status, skb_in); + rfc1042 += round_up(hdr_len, 4) + + round_up(crypto_len, 4); } - /* FIXME: It might be nice to re-assemble the A-MSDU when there's a - * monitor interface active for sniffing purposes. 
*/ + if (is_amsdu) + rfc1042 += sizeof(struct amsdu_subframe_hdr); + + return rfc1042; } -static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, - struct ieee80211_rx_status *rx_status, - struct sk_buff *skb) +static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar, + struct sk_buff *msdu, + struct ieee80211_rx_status *status, + const u8 first_hdr[64], + enum htt_rx_mpdu_encrypt_type enctype) { - struct ath10k *ar = htt->ar; - struct htt_rx_desc *rxd; struct ieee80211_hdr *hdr; - enum rx_msdu_decap_format fmt; - enum htt_rx_mpdu_encrypt_type enctype; - int hdr_len; + struct ethhdr *eth; + size_t hdr_len; void *rfc1042; + u8 da[ETH_ALEN]; + u8 sa[ETH_ALEN]; - /* This shouldn't happen. If it does than it may be a FW bug. */ - if (skb->next) { - ath10k_warn(ar, "htt rx received chained non A-MSDU frame\n"); - ath10k_htt_rx_free_msdu_chain(skb->next); - skb->next = NULL; - } + /* Delivered decapped frame: + * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc + * [payload] + */ - rxd = (void *)skb->data - sizeof(*rxd); - fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), - RX_MSDU_START_INFO1_DECAP_FORMAT); - enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), - RX_MPDU_START_INFO0_ENCRYPT_TYPE); - hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status; + rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype); + if (WARN_ON_ONCE(!rfc1042)) + return; + + /* pull decapped header and copy SA & DA */ + eth = (struct ethhdr *)msdu->data; + ether_addr_copy(da, eth->h_dest); + ether_addr_copy(sa, eth->h_source); + skb_pull(msdu, sizeof(struct ethhdr)); + + /* push rfc1042/llc/snap */ + memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042, + sizeof(struct rfc1042_hdr)); + + /* push original 802.11 header */ + hdr = (struct ieee80211_hdr *)first_hdr; hdr_len = ieee80211_hdrlen(hdr->frame_control); + memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); - skb->ip_summed = ath10k_htt_rx_get_csum_state(skb); + /* original 802.11 header has a different DA and in + * case of 4addr it may also have different SA + */ + hdr = (struct ieee80211_hdr *)msdu->data; + ether_addr_copy(ieee80211_get_DA(hdr), da); + ether_addr_copy(ieee80211_get_SA(hdr), sa); +} - switch (fmt) { +static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar, + struct sk_buff *msdu, + struct ieee80211_rx_status *status, + const u8 first_hdr[64]) +{ + struct ieee80211_hdr *hdr; + size_t hdr_len; + + /* Delivered decapped frame: + * [amsdu header] <-- replaced with 802.11 hdr + * [rfc1042/llc] + * [payload] + */ + + skb_pull(msdu, sizeof(struct amsdu_subframe_hdr)); + + hdr = (struct ieee80211_hdr *)first_hdr; + hdr_len = ieee80211_hdrlen(hdr->frame_control); + memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); +} + +static void ath10k_htt_rx_h_undecap(struct ath10k *ar, + struct sk_buff *msdu, + struct ieee80211_rx_status *status, + u8 first_hdr[64], + enum htt_rx_mpdu_encrypt_type enctype, + bool is_decrypted) +{ + struct htt_rx_desc *rxd; + enum rx_msdu_decap_format decap; + struct ieee80211_hdr *hdr; + + /* First msdu's decapped header: + * [802.11 header] <-- padded to 4 bytes long + * [crypto param] <-- padded to 4 bytes long + * [amsdu header] <-- only if A-MSDU + * [rfc1042/llc] + * + * Other (2nd, 3rd, ..) 
msdu's decapped header: + * [amsdu header] <-- only if A-MSDU + * [rfc1042/llc] + */ + + rxd = (void *)msdu->data - sizeof(*rxd); + hdr = (void *)rxd->rx_hdr_status; + decap = MS(__le32_to_cpu(rxd->msdu_start.info1), + RX_MSDU_START_INFO1_DECAP_FORMAT); + + switch (decap) { case RX_MSDU_DECAP_RAW: - /* remove trailing FCS */ - skb_trim(skb, skb->len - FCS_LEN); + ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype, + is_decrypted); break; case RX_MSDU_DECAP_NATIVE_WIFI: - /* Pull decapped header */ - hdr = (struct ieee80211_hdr *)skb->data; - hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr); - skb_pull(skb, hdr_len); - - /* Push original header */ - hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status; - hdr_len = ieee80211_hdrlen(hdr->frame_control); - memcpy(skb_push(skb, hdr_len), hdr, hdr_len); + ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr); break; case RX_MSDU_DECAP_ETHERNET2_DIX: - /* strip ethernet header and insert decapped 802.11 header and - * rfc1042 header */ - - rfc1042 = hdr; - rfc1042 += roundup(hdr_len, 4); - rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(ar, - enctype), 4); - - skb_pull(skb, sizeof(struct ethhdr)); - memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)), - rfc1042, sizeof(struct rfc1042_hdr)); - memcpy(skb_push(skb, hdr_len), hdr, hdr_len); + ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype); break; case RX_MSDU_DECAP_8023_SNAP_LLC: - /* remove A-MSDU subframe header and insert - * decapped 802.11 header. rfc1042 header is already there */ - - skb_pull(skb, sizeof(struct amsdu_subframe_hdr)); - memcpy(skb_push(skb, hdr_len), hdr, hdr_len); + ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr); break; } - - ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype, fmt, false); - - ath10k_process_rx(htt->ar, rx_status, skb); } static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb) @@ -1116,10 +1103,128 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb) return CHECKSUM_UNNECESSARY; } -static int ath10k_unchain_msdu(struct sk_buff *msdu_head) +static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu) +{ + msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu); +} + +static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, + struct sk_buff_head *amsdu, + struct ieee80211_rx_status *status) { - struct sk_buff *next = msdu_head->next; - struct sk_buff *to_free = next; + struct sk_buff *first; + struct sk_buff *last; + struct sk_buff *msdu; + struct htt_rx_desc *rxd; + struct ieee80211_hdr *hdr; + enum htt_rx_mpdu_encrypt_type enctype; + u8 first_hdr[64]; + u8 *qos; + size_t hdr_len; + bool has_fcs_err; + bool has_crypto_err; + bool has_tkip_err; + bool has_peer_idx_invalid; + bool is_decrypted; + u32 attention; + + if (skb_queue_empty(amsdu)) + return; + + first = skb_peek(amsdu); + rxd = (void *)first->data - sizeof(*rxd); + + enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), + RX_MPDU_START_INFO0_ENCRYPT_TYPE); + + /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11 + * decapped header. It'll be used for undecapping of each MSDU. + */ + hdr = (void *)rxd->rx_hdr_status; + hdr_len = ieee80211_hdrlen(hdr->frame_control); + memcpy(first_hdr, hdr, hdr_len); + + /* Each A-MSDU subframe will use the original header as the base and be + * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl. + */ + hdr = (void *)first_hdr; + qos = ieee80211_get_qos_ctl(hdr); + qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; + + /* Some attention flags are valid only in the last MSDU. 
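
ath10k_htt_rx_h_undecap() above fans out on the hardware decap format, with a dedicated restore routine per format. A compact sketch of that dispatch, reducing each handler to a one-line summary of what the corresponding ath10k helper does (names here are illustrative):

#include <stdio.h>

enum decap_fmt { DECAP_RAW, DECAP_NWIFI, DECAP_ETH, DECAP_SNAP };

static void undecap(enum decap_fmt fmt)
{
        switch (fmt) {
        case DECAP_RAW:
                printf("raw: trim FCS, strip crypto params if decrypted\n");
                break;
        case DECAP_NWIFI:
                printf("nwifi: swap 3addr hdr for original 802.11 hdr\n");
                break;
        case DECAP_ETH:
                printf("eth: drop ethhdr, push rfc1042 + 802.11 hdr\n");
                break;
        case DECAP_SNAP:
                printf("snap: drop A-MSDU subframe hdr, push 802.11 hdr\n");
                break;
        }
}

int main(void)
{
        undecap(DECAP_ETH);
        return 0;
}
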
*/ + last = skb_peek_tail(amsdu); + rxd = (void *)last->data - sizeof(*rxd); + attention = __le32_to_cpu(rxd->attention.flags); + + has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR); + has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR); + has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR); + has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID); + + /* Note: If hardware captures an encrypted frame that it can't decrypt, + * e.g. due to fcs error, missing peer or invalid key data it will + * report the frame as raw. + */ + is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE && + !has_fcs_err && + !has_crypto_err && + !has_peer_idx_invalid); + + /* Clear per-MPDU flags while leaving per-PPDU flags intact. */ + status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | + RX_FLAG_MMIC_ERROR | + RX_FLAG_DECRYPTED | + RX_FLAG_IV_STRIPPED | + RX_FLAG_MMIC_STRIPPED); + + if (has_fcs_err) + status->flag |= RX_FLAG_FAILED_FCS_CRC; + + if (has_tkip_err) + status->flag |= RX_FLAG_MMIC_ERROR; + + if (is_decrypted) + status->flag |= RX_FLAG_DECRYPTED | + RX_FLAG_IV_STRIPPED | + RX_FLAG_MMIC_STRIPPED; + + skb_queue_walk(amsdu, msdu) { + ath10k_htt_rx_h_csum_offload(msdu); + ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype, + is_decrypted); + + /* Undecapping involves copying the original 802.11 header back + * to sk_buff. If frame is protected and hardware has decrypted + * it then remove the protected bit. + */ + if (!is_decrypted) + continue; + + hdr = (void *)msdu->data; + hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); + } +} + +static void ath10k_htt_rx_h_deliver(struct ath10k *ar, + struct sk_buff_head *amsdu, + struct ieee80211_rx_status *status) +{ + struct sk_buff *msdu; + + while ((msdu = __skb_dequeue(amsdu))) { + /* Setup per-MSDU flags */ + if (skb_queue_empty(amsdu)) + status->flag &= ~RX_FLAG_AMSDU_MORE; + else + status->flag |= RX_FLAG_AMSDU_MORE; + + ath10k_process_rx(ar, status, msdu); + } +} + +static int ath10k_unchain_msdu(struct sk_buff_head *amsdu) +{ + struct sk_buff *skb, *first; int space; int total_len = 0; @@ -1130,113 +1235,142 @@ static int ath10k_unchain_msdu(struct sk_buff *msdu_head) * skb? */ - msdu_head->next = NULL; + first = __skb_dequeue(amsdu); /* Allocate total length all at once. */ - while (next) { - total_len += next->len; - next = next->next; - } + skb_queue_walk(amsdu, skb) + total_len += skb->len; - space = total_len - skb_tailroom(msdu_head); + space = total_len - skb_tailroom(first); if ((space > 0) && - (pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) { + (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) { /* TODO: bump some rx-oom error stat */ /* put it back together so we can free the * whole list at once. */ - msdu_head->next = to_free; + __skb_queue_head(amsdu, first); return -1; } /* Walk list again, copying contents into * msdu_head */ - next = to_free; - while (next) { - skb_copy_from_linear_data(next, skb_put(msdu_head, next->len), - next->len); - next = next->next; + while ((skb = __skb_dequeue(amsdu))) { + skb_copy_from_linear_data(skb, skb_put(first, skb->len), + skb->len); + dev_kfree_skb_any(skb); } - /* If here, we have consolidated skb. Free the - * fragments and pass the main skb on up the - * stack. 
- */ - ath10k_htt_rx_free_msdu_chain(to_free); + __skb_queue_head(amsdu, first); return 0; } -static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt, - struct sk_buff *head, - enum htt_rx_mpdu_status status, - bool channel_set, - u32 attention) +static void ath10k_htt_rx_h_unchain(struct ath10k *ar, + struct sk_buff_head *amsdu, + bool chained) { - struct ath10k *ar = htt->ar; + struct sk_buff *first; + struct htt_rx_desc *rxd; + enum rx_msdu_decap_format decap; - if (head->len == 0) { - ath10k_dbg(ar, ATH10K_DBG_HTT, - "htt rx dropping due to zero-len\n"); - return false; - } + first = skb_peek(amsdu); + rxd = (void *)first->data - sizeof(*rxd); + decap = MS(__le32_to_cpu(rxd->msdu_start.info1), + RX_MSDU_START_INFO1_DECAP_FORMAT); - if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) { - ath10k_dbg(ar, ATH10K_DBG_HTT, - "htt rx dropping due to decrypt-err\n"); - return false; - } + if (!chained) + return; - if (!channel_set) { - ath10k_warn(ar, "no channel configured; ignoring frame!\n"); - return false; + /* FIXME: Current unchaining logic can only handle simple case of raw + * msdu chaining. If decapping is other than raw the chaining may be + * more complex and this isn't handled by the current code. Don't even + * try re-constructing such frames - it'll be pretty much garbage. + */ + if (decap != RX_MSDU_DECAP_RAW || + skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) { + __skb_queue_purge(amsdu); + return; } - /* Skip mgmt frames while we handle this in WMI */ - if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL || - attention & RX_ATTENTION_FLAGS_MGMT_TYPE) { - ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n"); + ath10k_unchain_msdu(amsdu); +} + +static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, + struct sk_buff_head *amsdu, + struct ieee80211_rx_status *rx_status) +{ + struct sk_buff *msdu; + struct htt_rx_desc *rxd; + bool is_mgmt; + bool has_fcs_err; + + msdu = skb_peek(amsdu); + rxd = (void *)msdu->data - sizeof(*rxd); + + /* FIXME: It might be a good idea to do some fuzzy-testing to drop + * invalid/dangerous frames. + */ + + if (!rx_status->freq) { + ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n"); return false; } - if (status != HTT_RX_IND_MPDU_STATUS_OK && - status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR && - status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER && - !htt->ar->monitor_started) { - ath10k_dbg(ar, ATH10K_DBG_HTT, - "htt rx ignoring frame w/ status %d\n", - status); + is_mgmt = !!(rxd->attention.flags & + __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE)); + has_fcs_err = !!(rxd->attention.flags & + __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR)); + + /* Management frames are handled via WMI events. The pros of such + * approach is that channel is explicitly provided in WMI events + * whereas HTT doesn't provide channel information for Rxed frames. + * + * However some firmware revisions don't report corrupted frames via + * WMI so don't drop them. 
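
ath10k_unchain_msdu() above sizes the head buffer once from the summed fragment lengths, then copies each trailing fragment in and frees it. The same consolidate-and-free flow in plain C (malloc stands in for pskb_expand_head; all names are hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *unchain(char **frags, size_t *lens, int n, size_t *out_len)
{
        size_t total = 0;
        char *head;
        int i;

        /* Allocate total length all at once, as the patch comments note. */
        for (i = 0; i < n; i++)
                total += lens[i];

        head = malloc(total);
        if (!head)
                return NULL;

        *out_len = 0;
        for (i = 0; i < n; i++) {
                memcpy(head + *out_len, frags[i], lens[i]);
                *out_len += lens[i];
                free(frags[i]); /* fragment consumed */
        }

        return head;
}

int main(void)
{
        char *frags[2] = { strdup("chained "), strdup("msdu") };
        size_t lens[2] = { 8, 4 }, out;
        char *m = unchain(frags, lens, 2, &out);

        printf("%.*s (%zu bytes)\n", (int)out, m, out);
        free(m);
        return 0;
}
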
+ */ + if (is_mgmt && !has_fcs_err) { + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n"); return false; } - if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) { - ath10k_dbg(ar, ATH10K_DBG_HTT, - "htt rx CAC running\n"); + if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n"); return false; } return true; } +static void ath10k_htt_rx_h_filter(struct ath10k *ar, + struct sk_buff_head *amsdu, + struct ieee80211_rx_status *rx_status) +{ + if (skb_queue_empty(amsdu)) + return; + + if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status)) + return; + + __skb_queue_purge(amsdu); +} + static void ath10k_htt_rx_handler(struct ath10k_htt *htt, struct htt_rx_indication *rx) { struct ath10k *ar = htt->ar; struct ieee80211_rx_status *rx_status = &htt->rx_status; struct htt_rx_indication_mpdu_range *mpdu_ranges; - struct htt_rx_desc *rxd; - enum htt_rx_mpdu_status status; - struct ieee80211_hdr *hdr; + struct sk_buff_head amsdu; int num_mpdu_ranges; - u32 attention; int fw_desc_len; u8 *fw_desc; - bool channel_set; - int i, j; - int ret; + int i, ret, mpdu_count = 0; lockdep_assert_held(&htt->rx_ring.lock); + if (htt->rx_confused) + return; + fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes); fw_desc = (u8 *)&rx->fw_desc; @@ -1244,92 +1378,33 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx); - /* Fill this once, while this is per-ppdu */ - if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) { - memset(rx_status, 0, sizeof(*rx_status)); - rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR + - rx->ppdu.combined_rssi; - } - - if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) { - /* TSF available only in 32-bit */ - rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff; - rx_status->flag |= RX_FLAG_MACTIME_END; - } - - channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status); - - if (channel_set) { - ath10k_htt_rx_h_rates(htt->ar, rx_status->band, - rx->ppdu.info0, - __le32_to_cpu(rx->ppdu.info1), - __le32_to_cpu(rx->ppdu.info2), - rx_status); - } - ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", rx, sizeof(*rx) + (sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges)); - for (i = 0; i < num_mpdu_ranges; i++) { - status = mpdu_ranges[i].mpdu_range_status; - - for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) { - struct sk_buff *msdu_head, *msdu_tail; - - attention = 0; - msdu_head = NULL; - msdu_tail = NULL; - ret = ath10k_htt_rx_amsdu_pop(htt, - &fw_desc, - &fw_desc_len, - &msdu_head, - &msdu_tail, - &attention); - - if (ret < 0) { - ath10k_warn(ar, "failed to pop amsdu from htt rx ring %d\n", - ret); - ath10k_htt_rx_free_msdu_chain(msdu_head); - continue; - } - - rxd = container_of((void *)msdu_head->data, - struct htt_rx_desc, - msdu_payload); - - if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head, - status, - channel_set, - attention)) { - ath10k_htt_rx_free_msdu_chain(msdu_head); - continue; - } - - if (ret > 0 && - ath10k_unchain_msdu(msdu_head) < 0) { - ath10k_htt_rx_free_msdu_chain(msdu_head); - continue; - } - - if (attention & RX_ATTENTION_FLAGS_FCS_ERR) - rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; - else - rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC; - - if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR) - rx_status->flag |= RX_FLAG_MMIC_ERROR; - else - rx_status->flag &= ~RX_FLAG_MMIC_ERROR; - - hdr = ath10k_htt_rx_skb_get_hdr(msdu_head); - - if (ath10k_htt_rx_hdr_is_amsdu(hdr)) - 
ath10k_htt_rx_amsdu(htt, rx_status, msdu_head); - else - ath10k_htt_rx_msdu(htt, rx_status, msdu_head); + for (i = 0; i < num_mpdu_ranges; i++) + mpdu_count += mpdu_ranges[i].mpdu_count; + + while (mpdu_count--) { + __skb_queue_head_init(&amsdu); + ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, + &fw_desc_len, &amsdu); + if (ret < 0) { + ath10k_warn(ar, "rx ring became corrupted: %d\n", ret); + __skb_queue_purge(&amsdu); + /* FIXME: It's probably a good idea to reboot the + * device instead of leaving it inoperable. + */ + htt->rx_confused = true; + break; } + + ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status); + ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0); + ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); + ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status); + ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); } tasklet_schedule(&htt->rx_replenish_task); @@ -1339,108 +1414,44 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, struct htt_rx_fragment_indication *frag) { struct ath10k *ar = htt->ar; - struct sk_buff *msdu_head, *msdu_tail; - enum htt_rx_mpdu_encrypt_type enctype; - struct htt_rx_desc *rxd; - enum rx_msdu_decap_format fmt; struct ieee80211_rx_status *rx_status = &htt->rx_status; - struct ieee80211_hdr *hdr; + struct sk_buff_head amsdu; int ret; - bool tkip_mic_err; - bool decrypt_err; u8 *fw_desc; - int fw_desc_len, hdrlen, paramlen; - int trim; - u32 attention = 0; + int fw_desc_len; fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes); fw_desc = (u8 *)frag->fw_msdu_rx_desc; - msdu_head = NULL; - msdu_tail = NULL; + __skb_queue_head_init(&amsdu); spin_lock_bh(&htt->rx_ring.lock); ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len, - &msdu_head, &msdu_tail, - &attention); + &amsdu); spin_unlock_bh(&htt->rx_ring.lock); + tasklet_schedule(&htt->rx_replenish_task); + ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n"); if (ret) { ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n", ret); - ath10k_htt_rx_free_msdu_chain(msdu_head); + __skb_queue_purge(&amsdu); return; } - /* FIXME: implement signal strength */ - rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; - - hdr = (struct ieee80211_hdr *)msdu_head->data; - rxd = (void *)msdu_head->data - sizeof(*rxd); - tkip_mic_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR); - decrypt_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR); - fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), - RX_MSDU_START_INFO1_DECAP_FORMAT); - - if (fmt != RX_MSDU_DECAP_RAW) { - ath10k_warn(ar, "we dont support non-raw fragmented rx yet\n"); - dev_kfree_skb_any(msdu_head); - goto end; - } - - enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), - RX_MPDU_START_INFO0_ENCRYPT_TYPE); - ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype, fmt, - true); - msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head); - - if (tkip_mic_err) - ath10k_warn(ar, "tkip mic error\n"); - - if (decrypt_err) { - ath10k_warn(ar, "decryption err in fragmented rx\n"); - dev_kfree_skb_any(msdu_head); - goto end; - } - - if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) { - hdrlen = ieee80211_hdrlen(hdr->frame_control); - paramlen = ath10k_htt_rx_crypto_param_len(ar, enctype); - - /* It is more efficient to move the header than the payload */ - memmove((void *)msdu_head->data + paramlen, - (void *)msdu_head->data, - hdrlen); - skb_pull(msdu_head, paramlen); - hdr = (struct ieee80211_hdr *)msdu_head->data; - } - - /* remove trailing FCS */ - trim = 4; - - /* remove crypto trailer */ - trim += ath10k_htt_rx_crypto_tail_len(ar, enctype); - - /* 
last fragment of TKIP frags has MIC */ - if (!ieee80211_has_morefrags(hdr->frame_control) && - enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) - trim += 8; - - if (trim > msdu_head->len) { - ath10k_warn(ar, "htt rx fragment: trailer longer than the frame itself? drop\n"); - dev_kfree_skb_any(msdu_head); - goto end; + if (skb_queue_len(&amsdu) != 1) { + ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n"); + __skb_queue_purge(&amsdu); + return; } - skb_trim(msdu_head, msdu_head->len - trim); - - ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ", - msdu_head->data, msdu_head->len); - ath10k_process_rx(htt->ar, rx_status, msdu_head); + ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status); + ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); + ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status); + ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); -end: if (fw_desc_len > 0) { ath10k_dbg(ar, ATH10K_DBG_HTT, "expecting more fragmented rx in one indication %d\n", @@ -1674,6 +1685,15 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) case HTT_T2H_MSG_TYPE_RX_DELBA: ath10k_htt_rx_delba(ar, resp); break; + case HTT_T2H_MSG_TYPE_PKTLOG: { + struct ath10k_pktlog_hdr *hdr = + (struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload; + + trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload, + sizeof(*hdr) + + __le16_to_cpu(hdr->size)); + break; + } case HTT_T2H_MSG_TYPE_RX_FLUSH: { /* Ignore this event because mac80211 takes care of Rx * aggregation reordering. @@ -1681,8 +1701,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; } default: - ath10k_dbg(ar, ATH10K_DBG_HTT, "htt event (%d) not handled\n", - resp->hdr.msg_type); + ath10k_warn(ar, "htt event (%d) not handled\n", + resp->hdr.msg_type); ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", skb->data, skb->len); break; diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index bd87a35201d895856620719dc5477fbedb3d7062..4bc51d8a14a3aed706429f769c33f56226ee366d 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -92,7 +92,6 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt) struct ath10k *ar = htt->ar; spin_lock_init(&htt->tx_lock); - init_waitqueue_head(&htt->empty_tx_wq); if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features)) htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC; @@ -555,14 +554,18 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len); skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id); skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr); - skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID); + skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID); + skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq); + trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid); ath10k_dbg(ar, ATH10K_DBG_HTT, - "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n", + "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n", flags0, flags1, msdu->len, msdu_id, frags_paddr, - (u32)skb_cb->paddr, vdev_id, tid); + (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq); ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ", msdu->data, msdu->len); + trace_ath10k_tx_hdr(ar, msdu->data, msdu->len); + trace_ath10k_tx_payload(ar, msdu->data, msdu->len); 
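
The new HTT_T2H_MSG_TYPE_PKTLOG case above sizes the trace payload as sizeof(*hdr) plus the header's little-endian size field. A standalone sketch that decodes such a header from raw bytes (struct pktlog_hdr mirrors the layout added to hw.h; the sample message is made up):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Little-endian wire header, laid out like ath10k_pktlog_hdr. */
struct pktlog_hdr {
        uint16_t flags;
        uint16_t missed_cnt;
        uint16_t log_type;
        uint16_t size;      /* payload bytes that follow */
        uint32_t timestamp;
} __attribute__((packed));

static uint16_t le16_val(const uint8_t *p)
{
        return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
        /* flags=0, missed=0, type=1, size=4, timestamp=0, 4-byte payload */
        uint8_t msg[] = { 0, 0, 0, 0, 1, 0, 4, 0,
                          0, 0, 0, 0, 0xde, 0xad, 0xbe, 0xef };
        uint16_t payload = le16_val(msg + offsetof(struct pktlog_hdr, size));

        /* Length handed to the trace event: header plus payload. */
        printf("trace len = %zu\n", sizeof(struct pktlog_hdr) + payload);
        return 0;
}
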
sg_items[0].transfer_id = 0; sg_items[0].transfer_context = NULL; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 3cf5702c1e7ef7ee49fa2b6860b08854fc48b53b..dfedfd0e0f344a8b1893de5262de261c5f403e44 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -20,15 +20,16 @@ #include "targaddrs.h" +#define ATH10K_FW_DIR "ath10k" + /* QCA988X 1.0 definitions (unsupported) */ #define QCA988X_HW_1_0_CHIP_ID_REV 0x0 /* QCA988X 2.0 definitions */ #define QCA988X_HW_2_0_VERSION 0x4100016c #define QCA988X_HW_2_0_CHIP_ID_REV 0x2 -#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0" +#define QCA988X_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA988X/hw2.0" #define QCA988X_HW_2_0_FW_FILE "firmware.bin" -#define QCA988X_HW_2_0_FW_3_FILE "firmware-3.bin" #define QCA988X_HW_2_0_OTP_FILE "otp.bin" #define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin" #define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234 @@ -43,6 +44,8 @@ #define REG_DUMP_COUNT_QCA988X 60 +#define QCA988X_CAL_DATA_LEN 2116 + struct ath10k_fw_ie { __le32 id; __le32 len; @@ -78,6 +81,15 @@ enum ath10k_mcast2ucast_mode { ATH10K_MCAST2UCAST_ENABLED = 1, }; +struct ath10k_pktlog_hdr { + __le16 flags; + __le16 missed_cnt; + __le16 log_type; + __le16 size; + __le32 timestamp; + u8 payload[0]; +} __packed; + /* Target specific defines for MAIN firmware */ #define TARGET_NUM_VDEVS 8 #define TARGET_NUM_PEER_AST 2 @@ -85,11 +97,13 @@ enum ath10k_mcast2ucast_mode { #define TARGET_DMA_BURST_SIZE 0 #define TARGET_MAC_AGGR_DELIM 0 #define TARGET_AST_SKID_LIMIT 16 -#define TARGET_NUM_PEERS 16 +#define TARGET_NUM_STATIONS 16 +#define TARGET_NUM_PEERS ((TARGET_NUM_STATIONS) + \ + (TARGET_NUM_VDEVS)) #define TARGET_NUM_OFFLOAD_PEERS 0 #define TARGET_NUM_OFFLOAD_REORDER_BUFS 0 #define TARGET_NUM_PEER_KEYS 2 -#define TARGET_NUM_TIDS (2 * ((TARGET_NUM_PEERS) + (TARGET_NUM_VDEVS))) +#define TARGET_NUM_TIDS ((TARGET_NUM_PEERS) * 2) #define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) #define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) #define TARGET_RX_TIMEOUT_LO_PRI 100 @@ -120,12 +134,15 @@ enum ath10k_mcast2ucast_mode { #define TARGET_10X_DMA_BURST_SIZE 0 #define TARGET_10X_MAC_AGGR_DELIM 0 #define TARGET_10X_AST_SKID_LIMIT 16 -#define TARGET_10X_NUM_PEERS (128 + (TARGET_10X_NUM_VDEVS)) -#define TARGET_10X_NUM_PEERS_MAX 128 +#define TARGET_10X_NUM_STATIONS 128 +#define TARGET_10X_NUM_PEERS ((TARGET_10X_NUM_STATIONS) + \ + (TARGET_10X_NUM_VDEVS)) #define TARGET_10X_NUM_OFFLOAD_PEERS 0 #define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0 #define TARGET_10X_NUM_PEER_KEYS 2 -#define TARGET_10X_NUM_TIDS 256 +#define TARGET_10X_NUM_TIDS_MAX 256 +#define TARGET_10X_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \ + (TARGET_10X_NUM_PEERS) * 2) #define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) #define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) #define TARGET_10X_RX_TIMEOUT_LO_PRI 100 @@ -279,6 +296,7 @@ enum ath10k_mcast2ucast_mode { #define SI_RX_DATA1_OFFSET 0x00000014 #define CORE_CTRL_CPU_INTR_MASK 0x00002000 +#define CORE_CTRL_PCIE_REG_31_MASK 0x00000800 #define CORE_CTRL_ADDRESS 0x0000 #define PCIE_INTR_ENABLE_ADDRESS 0x0008 #define PCIE_INTR_CAUSE_ADDRESS 0x000c diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 46709301a51e1986bb4b505b659d7f94bbfca6d9..c4005670cba2e6ac934e8804629735e7b9180792 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -136,7 +136,9 @@ static int 
ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, if (ret) return ret; + spin_lock_bh(&ar->data_lock); peer->keys[i] = arvif->wep_keys[i]; + spin_unlock_bh(&ar->data_lock); } return 0; @@ -173,12 +175,39 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif, ath10k_warn(ar, "failed to remove peer wep key %d: %d\n", i, ret); + spin_lock_bh(&ar->data_lock); peer->keys[i] = NULL; + spin_unlock_bh(&ar->data_lock); } return first_errno; } +bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr, + u8 keyidx) +{ + struct ath10k_peer *peer; + int i; + + lockdep_assert_held(&ar->data_lock); + + /* We don't know which vdev this peer belongs to, + * since WMI doesn't give us that information. + * + * FIXME: multi-bss needs to be handled. + */ + peer = ath10k_peer_find(ar, 0, addr); + if (!peer) + return false; + + for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { + if (peer->keys[i] && peer->keys[i]->keyidx == keyidx) + return true; + } + + return false; +} + static int ath10k_clear_vdev_key(struct ath10k_vif *arvif, struct ieee80211_key_conf *key) { @@ -326,6 +355,9 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr) lockdep_assert_held(&ar->conf_mutex); + if (ar->num_peers >= ar->max_num_peers) + return -ENOBUFS; + ret = ath10k_wmi_peer_create(ar, vdev_id, addr); if (ret) { ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n", @@ -339,9 +371,8 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr) addr, vdev_id, ret); return ret; } - spin_lock_bh(&ar->data_lock); + ar->num_peers++; - spin_unlock_bh(&ar->data_lock); return 0; } @@ -391,15 +422,11 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif) return 0; } -static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value) +static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value) { struct ath10k *ar = arvif->ar; u32 vdev_param; - if (value != 0xFFFFFFFF) - value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold, - ATH10K_RTS_MAX); - vdev_param = ar->wmi.vdev_param->rts_threshold; return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); } @@ -432,9 +459,7 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) if (ret) return ret; - spin_lock_bh(&ar->data_lock); ar->num_peers--; - spin_unlock_bh(&ar->data_lock); return 0; } @@ -471,20 +496,59 @@ static void ath10k_peer_cleanup_all(struct ath10k *ar) list_del(&peer->list); kfree(peer); } - ar->num_peers = 0; spin_unlock_bh(&ar->data_lock); + + ar->num_peers = 0; + ar->num_stations = 0; } /************************/ /* Interface management */ /************************/ +void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->data_lock); + + if (!arvif->beacon) + return; + + if (!arvif->beacon_buf) + dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr, + arvif->beacon->len, DMA_TO_DEVICE); + + dev_kfree_skb_any(arvif->beacon); + + arvif->beacon = NULL; + arvif->beacon_sent = false; +} + +static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->data_lock); + + ath10k_mac_vif_beacon_free(arvif); + + if (arvif->beacon_buf) { + dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, + arvif->beacon_buf, arvif->beacon_paddr); + arvif->beacon_buf = NULL; + } +} + static inline int ath10k_vdev_setup_sync(struct ath10k *ar) { int ret; lockdep_assert_held(&ar->conf_mutex); + if (test_bit(ATH10K_FLAG_CRASH_FLUSH, 
&ar->dev_flags)) + return -ESHUTDOWN; + ret = wait_for_completion_timeout(&ar->vdev_setup_done, ATH10K_VDEV_SETUP_TIMEOUT_HZ); if (ret == 0) @@ -517,6 +581,8 @@ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id) arg.channel.max_reg_power = channel->max_reg_power * 2; arg.channel.max_antenna_gain = channel->max_antenna_gain * 2; + reinit_completion(&ar->vdev_setup_done); + ret = ath10k_wmi_vdev_start(ar, &arg); if (ret) { ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n", @@ -564,6 +630,8 @@ static int ath10k_monitor_vdev_stop(struct ath10k *ar) ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n", ar->monitor_vdev_id, ret); + reinit_completion(&ar->vdev_setup_done); + ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); if (ret) ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n", @@ -590,9 +658,9 @@ static int ath10k_monitor_vdev_create(struct ath10k *ar) return -ENOMEM; } - bit = ffs(ar->free_vdev_map); + bit = __ffs64(ar->free_vdev_map); - ar->monitor_vdev_id = bit - 1; + ar->monitor_vdev_id = bit; ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id, WMI_VDEV_TYPE_MONITOR, @@ -603,7 +671,7 @@ static int ath10k_monitor_vdev_create(struct ath10k *ar) return ret; } - ar->free_vdev_map &= ~(1 << ar->monitor_vdev_id); + ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id); ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n", ar->monitor_vdev_id); @@ -623,7 +691,7 @@ static int ath10k_monitor_vdev_delete(struct ath10k *ar) return ret; } - ar->free_vdev_map |= 1 << ar->monitor_vdev_id; + ar->free_vdev_map |= 1LL << ar->monitor_vdev_id; ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n", ar->monitor_vdev_id); @@ -909,15 +977,7 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif, arvif->is_up = false; spin_lock_bh(&arvif->ar->data_lock); - if (arvif->beacon) { - dma_unmap_single(arvif->ar->dev, - ATH10K_SKB_CB(arvif->beacon)->paddr, - arvif->beacon->len, DMA_TO_DEVICE); - dev_kfree_skb_any(arvif->beacon); - - arvif->beacon = NULL; - arvif->beacon_sent = false; - } + ath10k_mac_vif_beacon_free(arvif); spin_unlock_bh(&arvif->ar->data_lock); return; @@ -966,14 +1026,6 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif, if (is_zero_ether_addr(arvif->bssid)) return; - ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, - arvif->bssid); - if (ret) { - ath10k_warn(ar, "failed to delete IBSS BSSID peer %pM for vdev %d: %d\n", - arvif->bssid, arvif->vdev_id, ret); - return; - } - memset(arvif->bssid, 0, ETH_ALEN); return; @@ -1042,51 +1094,45 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) /* Station management */ /**********************/ +static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar, + struct ieee80211_vif *vif) +{ + /* Some firmware revisions have unstable STA powersave when listen + * interval is set too high (e.g. 5). The symptoms are firmware doesn't + * generate NullFunc frames properly even if buffered frames have been + * indicated in Beacon TIM. Firmware would seldom wake up to pull + * buffered frames. Often pinging the device from AP would simply fail. + * + * As a workaround set it to 1. 
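
The monitor-vdev hunks above move free_vdev_map to 64-bit handling (__ffs64 and 1LL shifts instead of ffs and 1 <<). A userspace sketch of that free-id bitmap, with __builtin_ctzll standing in for the kernel's __ffs64:

#include <stdio.h>
#include <stdint.h>

/* Bit set = vdev id free, as in ar->free_vdev_map. */
static int vdev_alloc(uint64_t *map)
{
        int bit;

        if (!*map)
                return -1; /* no free vdev ids */

        bit = __builtin_ctzll(*map); /* lowest set bit, like __ffs64 */
        *map &= ~(1ULL << bit);
        return bit;
}

static void vdev_free(uint64_t *map, int bit)
{
        *map |= 1ULL << bit;
}

int main(void)
{
        uint64_t map = ~0ULL;
        int id = vdev_alloc(&map);

        printf("allocated vdev %d\n", id);
        vdev_free(&map, id);
        return 0;
}
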
+ */ + if (vif->type == NL80211_IFTYPE_STATION) + return 1; + + return ar->hw->conf.listen_interval; +} + static void ath10k_peer_assoc_h_basic(struct ath10k *ar, - struct ath10k_vif *arvif, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_bss_conf *bss_conf, struct wmi_peer_assoc_complete_arg *arg) { + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + lockdep_assert_held(&ar->conf_mutex); ether_addr_copy(arg->addr, sta->addr); arg->vdev_id = arvif->vdev_id; arg->peer_aid = sta->aid; arg->peer_flags |= WMI_PEER_AUTH; - - if (arvif->vdev_type == WMI_VDEV_TYPE_STA) - /* - * Seems FW have problems with Power Save in STA - * mode when we setup this parameter to high (eg. 5). - * Often we see that FW don't send NULL (with clean P flags) - * frame even there is info about buffered frames in beacons. - * Sometimes we have to wait more than 10 seconds before FW - * will wakeup. Often sending one ping from AP to our device - * just fail (more than 50%). - * - * Seems setting this FW parameter to 1 couse FW - * will check every beacon and will wakup immediately - * after detection buffered data. - */ - arg->peer_listen_intval = 1; - else - arg->peer_listen_intval = ar->hw->conf.listen_interval; - + arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif); arg->peer_num_spatial_streams = 1; - - /* - * The assoc capabilities are available only in managed mode. - */ - if (arvif->vdev_type == WMI_VDEV_TYPE_STA && bss_conf) - arg->peer_caps = bss_conf->assoc_capability; + arg->peer_caps = vif->bss_conf.assoc_capability; } static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, - struct ath10k_vif *arvif, + struct ieee80211_vif *vif, struct wmi_peer_assoc_complete_arg *arg) { - struct ieee80211_vif *vif = arvif->vif; struct ieee80211_bss_conf *info = &vif->bss_conf; struct cfg80211_bss *bss; const u8 *rsnie = NULL; @@ -1343,11 +1389,12 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, } static void ath10k_peer_assoc_h_qos(struct ath10k *ar, - struct ath10k_vif *arvif, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_bss_conf *bss_conf, struct wmi_peer_assoc_complete_arg *arg) { + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + switch (arvif->vdev_type) { case WMI_VDEV_TYPE_AP: if (sta->wme) @@ -1359,7 +1406,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar, } break; case WMI_VDEV_TYPE_STA: - if (bss_conf->qos) + if (vif->bss_conf.qos) arg->peer_flags |= WMI_PEER_QOS; break; default: @@ -1368,7 +1415,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar, } static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, - struct ath10k_vif *arvif, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct wmi_peer_assoc_complete_arg *arg) { @@ -1419,22 +1466,21 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, } static int ath10k_peer_assoc_prepare(struct ath10k *ar, - struct ath10k_vif *arvif, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_bss_conf *bss_conf, struct wmi_peer_assoc_complete_arg *arg) { lockdep_assert_held(&ar->conf_mutex); memset(arg, 0, sizeof(*arg)); - ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, arg); - ath10k_peer_assoc_h_crypto(ar, arvif, arg); + ath10k_peer_assoc_h_basic(ar, vif, sta, arg); + ath10k_peer_assoc_h_crypto(ar, vif, arg); ath10k_peer_assoc_h_rates(ar, sta, arg); ath10k_peer_assoc_h_ht(ar, sta, arg); ath10k_peer_assoc_h_vht(ar, sta, arg); - ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, arg); - ath10k_peer_assoc_h_phymode(ar, 
arvif, sta, arg); + ath10k_peer_assoc_h_qos(ar, vif, sta, arg); + ath10k_peer_assoc_h_phymode(ar, vif, sta, arg); return 0; } @@ -1480,6 +1526,9 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, lockdep_assert_held(&ar->conf_mutex); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n", + arvif->vdev_id, arvif->bssid, arvif->aid); + rcu_read_lock(); ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); @@ -1494,8 +1543,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, * before calling ath10k_setup_peer_smps() which might sleep. */ ht_cap = ap_sta->ht_cap; - ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta, - bss_conf, &peer_arg); + ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg); if (ret) { ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n", bss_conf->bssid, arvif->vdev_id, ret); @@ -1523,6 +1571,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, "mac vdev %d up (associated) bssid %pM aid %d\n", arvif->vdev_id, bss_conf->bssid, bss_conf->aid); + WARN_ON(arvif->is_up); + arvif->aid = bss_conf->aid; ether_addr_copy(arvif->bssid, bss_conf->bssid); @@ -1536,9 +1586,6 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, arvif->is_up = true; } -/* - * FIXME: flush TIDs - */ static void ath10k_bss_disassoc(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -1548,45 +1595,30 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw, lockdep_assert_held(&ar->conf_mutex); - /* - * For some reason, calling VDEV-DOWN before VDEV-STOP - * makes the FW to send frames via HTT after disassociation. - * No idea why this happens, even though VDEV-DOWN is supposed - * to be analogous to link down, so just stop the VDEV. - */ - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d stop (disassociated\n", - arvif->vdev_id); - - /* FIXME: check return value */ - ret = ath10k_vdev_stop(arvif); - - /* - * If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and - * report beacons from previously associated network through HTT. - * This in turn would spam mac80211 WARN_ON if we bring down all - * interfaces as it expects there is no rx when no interface is - * running. - */ - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n", + arvif->vdev_id, arvif->bssid); - /* FIXME: why don't we print error if wmi call fails? 
*/ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); + if (ret) + ath10k_warn(ar, "failed to down vdev %i: %d\n", + arvif->vdev_id, ret); arvif->def_wep_key_idx = 0; - - arvif->is_started = false; arvif->is_up = false; } -static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, - struct ieee80211_sta *sta, bool reassoc) +static int ath10k_station_assoc(struct ath10k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + bool reassoc) { + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct wmi_peer_assoc_complete_arg peer_arg; int ret = 0; lockdep_assert_held(&ar->conf_mutex); - ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg); + ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg); if (ret) { ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n", sta->addr, arvif->vdev_id, ret); @@ -1601,43 +1633,51 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, return ret; } - ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap); - if (ret) { - ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n", - arvif->vdev_id, ret); - return ret; - } - - if (!sta->wme && !reassoc) { - arvif->num_legacy_stations++; - ret = ath10k_recalc_rtscts_prot(arvif); + /* Re-assoc is run only to update supported rates for a given station. It + * doesn't make much sense to reconfigure the peer completely. + */ + if (!reassoc) { + ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, + &sta->ht_cap); if (ret) { - ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", + ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n", arvif->vdev_id, ret); return ret; } - } - ret = ath10k_install_peer_wep_keys(arvif, sta->addr); - if (ret) { - ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", - arvif->vdev_id, ret); - return ret; - } + ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); + if (ret) { + ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n", + sta->addr, arvif->vdev_id, ret); + return ret; + } - ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); - if (ret) { - ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n", - sta->addr, arvif->vdev_id, ret); - return ret; + if (!sta->wme) { + arvif->num_legacy_stations++; + ret = ath10k_recalc_rtscts_prot(arvif); + if (ret) { + ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + ret = ath10k_install_peer_wep_keys(arvif, sta->addr); + if (ret) { + ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } } return ret; } -static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif, +static int ath10k_station_disassoc(struct ath10k *ar, + struct ieee80211_vif *vif, struct ieee80211_sta *sta) { + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); int ret = 0; lockdep_assert_held(&ar->conf_mutex); @@ -1729,6 +1769,7 @@ static int ath10k_update_channel_list(struct ath10k *ar) ch->passive = passive; ch->freq = channel->center_freq; + ch->band_center_freq1 = channel->center_freq; ch->min_power = 0; ch->max_power = channel->max_power * 2; ch->max_reg_power = channel->max_reg_power * 2; @@ -1983,6 +2024,18 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, } } +static bool ath10k_mac_need_offchan_tx_work(struct ath10k *ar) +{ + /* FIXME: Not really sure since when the behaviour changed.
At some + * point new firmware stopped requiring creation of peer entries for + * offchannel tx (and actually creating them causes issues with wmi-htc + * tx credit replenishment and reliability). Assuming it's at least 3.4 + * because that's when the `freq` was introduced to TX_FRM HTT command. + */ + return !(ar->htt.target_version_major >= 3 && + ar->htt.target_version_minor >= 4); +} + static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; @@ -2158,10 +2211,10 @@ void __ath10k_scan_finish(struct ath10k *ar) case ATH10K_SCAN_IDLE: break; case ATH10K_SCAN_RUNNING: - case ATH10K_SCAN_ABORTING: if (ar->scan.is_roc) ieee80211_remain_on_channel_expired(ar->hw); - else + case ATH10K_SCAN_ABORTING: + if (!ar->scan.is_roc) ieee80211_scan_completed(ar->hw, (ar->scan.state == ATH10K_SCAN_ABORTING)); @@ -2327,23 +2380,28 @@ static void ath10k_tx(struct ieee80211_hw *hw, if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { spin_lock_bh(&ar->data_lock); - ATH10K_SKB_CB(skb)->htt.is_offchan = true; + ATH10K_SKB_CB(skb)->htt.freq = ar->scan.roc_freq; ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id; spin_unlock_bh(&ar->data_lock); - ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n", - skb); + if (ath10k_mac_need_offchan_tx_work(ar)) { + ATH10K_SKB_CB(skb)->htt.freq = 0; + ATH10K_SKB_CB(skb)->htt.is_offchan = true; - skb_queue_tail(&ar->offchan_tx_queue, skb); - ieee80211_queue_work(hw, &ar->offchan_tx_work); - return; + ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n", + skb); + + skb_queue_tail(&ar->offchan_tx_queue, skb); + ieee80211_queue_work(hw, &ar->offchan_tx_work); + return; + } } ath10k_tx_htt(ar, skb); } /* Must not be called with conf_mutex held as workers can use that also. */ -static void ath10k_drain_tx(struct ath10k *ar) +void ath10k_drain_tx(struct ath10k *ar) { /* make sure rcu-protected mac80211 tx path itself is drained */ synchronize_net(); @@ -2376,16 +2434,8 @@ void ath10k_halt(struct ath10k *ar) ath10k_hif_power_down(ar); spin_lock_bh(&ar->data_lock); - list_for_each_entry(arvif, &ar->arvifs, list) { - if (!arvif->beacon) - continue; - - dma_unmap_single(arvif->ar->dev, - ATH10K_SKB_CB(arvif->beacon)->paddr, - arvif->beacon->len, DMA_TO_DEVICE); - dev_kfree_skb_any(arvif->beacon); - arvif->beacon = NULL; - } + list_for_each_entry(arvif, &ar->arvifs, list) + ath10k_mac_vif_beacon_cleanup(arvif); spin_unlock_bh(&ar->data_lock); } @@ -2408,12 +2458,28 @@ static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) return 0; } +static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg) +{ + /* It is not clear that allowing gaps in chainmask + * is helpful. Probably it will not do what user + * is hoping for, so warn in that case. + */ + if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0) + return; + + ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. 
Suggested values: 15, 7, 3, 1 or 0.\n", + dbg, cm); +} + static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant) { int ret; lockdep_assert_held(&ar->conf_mutex); + ath10k_check_chain_mask(ar, tx_ant, "tx"); + ath10k_check_chain_mask(ar, rx_ant, "rx"); + ar->cfg_tx_chainmask = tx_ant; ar->cfg_rx_chainmask = rx_ant; @@ -2677,12 +2743,68 @@ static void ath10k_config_chan(struct ath10k *ar) ath10k_monitor_recalc(ar); } +static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower) +{ + int ret; + u32 param; + + lockdep_assert_held(&ar->conf_mutex); + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower); + + param = ar->wmi.pdev_param->txpower_limit2g; + ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); + if (ret) { + ath10k_warn(ar, "failed to set 2g txpower %d: %d\n", + txpower, ret); + return ret; + } + + param = ar->wmi.pdev_param->txpower_limit5g; + ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); + if (ret) { + ath10k_warn(ar, "failed to set 5g txpower %d: %d\n", + txpower, ret); + return ret; + } + + return 0; +} + +static int ath10k_mac_txpower_recalc(struct ath10k *ar) +{ + struct ath10k_vif *arvif; + int ret, txpower = -1; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + WARN_ON(arvif->txpower < 0); + + if (txpower == -1) + txpower = arvif->txpower; + else + txpower = min(txpower, arvif->txpower); + } + + if (WARN_ON(txpower == -1)) + return -EINVAL; + + ret = ath10k_mac_txpower_setup(ar, txpower); + if (ret) { + ath10k_warn(ar, "failed to setup tx power %d: %d\n", + txpower, ret); + return ret; + } + + return 0; +} + static int ath10k_config(struct ieee80211_hw *hw, u32 changed) { struct ath10k *ar = hw->priv; struct ieee80211_conf *conf = &hw->conf; int ret = 0; - u32 param; mutex_lock(&ar->conf_mutex); @@ -2706,25 +2828,6 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed) } } - if (changed & IEEE80211_CONF_CHANGE_POWER) { - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac config power %d\n", - hw->conf.power_level); - - param = ar->wmi.pdev_param->txpower_limit2g; - ret = ath10k_wmi_pdev_set_param(ar, param, - hw->conf.power_level * 2); - if (ret) - ath10k_warn(ar, "failed to set 2g txpower %d: %d\n", - hw->conf.power_level, ret); - - param = ar->wmi.pdev_param->txpower_limit5g; - ret = ath10k_wmi_pdev_set_param(ar, param, - hw->conf.power_level * 2); - if (ret) - ath10k_warn(ar, "failed to set 5g txpower %d: %d\n", - hw->conf.power_level, ret); - } - if (changed & IEEE80211_CONF_CHANGE_PS) ath10k_config_ps(ar); @@ -2739,6 +2842,17 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed) return ret; } +static u32 get_nss_from_chainmask(u16 chain_mask) +{ + if ((chain_mask & 0x15) == 0x15) + return 4; + else if ((chain_mask & 0x7) == 0x7) + return 3; + else if ((chain_mask & 0x3) == 0x3) + return 2; + return 1; +} + /* * TODO: * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE, @@ -2772,9 +2886,12 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, ret = -EBUSY; goto err; } - bit = ffs(ar->free_vdev_map); + bit = __ffs64(ar->free_vdev_map); - arvif->vdev_id = bit - 1; + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n", + bit, ar->free_vdev_map); + + arvif->vdev_id = bit; arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; if (ar->p2p) @@ -2804,8 +2921,39 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, break; } - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n", - arvif->vdev_id, arvif->vdev_type, 
arvif->vdev_subtype); + /* Some firmware revisions don't wait for beacon tx completion before + * sending another SWBA event. This could lead to hardware using old + * (freed) beacon data in some cases, e.g. tx credit starvation + * combined with missed TBTT. This is very very rare. + * + * On non-IOMMU-enabled hosts this could be a possible security issue + * because hw could beacon some random data on the air. On + * IOMMU-enabled hosts DMAR faults would occur in most cases and target + * device would crash. + * + * Since there are no beacon tx completions (implicit nor explicit) + * propagated to host the only workaround for this is to allocate a + * DMA-coherent buffer for a lifetime of a vif and use it for all + * beacon tx commands. Worst case for this approach is some beacons may + * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap. + */ + if (vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_AP) { + arvif->beacon_buf = dma_zalloc_coherent(ar->dev, + IEEE80211_MAX_FRAME_LEN, + &arvif->beacon_paddr, + GFP_ATOMIC); + if (!arvif->beacon_buf) { + ret = -ENOMEM; + ath10k_warn(ar, "failed to allocate beacon buffer: %d\n", + ret); + goto err; + } + } + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n", + arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, + arvif->beacon_buf ? "single-buf" : "per-skb"); ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, vif->addr); @@ -2815,7 +2963,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, goto err; } - ar->free_vdev_map &= ~(1 << arvif->vdev_id); + ar->free_vdev_map &= ~(1LL << arvif->vdev_id); list_add(&arvif->list, &ar->arvifs); vdev_param = ar->wmi.vdev_param->def_keyid; @@ -2837,6 +2985,20 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, goto err_vdev_delete; } + if (ar->cfg_tx_chainmask) { + u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); + + vdev_param = ar->wmi.vdev_param->nss; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, + nss); + if (ret) { + ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n", + arvif->vdev_id, ar->cfg_tx_chainmask, nss, + ret); + goto err_vdev_delete; + } + } + if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr); if (ret) { @@ -2899,6 +3061,13 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, goto err_peer_delete; } + arvif->txpower = vif->bss_conf.txpower; + ret = ath10k_mac_txpower_recalc(ar); + if (ret) { + ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); + goto err_peer_delete; + } + mutex_unlock(&ar->conf_mutex); return 0; @@ -2908,10 +3077,16 @@ err_peer_delete: err_vdev_delete: ath10k_wmi_vdev_delete(ar, arvif->vdev_id); - ar->free_vdev_map |= 1 << arvif->vdev_id; + ar->free_vdev_map |= 1LL << arvif->vdev_id; list_del(&arvif->list); err: + if (arvif->beacon_buf) { + dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, + arvif->beacon_buf, arvif->beacon_paddr); + arvif->beacon_buf = NULL; + } + mutex_unlock(&ar->conf_mutex); return ret; @@ -2924,19 +3099,12 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); int ret; - mutex_lock(&ar->conf_mutex); - cancel_work_sync(&arvif->wep_key_work); - spin_lock_bh(&ar->data_lock); - if (arvif->beacon) { - dma_unmap_single(arvif->ar->dev, - ATH10K_SKB_CB(arvif->beacon)->paddr, - arvif->beacon->len, DMA_TO_DEVICE); - dev_kfree_skb_any(arvif->beacon); - 
arvif->beacon = NULL; - } + mutex_lock(&ar->conf_mutex); + spin_lock_bh(&ar->data_lock); + ath10k_mac_vif_beacon_cleanup(arvif); spin_unlock_bh(&ar->data_lock); ret = ath10k_spectral_vif_stop(arvif); @@ -2944,7 +3112,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n", arvif->vdev_id, ret); - ar->free_vdev_map |= 1 << arvif->vdev_id; + ar->free_vdev_map |= 1LL << arvif->vdev_id; list_del(&arvif->list); if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { @@ -3068,54 +3236,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, arvif->u.ap.hidden_ssid = info->hidden_ssid; } - /* - * Firmware manages AP self-peer internally so make sure to not create - * it in driver. Otherwise AP self-peer deletion may timeout later. - */ - if (changed & BSS_CHANGED_BSSID && - vif->type != NL80211_IFTYPE_AP) { - if (!is_zero_ether_addr(info->bssid)) { - ath10k_dbg(ar, ATH10K_DBG_MAC, - "mac vdev %d create peer %pM\n", - arvif->vdev_id, info->bssid); - - ret = ath10k_peer_create(ar, arvif->vdev_id, - info->bssid); - if (ret) - ath10k_warn(ar, "failed to add peer %pM for vdev %d when changing bssid: %i\n", - info->bssid, arvif->vdev_id, ret); - - if (vif->type == NL80211_IFTYPE_STATION) { - /* - * this is never erased as we it for crypto key - * clearing; this is FW requirement - */ - ether_addr_copy(arvif->bssid, info->bssid); - - ath10k_dbg(ar, ATH10K_DBG_MAC, - "mac vdev %d start %pM\n", - arvif->vdev_id, info->bssid); - - ret = ath10k_vdev_start(arvif); - if (ret) { - ath10k_warn(ar, "failed to start vdev %i: %d\n", - arvif->vdev_id, ret); - goto exit; - } - - arvif->is_started = true; - } - - /* - * Mac80211 does not keep IBSS bssid when leaving IBSS, - * so driver need to store it. It is needed when leaving - * IBSS in order to remove BSSID peer. 
- */ - if (vif->type == NL80211_IFTYPE_ADHOC) - memcpy(arvif->bssid, info->bssid, - ETH_ALEN); - } - } + if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) + ether_addr_copy(arvif->bssid, info->bssid); if (changed & BSS_CHANGED_BEACON_ENABLED) ath10k_control_beaconing(arvif, info); @@ -3177,10 +3299,21 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, ath10k_monitor_stop(ar); ath10k_bss_assoc(hw, vif, info); ath10k_monitor_recalc(ar); + } else { + ath10k_bss_disassoc(hw, vif); } } -exit: + if (changed & BSS_CHANGED_TXPOWER) { + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n", + arvif->vdev_id, info->txpower); + + arvif->txpower = info->txpower; + ret = ath10k_mac_txpower_recalc(ar); + if (ret) + ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); + } + mutex_unlock(&ar->conf_mutex); } @@ -3266,9 +3399,10 @@ static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw, struct ath10k *ar = hw->priv; mutex_lock(&ar->conf_mutex); - cancel_delayed_work_sync(&ar->scan.timeout); ath10k_scan_abort(ar); mutex_unlock(&ar->conf_mutex); + + cancel_delayed_work_sync(&ar->scan.timeout); } static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, @@ -3453,7 +3587,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk) ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n", sta->addr); - err = ath10k_station_assoc(ar, arvif, sta, true); + err = ath10k_station_assoc(ar, arvif->vif, sta, true); if (err) ath10k_warn(ar, "failed to reassociate station: %pM\n", sta->addr); @@ -3462,6 +3596,37 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk) mutex_unlock(&ar->conf_mutex); } +static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->conf_mutex); + + if (arvif->vdev_type != WMI_VDEV_TYPE_AP && + arvif->vdev_type != WMI_VDEV_TYPE_IBSS) + return 0; + + if (ar->num_stations >= ar->max_num_stations) + return -ENOBUFS; + + ar->num_stations++; + + return 0; +} + +static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->conf_mutex); + + if (arvif->vdev_type != WMI_VDEV_TYPE_AP && + arvif->vdev_type != WMI_VDEV_TYPE_IBSS) + return; + + ar->num_stations--; +} + static int ath10k_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -3471,7 +3636,6 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; - int max_num_peers; int ret = 0; if (old_state == IEEE80211_STA_NOTEXIST && @@ -3489,31 +3653,46 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); if (old_state == IEEE80211_STA_NOTEXIST && - new_state == IEEE80211_STA_NONE && - vif->type != NL80211_IFTYPE_STATION) { + new_state == IEEE80211_STA_NONE) { /* * New station addition. 
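 *
 * Rough sketch of the transitions this callback now handles (mapping
 * inferred from the branches below, not from mac80211 documentation):
 *
 *   NOTEXIST -> NONE : bump station count, create WMI peer;
 *                      for STA vifs also start the vdev
 *   NONE -> NOTEXIST : for STA vifs stop the vdev; delete the
 *                      WMI peer and drop the station count
 *   AUTH -> ASSOC    : ath10k_station_assoc()
 *   ASSOC -> AUTH    : ath10k_station_disassoc()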
*/ - if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) - max_num_peers = TARGET_10X_NUM_PEERS_MAX - 1; - else - max_num_peers = TARGET_NUM_PEERS; + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n", + arvif->vdev_id, sta->addr, + ar->num_stations + 1, ar->max_num_stations, + ar->num_peers + 1, ar->max_num_peers); - if (ar->num_peers >= max_num_peers) { - ath10k_warn(ar, "number of peers exceeded: peers number %d (max peers %d)\n", - ar->num_peers, max_num_peers); - ret = -ENOBUFS; + ret = ath10k_mac_inc_num_stations(arvif); + if (ret) { + ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n", + ar->max_num_stations); goto exit; } - ath10k_dbg(ar, ATH10K_DBG_MAC, - "mac vdev %d peer create %pM (new sta) num_peers %d\n", - arvif->vdev_id, sta->addr, ar->num_peers); - ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr); - if (ret) + if (ret) { ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n", sta->addr, arvif->vdev_id, ret); + ath10k_mac_dec_num_stations(arvif); + goto exit; + } + + if (vif->type == NL80211_IFTYPE_STATION) { + WARN_ON(arvif->is_started); + + ret = ath10k_vdev_start(arvif); + if (ret) { + ath10k_warn(ar, "failed to start vdev %i: %d\n", + arvif->vdev_id, ret); + WARN_ON(ath10k_peer_delete(ar, arvif->vdev_id, + sta->addr)); + ath10k_mac_dec_num_stations(arvif); + goto exit; + } + + arvif->is_started = true; + } } else if ((old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST)) { /* @@ -3522,13 +3701,24 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d peer delete %pM (sta gone)\n", arvif->vdev_id, sta->addr); + + if (vif->type == NL80211_IFTYPE_STATION) { + WARN_ON(!arvif->is_started); + + ret = ath10k_vdev_stop(arvif); + if (ret) + ath10k_warn(ar, "failed to stop vdev %i: %d\n", + arvif->vdev_id, ret); + + arvif->is_started = false; + } + ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); if (ret) ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", sta->addr, arvif->vdev_id, ret); - if (vif->type == NL80211_IFTYPE_STATION) - ath10k_bss_disassoc(hw, vif); + ath10k_mac_dec_num_stations(arvif); } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC && (vif->type == NL80211_IFTYPE_AP || @@ -3539,7 +3729,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n", sta->addr); - ret = ath10k_station_assoc(ar, arvif, sta, false); + ret = ath10k_station_assoc(ar, vif, sta, false); if (ret) ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n", sta->addr, arvif->vdev_id, ret); @@ -3553,7 +3743,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n", sta->addr); - ret = ath10k_station_disassoc(ar, arvif, sta); + ret = ath10k_station_disassoc(ar, vif, sta); if (ret) ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n", sta->addr, arvif->vdev_id, ret); @@ -3717,6 +3907,8 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw, if (ret) goto exit; + duration = max(duration, WMI_SCAN_CHAN_MIN_TIME_MSEC); + memset(&arg, 0, sizeof(arg)); ath10k_wmi_start_scan_init(ar, &arg); arg.vdev_id = arvif->vdev_id; @@ -3761,10 +3953,11 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) struct ath10k *ar = hw->priv; mutex_lock(&ar->conf_mutex); - cancel_delayed_work_sync(&ar->scan.timeout); 
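	/* Presumed rationale for moving the cancel out from under conf_mutex
	 * (mirroring ath10k_cancel_hw_scan above): the scan timeout worker may
	 * itself take conf_mutex, so calling cancel_delayed_work_sync() while
	 * holding it risks a deadlock. Resulting order (sketch):
	 *
	 *   mutex_lock(&ar->conf_mutex);
	 *   ath10k_scan_abort(ar);
	 *   mutex_unlock(&ar->conf_mutex);
	 *   cancel_delayed_work_sync(&ar->scan.timeout);
	 */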
ath10k_scan_abort(ar); mutex_unlock(&ar->conf_mutex); + cancel_delayed_work_sync(&ar->scan.timeout); + return 0; } @@ -3807,7 +4000,7 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value) ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n", arvif->vdev_id, value); - ret = ath10k_mac_set_rts(arvif, value); + ret = ath10k_mac_set_frag(arvif, value); if (ret) { ath10k_warn(ar, "failed to set fragmentation threshold for vdev %d: %d\n", arvif->vdev_id, ret); @@ -3843,7 +4036,9 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, empty = (ar->htt.num_pending_tx == 0); spin_unlock_bh(&ar->htt.tx_lock); - skip = (ar->state == ATH10K_STATE_WEDGED); + skip = (ar->state == ATH10K_STATE_WEDGED) || + test_bit(ATH10K_FLAG_CRASH_FLUSH, + &ar->dev_flags); (empty || skip); }), ATH10K_FLUSH_TIMEOUT_HZ); @@ -3929,10 +4124,14 @@ exit: } #endif -static void ath10k_restart_complete(struct ieee80211_hw *hw) +static void ath10k_reconfig_complete(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type) { struct ath10k *ar = hw->priv; + if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) + return; + mutex_lock(&ar->conf_mutex); /* If device failed to restart it will be in a different state, e.g. @@ -3940,6 +4139,7 @@ static void ath10k_restart_complete(struct ieee80211_hw *hw) if (ar->state == ATH10K_STATE_RESTARTED) { ath10k_info(ar, "device successfully recovered\n"); ar->state = ATH10K_STATE_ON; + ieee80211_wake_queues(ar->hw); } mutex_unlock(&ar->conf_mutex); @@ -3975,6 +4175,9 @@ static int ath10k_get_survey(struct ieee80211_hw *hw, int idx, survey->channel = &sband->channels[idx]; + if (ar->rx_channel == survey->channel) + survey->filled |= SURVEY_INFO_IN_USE; + exit: mutex_unlock(&ar->conf_mutex); return ret; @@ -4022,6 +4225,10 @@ ath10k_default_bitrate_mask(struct ath10k *ar, u32 legacy = 0x00ff; u8 ht = 0xff, i; u16 vht = 0x3ff; + u16 nrf = ar->num_rf_chains; + + if (ar->cfg_tx_chainmask) + nrf = get_nss_from_chainmask(ar->cfg_tx_chainmask); switch (band) { case IEEE80211_BAND_2GHZ: @@ -4037,11 +4244,11 @@ ath10k_default_bitrate_mask(struct ath10k *ar, if (mask->control[band].legacy != legacy) return false; - for (i = 0; i < ar->num_rf_chains; i++) + for (i = 0; i < nrf; i++) if (mask->control[band].ht_mcs[i] != ht) return false; - for (i = 0; i < ar->num_rf_chains; i++) + for (i = 0; i < nrf; i++) if (mask->control[band].vht_mcs[i] != vht) return false; @@ -4292,6 +4499,9 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw, u8 fixed_nss = ar->num_rf_chains; u8 force_sgi; + if (ar->cfg_tx_chainmask) + fixed_nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); + force_sgi = mask->control[band].gi; if (force_sgi == NL80211_TXRATE_FORCE_LGI) return -EINVAL; @@ -4450,12 +4660,15 @@ static const struct ieee80211_ops ath10k_ops = { .tx_last_beacon = ath10k_tx_last_beacon, .set_antenna = ath10k_set_antenna, .get_antenna = ath10k_get_antenna, - .restart_complete = ath10k_restart_complete, + .reconfig_complete = ath10k_reconfig_complete, .get_survey = ath10k_get_survey, .set_bitrate_mask = ath10k_set_bitrate_mask, .sta_rc_update = ath10k_sta_rc_update, .get_tsf = ath10k_get_tsf, .ampdu_action = ath10k_ampdu_action, + .get_et_sset_count = ath10k_debug_get_et_sset_count, + .get_et_stats = ath10k_debug_get_et_stats, + .get_et_strings = ath10k_debug_get_et_strings, CFG80211_TESTMODE_CMD(ath10k_tm_cmd) @@ -4800,15 +5013,6 @@ int ath10k_mac_register(struct ath10k *ar) BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP); - if 
(test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { - /* TODO: Have to deal with 2x2 chips if/when the come out. */ - ar->supp_tx_chainmask = TARGET_10X_TX_CHAIN_MASK; - ar->supp_rx_chainmask = TARGET_10X_RX_CHAIN_MASK; - } else { - ar->supp_tx_chainmask = TARGET_TX_CHAIN_MASK; - ar->supp_rx_chainmask = TARGET_RX_CHAIN_MASK; - } - ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask; ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask; @@ -4827,10 +5031,6 @@ int ath10k_mac_register(struct ath10k *ar) IEEE80211_HW_AP_LINK_PS | IEEE80211_HW_SPECTRUM_MGMT; - /* MSDU can have HTT TX fragment pushed in front. The additional 4 - * bytes is used for padding/alignment if necessary. */ - ar->hw->extra_tx_headroom += sizeof(struct htt_data_tx_desc_frag)*2 + 4; - ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS; if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) @@ -4854,6 +5054,8 @@ int ath10k_mac_register(struct ath10k *ar) ar->hw->wiphy->max_remain_on_channel_duration = 5000; ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; + ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE; + /* * on LL hardware queues are managed entirely by the FW * so we only advertise to mac we can do the queues thing diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h index 6c80eeada3e28863bb1fe22e753251b64f73608d..68296117d20333e9b3eb428c50b2e15f82fbd648 100644 --- a/drivers/net/wireless/ath/ath10k/mac.h +++ b/drivers/net/wireless/ath/ath10k/mac.h @@ -21,6 +21,8 @@ #include #include "core.h" +#define WEP_KEYID_SHIFT 6 + struct ath10k_generic_iter { struct ath10k *ar; int ret; @@ -39,6 +41,10 @@ void ath10k_offchan_tx_work(struct work_struct *work); void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar); void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work); void ath10k_halt(struct ath10k *ar); +void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif); +void ath10k_drain_tx(struct ath10k *ar); +bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr, + u8 keyidx); static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif) { diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 59e0ea83be5098ea261f35330ba1659749b0eeb8..7abb8367119a872f0de5063d9ff1f75025abcebd 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -485,6 +485,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, void *data_buf = NULL; int i; + spin_lock_bh(&ar_pci->ce_lock); + ce_diag = ar_pci->ce_diag; /* @@ -511,7 +513,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, nbytes = min_t(unsigned int, remaining_bytes, DIAG_TRANSFER_LIMIT); - ret = ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data); + ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data); if (ret != 0) goto done; @@ -527,15 +529,15 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address); - ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, - 0); + ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0, + 0); if (ret) goto done; i = 0; - while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf, - &completed_nbytes, - &id) != 0) { + while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf, + &completed_nbytes, + &id) != 0) { mdelay(1); if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { ret = -EBUSY; @@ -554,9 +556,9 @@ static int 
ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, } i = 0; - while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf, - &completed_nbytes, - &id, &flags) != 0) { + while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf, + &completed_nbytes, + &id, &flags) != 0) { mdelay(1); if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { @@ -591,6 +593,8 @@ done: dma_free_coherent(ar->dev, orig_nbytes, data_buf, ce_data_base); + spin_unlock_bh(&ar_pci->ce_lock); + return ret; } @@ -648,6 +652,8 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, dma_addr_t ce_data_base = 0; int i; + spin_lock_bh(&ar_pci->ce_lock); + ce_diag = ar_pci->ce_diag; /* @@ -688,7 +694,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); /* Set up to receive directly into Target(!) address */ - ret = ath10k_ce_rx_post_buf(ce_diag, NULL, address); + ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address); if (ret != 0) goto done; @@ -696,15 +702,15 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, * Request CE to send caller-supplied data that * was copied to bounce buffer to Target(!) address. */ - ret = ath10k_ce_send(ce_diag, NULL, (u32)ce_data, - nbytes, 0, 0); + ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data, + nbytes, 0, 0); if (ret != 0) goto done; i = 0; - while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf, - &completed_nbytes, - &id) != 0) { + while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf, + &completed_nbytes, + &id) != 0) { mdelay(1); if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { @@ -724,9 +730,9 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, } i = 0; - while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf, - &completed_nbytes, - &id, &flags) != 0) { + while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf, + &completed_nbytes, + &id, &flags) != 0) { mdelay(1); if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { @@ -760,6 +766,8 @@ done: ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n", address, ret); + spin_unlock_bh(&ar_pci->ce_lock); + return ret; } @@ -815,20 +823,24 @@ static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state) struct ath10k *ar = ce_state->ar; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current; - void *transfer_context; + struct sk_buff_head list; + struct sk_buff *skb; u32 ce_data; unsigned int nbytes; unsigned int transfer_id; - while (ath10k_ce_completed_send_next(ce_state, &transfer_context, - &ce_data, &nbytes, - &transfer_id) == 0) { + __skb_queue_head_init(&list); + while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data, + &nbytes, &transfer_id) == 0) { /* no need to call tx completion for NULL pointers */ - if (transfer_context == NULL) + if (skb == NULL) continue; - cb->tx_completion(ar, transfer_context, transfer_id); + __skb_queue_tail(&list, skb); } + + while ((skb = __skb_dequeue(&list))) + cb->tx_completion(ar, skb); } /* Called by lower (CE) layer when data is received from the Target. 
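 *
 * Like the tx completion handler above, the rx path below now drains
 * the copy engine into a local sk_buff list first and runs the hif
 * callbacks only afterwards. Minimal sketch of the two-phase pattern
 * (names as in the surrounding code):
 *
 *   __skb_queue_head_init(&list);
 *   while (ath10k_ce_completed_recv_next(ce_state, ...) == 0)
 *           __skb_queue_tail(&list, skb);
 *   while ((skb = __skb_dequeue(&list)))
 *           cb->rx_completion(ar, skb);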
*/ @@ -839,12 +851,14 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state) struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current; struct sk_buff *skb; + struct sk_buff_head list; void *transfer_context; u32 ce_data; unsigned int nbytes, max_nbytes; unsigned int transfer_id; unsigned int flags; + __skb_queue_head_init(&list); while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, &ce_data, &nbytes, &transfer_id, &flags) == 0) { @@ -861,7 +875,16 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state) } skb_put(skb, nbytes); - cb->rx_completion(ar, skb, pipe_info->pipe_num); + __skb_queue_tail(&list, skb); + } + + while ((skb = __skb_dequeue(&list))) { + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n", + ce_state->id, skb->len); + ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ", + skb->data, skb->len); + + cb->rx_completion(ar, skb); } ath10k_pci_rx_post_pipe(pipe_info); @@ -936,6 +959,12 @@ err: return err; } +static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf, + size_t buf_len) +{ + return ath10k_pci_diag_read_mem(ar, address, buf, buf_len); +} + static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); @@ -986,6 +1015,8 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) spin_lock_bh(&ar->data_lock); + ar->stats.fw_crash_counter++; + crash_data = ath10k_debug_get_new_fw_crash_data(ar); if (crash_data) @@ -1121,14 +1152,37 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, &dl_is_polled); } -static void ath10k_pci_irq_disable(struct ath10k *ar) +static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int i; + u32 val; + + val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS); + val &= ~CORE_CTRL_PCIE_REG_31_MASK; + + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val); +} +static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) +{ + u32 val; + + val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS); + val |= CORE_CTRL_PCIE_REG_31_MASK; + + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val); +} + +static void ath10k_pci_irq_disable(struct ath10k *ar) +{ ath10k_ce_disable_interrupts(ar); ath10k_pci_disable_and_clear_legacy_irq(ar); - /* FIXME: How to mask all MSI interrupts? */ + ath10k_pci_irq_msi_fw_mask(ar); +} + +static void ath10k_pci_irq_sync(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int i; for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) synchronize_irq(ar_pci->pdev->irq + i); @@ -1138,7 +1192,7 @@ static void ath10k_pci_irq_enable(struct ath10k *ar) { ath10k_ce_enable_interrupts(ar); ath10k_pci_enable_legacy_irq(ar); - /* FIXME: How to unmask all MSI interrupts? 
*/ + ath10k_pci_irq_msi_fw_unmask(ar); } static int ath10k_pci_hif_start(struct ath10k *ar) @@ -1151,64 +1205,74 @@ static int ath10k_pci_hif_start(struct ath10k *ar) return 0; } -static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) +static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) { struct ath10k *ar; - struct ath10k_pci *ar_pci; - struct ath10k_ce_pipe *ce_hdl; - u32 buf_sz; - struct sk_buff *netbuf; - u32 ce_data; + struct ath10k_ce_pipe *ce_pipe; + struct ath10k_ce_ring *ce_ring; + struct sk_buff *skb; + int i; - buf_sz = pipe_info->buf_sz; + ar = pci_pipe->hif_ce_state; + ce_pipe = pci_pipe->ce_hdl; + ce_ring = ce_pipe->dest_ring; - /* Unused Copy Engine */ - if (buf_sz == 0) + if (!ce_ring) return; - ar = pipe_info->hif_ce_state; - ar_pci = ath10k_pci_priv(ar); - ce_hdl = pipe_info->ce_hdl; + if (!pci_pipe->buf_sz) + return; + + for (i = 0; i < ce_ring->nentries; i++) { + skb = ce_ring->per_transfer_context[i]; + if (!skb) + continue; + + ce_ring->per_transfer_context[i] = NULL; - while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf, - &ce_data) == 0) { - dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr, - netbuf->len + skb_tailroom(netbuf), + dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, + skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); - dev_kfree_skb_any(netbuf); + dev_kfree_skb_any(skb); } } -static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) +static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) { struct ath10k *ar; struct ath10k_pci *ar_pci; - struct ath10k_ce_pipe *ce_hdl; - struct sk_buff *netbuf; - u32 ce_data; - unsigned int nbytes; + struct ath10k_ce_pipe *ce_pipe; + struct ath10k_ce_ring *ce_ring; + struct ce_desc *ce_desc; + struct sk_buff *skb; unsigned int id; - u32 buf_sz; + int i; - buf_sz = pipe_info->buf_sz; + ar = pci_pipe->hif_ce_state; + ar_pci = ath10k_pci_priv(ar); + ce_pipe = pci_pipe->ce_hdl; + ce_ring = ce_pipe->src_ring; - /* Unused Copy Engine */ - if (buf_sz == 0) + if (!ce_ring) return; - ar = pipe_info->hif_ce_state; - ar_pci = ath10k_pci_priv(ar); - ce_hdl = pipe_info->ce_hdl; + if (!pci_pipe->buf_sz) + return; - while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf, - &ce_data, &nbytes, &id) == 0) { - /* no need to call tx completion for NULL pointers */ - if (!netbuf) + ce_desc = ce_ring->shadow_base; + if (WARN_ON(!ce_desc)) + return; + + for (i = 0; i < ce_ring->nentries; i++) { + skb = ce_ring->per_transfer_context[i]; + if (!skb) continue; - ar_pci->msg_callbacks_current.tx_completion(ar, - netbuf, - id); + ce_ring->per_transfer_context[i] = NULL; + id = MS(__le16_to_cpu(ce_desc[i].flags), + CE_DESC_FLAGS_META_DATA); + + ar_pci->msg_callbacks_current.tx_completion(ar, skb); } } @@ -1266,6 +1330,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar) ath10k_pci_warm_reset(ar); ath10k_pci_irq_disable(ar); + ath10k_pci_irq_sync(ar); ath10k_pci_flush(ar); } @@ -1386,6 +1451,9 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state) &nbytes, &transfer_id, &flags)) return; + if (WARN_ON_ONCE(!xfer)) + return; + if (!xfer->wait_for_resp) { ath10k_warn(ar, "unexpected: BMI data received; ignoring\n"); return; @@ -1569,23 +1637,40 @@ static int ath10k_pci_init_config(struct ath10k *ar) return 0; } -static int ath10k_pci_alloc_ce(struct ath10k *ar) +static int ath10k_pci_alloc_pipes(struct ath10k *ar) { + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_pci_pipe *pipe; int i, ret; for (i = 0; i < CE_COUNT; i++) { - 
ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]); + pipe = &ar_pci->pipe_info[i]; + pipe->ce_hdl = &ar_pci->ce_states[i]; + pipe->pipe_num = i; + pipe->hif_ce_state = ar; + + ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i], + ath10k_pci_ce_send_done, + ath10k_pci_ce_recv_data); if (ret) { ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n", i, ret); return ret; } + + /* Last CE is Diagnostic Window */ + if (i == CE_COUNT - 1) { + ar_pci->ce_diag = pipe->ce_hdl; + continue; + } + + pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max); } return 0; } -static void ath10k_pci_free_ce(struct ath10k *ar) +static void ath10k_pci_free_pipes(struct ath10k *ar) { int i; @@ -1593,39 +1678,17 @@ static void ath10k_pci_free_ce(struct ath10k *ar) ath10k_ce_free_pipe(ar, i); } -static int ath10k_pci_ce_init(struct ath10k *ar) +static int ath10k_pci_init_pipes(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_pci_pipe *pipe_info; - const struct ce_attr *attr; - int pipe_num, ret; + int i, ret; - for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { - pipe_info = &ar_pci->pipe_info[pipe_num]; - pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num]; - pipe_info->pipe_num = pipe_num; - pipe_info->hif_ce_state = ar; - attr = &host_ce_config_wlan[pipe_num]; - - ret = ath10k_ce_init_pipe(ar, pipe_num, attr, - ath10k_pci_ce_send_done, - ath10k_pci_ce_recv_data); + for (i = 0; i < CE_COUNT; i++) { + ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]); if (ret) { ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n", - pipe_num, ret); + i, ret); return ret; } - - if (pipe_num == CE_COUNT - 1) { - /* - * Reserve the ultimate CE for - * diagnostic Window support - */ - ar_pci->ce_diag = pipe_info->ce_hdl; - continue; - } - - pipe_info->buf_sz = (size_t)(attr->src_sz_max); } return 0; @@ -1666,93 +1729,167 @@ static void ath10k_pci_warm_reset_si0(struct ath10k *ar) msleep(10); } -static int ath10k_pci_warm_reset(struct ath10k *ar) +static void ath10k_pci_warm_reset_cpu(struct ath10k *ar) { u32 val; - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n"); - - /* debug */ - val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + - PCIE_INTR_CAUSE_ADDRESS); - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", - val); + ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0); - val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + - CPU_INTR_ADDRESS); - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n", - val); + val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + + SOC_RESET_CONTROL_ADDRESS); + ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, + val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK); +} - /* disable pending irqs */ - ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + - PCIE_INTR_ENABLE_ADDRESS, 0); +static void ath10k_pci_warm_reset_ce(struct ath10k *ar) +{ + u32 val; - ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + - PCIE_INTR_CLR_ADDRESS, ~0); + val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + + SOC_RESET_CONTROL_ADDRESS); - msleep(100); + ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, + val | SOC_RESET_CONTROL_CE_RST_MASK); + msleep(10); + ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, + val & ~SOC_RESET_CONTROL_CE_RST_MASK); +} - /* clear fw indicator */ - ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0); +static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar) +{ + u32 val; - /* clear target LF timer interrupts */ val = 
ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS); ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS, val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK); +} - /* reset CE */ - val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + - SOC_RESET_CONTROL_ADDRESS); - ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, - val | SOC_RESET_CONTROL_CE_RST_MASK); - val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + - SOC_RESET_CONTROL_ADDRESS); - msleep(10); +static int ath10k_pci_warm_reset(struct ath10k *ar) +{ + int ret; - /* unreset CE */ - ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, - val & ~SOC_RESET_CONTROL_CE_RST_MASK); - val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + - SOC_RESET_CONTROL_ADDRESS); - msleep(10); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n"); + + spin_lock_bh(&ar->data_lock); + ar->stats.fw_warm_reset_counter++; + spin_unlock_bh(&ar->data_lock); + ath10k_pci_irq_disable(ar); + + /* Make sure the target CPU is not doing anything dangerous, e.g. if it + * were to access copy engine while host performs copy engine reset + * then it is possible for the device to confuse pci-e controller to + * the point of bringing host system to a complete stop (i.e. hang). + */ ath10k_pci_warm_reset_si0(ar); + ath10k_pci_warm_reset_cpu(ar); + ath10k_pci_init_pipes(ar); + ath10k_pci_wait_for_target_init(ar); - /* debug */ - val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + - PCIE_INTR_CAUSE_ADDRESS); - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", - val); + ath10k_pci_warm_reset_clear_lf(ar); + ath10k_pci_warm_reset_ce(ar); + ath10k_pci_warm_reset_cpu(ar); + ath10k_pci_init_pipes(ar); - val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + - CPU_INTR_ADDRESS); - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n", - val); + ret = ath10k_pci_wait_for_target_init(ar); + if (ret) { + ath10k_warn(ar, "failed to wait for target init: %d\n", ret); + return ret; + } - /* CPU warm reset */ - val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + - SOC_RESET_CONTROL_ADDRESS); - ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, - val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n"); - val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + - SOC_RESET_CONTROL_ADDRESS); - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", - val); + return 0; +} - msleep(100); +static int ath10k_pci_chip_reset(struct ath10k *ar) +{ + int i, ret; + u32 val; - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n"); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset\n"); + + /* Some hardware revisions (e.g. CUS223v2) has issues with cold reset. + * It is thus preferred to use warm reset which is safer but may not be + * able to recover the device from all possible fail scenarios. + * + * Warm reset doesn't always work on first try so attempt it a few + * times before giving up. + */ + for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) { + ret = ath10k_pci_warm_reset(ar); + if (ret) { + ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n", + i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, + ret); + continue; + } + + /* FIXME: Sometimes copy engine doesn't recover after warm + * reset. In most cases this needs cold reset. In some of these + * cases the device is in such a state that a cold reset may + * lock up the host. 
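+ *
+ * Recovery ladder implemented by the loop below (sketch): warm reset,
+ * re-init the CE pipes, then a diag read of a host interest register;
+ * on any failure retry up to ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS times
+ * and only then fall back to cold reset (unless reset_mode forbids it).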
+ * + * Reading any host interest register via copy engine is + * sufficient to verify if device is capable of booting + * firmware blob. + */ + ret = ath10k_pci_init_pipes(ar); + if (ret) { + ath10k_warn(ar, "failed to init copy engine: %d\n", + ret); + continue; + } + + ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS, + &val); + if (ret) { + ath10k_warn(ar, "failed to poke copy engine: %d\n", + ret); + continue; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n"); + return 0; + } + + if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) { + ath10k_warn(ar, "refusing cold reset as requested\n"); + return -EPERM; + } + + ret = ath10k_pci_cold_reset(ar); + if (ret) { + ath10k_warn(ar, "failed to cold reset: %d\n", ret); + return ret; + } + + ret = ath10k_pci_wait_for_target_init(ar); + if (ret) { + ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", + ret); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (cold)\n"); return 0; } -static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset) +static int ath10k_pci_hif_power_up(struct ath10k *ar) { int ret; + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n"); + + ret = ath10k_pci_wake(ar); + if (ret) { + ath10k_err(ar, "failed to wake up target: %d\n", ret); + return ret; + } + /* * Bring the target up cleanly. * @@ -1763,26 +1900,16 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset) * is in an unexpected state. We try to catch that here in order to * reset the Target and retry the probe. */ - if (cold_reset) - ret = ath10k_pci_cold_reset(ar); - else - ret = ath10k_pci_warm_reset(ar); - + ret = ath10k_pci_chip_reset(ar); if (ret) { - ath10k_err(ar, "failed to reset target: %d\n", ret); - goto err; + ath10k_err(ar, "failed to reset chip: %d\n", ret); + goto err_sleep; } - ret = ath10k_pci_ce_init(ar); + ret = ath10k_pci_init_pipes(ar); if (ret) { ath10k_err(ar, "failed to initialize CE: %d\n", ret); - goto err; - } - - ret = ath10k_pci_wait_for_target_init(ar); - if (ret) { - ath10k_err(ar, "failed to wait for target to init: %d\n", ret); - goto err_ce; + goto err_sleep; } ret = ath10k_pci_init_config(ar); @@ -1801,73 +1928,21 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset) err_ce: ath10k_pci_ce_deinit(ar); - ath10k_pci_warm_reset(ar); -err: - return ret; -} - -static int ath10k_pci_hif_power_up_warm(struct ath10k *ar) -{ - int i, ret; - - /* - * Sometime warm reset succeeds after retries. - * - * FIXME: It might be possible to tune ath10k_pci_warm_reset() to work - * at first try. - */ - for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) { - ret = __ath10k_pci_hif_power_up(ar, false); - if (ret == 0) - break; - - ath10k_warn(ar, "failed to warm reset (attempt %d out of %d): %d\n", - i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret); - } +err_sleep: + ath10k_pci_sleep(ar); return ret; } -static int ath10k_pci_hif_power_up(struct ath10k *ar) -{ - int ret; - - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n"); - - /* - * Hardware CUS232 version 2 has some issues with cold reset and the - * preferred (and safer) way to perform a device reset is through a - * warm reset. - * - * Warm reset doesn't always work though so fall back to cold reset may - * be necessary. 
- */ - ret = ath10k_pci_hif_power_up_warm(ar); - if (ret) { - ath10k_warn(ar, "failed to power up target using warm reset: %d\n", - ret); - - if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) - return ret; - - ath10k_warn(ar, "trying cold reset\n"); - - ret = __ath10k_pci_hif_power_up(ar, true); - if (ret) { - ath10k_err(ar, "failed to power up target using cold reset too (%d)\n", - ret); - return ret; - } - } - - return 0; -} - static void ath10k_pci_hif_power_down(struct ath10k *ar) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n"); - ath10k_pci_warm_reset(ar); + /* Currently hif_power_up performs effectively a reset and hif_stop + * resets the chip as well so there's no point in resetting here. + */ + + ath10k_pci_sleep(ar); } #ifdef CONFIG_PM @@ -1921,6 +1996,8 @@ static int ath10k_pci_hif_resume(struct ath10k *ar) static const struct ath10k_hif_ops ath10k_pci_hif_ops = { .tx_sg = ath10k_pci_hif_tx_sg, + .diag_read = ath10k_pci_hif_diag_read, + .diag_write = ath10k_pci_diag_write_mem, .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, .start = ath10k_pci_hif_start, .stop = ath10k_pci_hif_stop, @@ -1931,6 +2008,8 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = { .get_free_queue_number = ath10k_pci_hif_get_free_queue_number, .power_up = ath10k_pci_hif_power_up, .power_down = ath10k_pci_hif_power_down, + .read32 = ath10k_pci_read32, + .write32 = ath10k_pci_write32, #ifdef CONFIG_PM .suspend = ath10k_pci_hif_suspend, .resume = ath10k_pci_hif_resume, @@ -2250,14 +2329,14 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar) if (ar_pci->num_msi_intrs == 0) /* Fix potential race by repeating CORE_BASE writes */ - ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + - PCIE_INTR_ENABLE_ADDRESS, - PCIE_INTR_FIRMWARE_MASK | - PCIE_INTR_CE_MASK_ALL); + ath10k_pci_enable_legacy_irq(ar); mdelay(10); } while (time_before(jiffies, timeout)); + ath10k_pci_disable_and_clear_legacy_irq(ar); + ath10k_pci_irq_msi_fw_mask(ar); + if (val == 0xffffffff) { ath10k_err(ar, "failed to read device register, device is gone\n"); return -EIO; @@ -2287,6 +2366,12 @@ static int ath10k_pci_cold_reset(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n"); + spin_lock_bh(&ar->data_lock); + + ar->stats.fw_cold_reset_counter++; + + spin_unlock_bh(&ar->data_lock); + /* Put Target, including PCIe, into RESET. */ val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS); val |= 1; @@ -2400,6 +2485,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, u32 chip_id; ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, + ATH10K_BUS_PCI, &ath10k_pci_hif_ops); if (!ar) { dev_err(&pdev->dev, "failed to allocate core\n"); @@ -2435,7 +2521,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, goto err_sleep; } - ret = ath10k_pci_alloc_ce(ar); + ret = ath10k_pci_alloc_pipes(ar); if (ret) { ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", ret); @@ -2443,25 +2529,12 @@ static int ath10k_pci_probe(struct pci_dev *pdev, } ath10k_pci_ce_deinit(ar); - - ret = ath10k_ce_disable_interrupts(ar); - if (ret) { - ath10k_err(ar, "failed to disable copy engine interrupts: %d\n", - ret); - goto err_free_ce; - } - - /* Workaround: There's no known way to mask all possible interrupts via - * device CSR. The only way to make sure device doesn't assert - * interrupts is to reset it. Interrupts are then disabled on host - * after handlers are registered. 
- */ - ath10k_pci_warm_reset(ar); + ath10k_pci_irq_disable(ar); ret = ath10k_pci_init_irq(ar); if (ret) { ath10k_err(ar, "failed to init irqs: %d\n", ret); - goto err_free_ce; + goto err_free_pipes; } ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n", @@ -2474,8 +2547,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, goto err_deinit_irq; } - /* This shouldn't race as the device has been reset above. */ - ath10k_pci_irq_disable(ar); + ath10k_pci_sleep(ar); ret = ath10k_core_register(ar, chip_id); if (ret) { @@ -2492,8 +2564,8 @@ err_free_irq: err_deinit_irq: ath10k_pci_deinit_irq(ar); -err_free_ce: - ath10k_pci_free_ce(ar); +err_free_pipes: + ath10k_pci_free_pipes(ar); err_sleep: ath10k_pci_sleep(ar); @@ -2527,8 +2599,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev) ath10k_pci_kill_tasklet(ar); ath10k_pci_deinit_irq(ar); ath10k_pci_ce_deinit(ar); - ath10k_pci_free_ce(ar); - ath10k_pci_sleep(ar); + ath10k_pci_free_pipes(ar); ath10k_pci_release(ar); ath10k_core_destroy(ar); } @@ -2565,5 +2636,7 @@ module_exit(ath10k_pci_exit); MODULE_AUTHOR("Qualcomm Atheros"); MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_3_FILE); +MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE); +MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE); +MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c index 3e1454b74e007200ccc8c6ee4a3f1647cefceebd..63ce61fcdac85c070033091fa9ac40e268021436 100644 --- a/drivers/net/wireless/ath/ath10k/spectral.c +++ b/drivers/net/wireless/ath/ath10k/spectral.c @@ -56,14 +56,14 @@ static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len, } int ath10k_spectral_process_fft(struct ath10k *ar, - struct wmi_single_phyerr_rx_event *event, - struct phyerr_fft_report *fftr, + const struct wmi_phyerr *phyerr, + const struct phyerr_fft_report *fftr, size_t bin_len, u64 tsf) { struct fft_sample_ath10k *fft_sample; u8 buf[sizeof(*fft_sample) + SPECTRAL_ATH10K_MAX_NUM_BINS]; u16 freq1, freq2, total_gain_db, base_pwr_db, length, peak_mag; - u32 reg0, reg1, nf_list1, nf_list2; + u32 reg0, reg1; u8 chain_idx, *bins; int dc_pos; @@ -82,7 +82,7 @@ int ath10k_spectral_process_fft(struct ath10k *ar, /* TODO: there might be a reason why the hardware reports 20/40/80 MHz, * but the results/plots suggest that its actually 22/44/88 MHz. 
 */ - switch (event->hdr.chan_width_mhz) { + switch (phyerr->chan_width_mhz) { case 20: fft_sample->chan_width_mhz = 22; break; @@ -101,7 +101,7 @@ int ath10k_spectral_process_fft(struct ath10k *ar, fft_sample->chan_width_mhz = 88; break; default: - fft_sample->chan_width_mhz = event->hdr.chan_width_mhz; + fft_sample->chan_width_mhz = phyerr->chan_width_mhz; } fft_sample->relpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB); @@ -110,36 +110,22 @@ int ath10k_spectral_process_fft(struct ath10k *ar, peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG); fft_sample->max_magnitude = __cpu_to_be16(peak_mag); fft_sample->max_index = MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX); - fft_sample->rssi = event->hdr.rssi_combined; + fft_sample->rssi = phyerr->rssi_combined; total_gain_db = MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB); base_pwr_db = MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB); fft_sample->total_gain_db = __cpu_to_be16(total_gain_db); fft_sample->base_pwr_db = __cpu_to_be16(base_pwr_db); - freq1 = __le16_to_cpu(event->hdr.freq1); - freq2 = __le16_to_cpu(event->hdr.freq2); + freq1 = __le16_to_cpu(phyerr->freq1); + freq2 = __le16_to_cpu(phyerr->freq2); fft_sample->freq1 = __cpu_to_be16(freq1); fft_sample->freq2 = __cpu_to_be16(freq2); - nf_list1 = __le32_to_cpu(event->hdr.nf_list_1); - nf_list2 = __le32_to_cpu(event->hdr.nf_list_2); chain_idx = MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX); - switch (chain_idx) { - case 0: - fft_sample->noise = __cpu_to_be16(nf_list1 & 0xffffu); - break; - case 1: - fft_sample->noise = __cpu_to_be16((nf_list1 >> 16) & 0xffffu); - break; - case 2: - fft_sample->noise = __cpu_to_be16(nf_list2 & 0xffffu); - break; - case 3: - fft_sample->noise = __cpu_to_be16((nf_list2 >> 16) & 0xffffu); - break; - } + fft_sample->noise = __cpu_to_be16( + __le16_to_cpu(phyerr->nf_chains[chain_idx])); bins = (u8 *)fftr; bins += sizeof(*fftr); diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h index ddc57c55727214a386813e7f0a99393930161683..042f5b302c75ebc3121cd56cd7bacd830a10a8ce 100644 --- a/drivers/net/wireless/ath/ath10k/spectral.h +++ b/drivers/net/wireless/ath/ath10k/spectral.h @@ -47,8 +47,8 @@ enum ath10k_spectral_mode { #ifdef CONFIG_ATH10K_DEBUGFS int ath10k_spectral_process_fft(struct ath10k *ar, - struct wmi_single_phyerr_rx_event *event, - struct phyerr_fft_report *fftr, + const struct wmi_phyerr *phyerr, + const struct phyerr_fft_report *fftr, size_t bin_len, u64 tsf); int ath10k_spectral_start(struct ath10k *ar); int ath10k_spectral_vif_stop(struct ath10k_vif *arvif); @@ -59,8 +59,8 @@ void ath10k_spectral_destroy(struct ath10k *ar); static inline int ath10k_spectral_process_fft(struct ath10k *ar, - struct wmi_single_phyerr_rx_event *event, - struct phyerr_fft_report *fftr, + const struct wmi_phyerr *phyerr, + const struct phyerr_fft_report *fftr, size_t bin_len, u64 tsf) { return 0; diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h index 574b75ab260988a6525339ed44623e71e790a273..b289378b6e3e2cb3d17153fade7a88f78132bb31 100644 --- a/drivers/net/wireless/ath/ath10k/trace.h +++ b/drivers/net/wireless/ath/ath10k/trace.h @@ -20,6 +20,15 @@ #include <linux/tracepoint.h> #include "core.h" +#if !defined(_TRACE_H_) +static inline u32 ath10k_frm_hdr_len(const void *buf) +{ + const struct ieee80211_hdr *hdr = buf; + + return ieee80211_hdrlen(hdr->frame_control); +} +#endif + #define _TRACE_H_ /* create empty functions when tracing is disabled */ @@ -138,7 +147,8 @@ TRACE_EVENT(ath10k_log_dbg_dump, );
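Note: the constified trace events that follow, and the new ones added further down in this hunk, all use the ftrace dynamic-array pattern: TP_STRUCT__entry() reserves a variable-sized region in the trace ring buffer and TP_fast_assign() copies the payload into it. A minimal self-contained sketch of that pattern (the event and field names here are illustrative, not part of this patch):

TRACE_EVENT(example_blob,
	TP_PROTO(const void *buf, size_t len),

	TP_ARGS(buf, len),

	TP_STRUCT__entry(
		__field(size_t, len)
		/* reserves len bytes in the trace ring buffer */
		__dynamic_array(u8, data, len)
	),

	TP_fast_assign(
		__entry->len = len;
		/* copy the blob into the reserved region */
		memcpy(__get_dynamic_array(data), buf, len);
	),

	TP_printk("len %zu", __entry->len)
);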
TRACE_EVENT(ath10k_wmi_cmd, - TP_PROTO(struct ath10k *ar, int id, void *buf, size_t buf_len, int ret), + TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len, + int ret), TP_ARGS(ar, id, buf, buf_len, ret), @@ -171,7 +181,7 @@ TRACE_EVENT(ath10k_wmi_cmd, ); TRACE_EVENT(ath10k_wmi_event, - TP_PROTO(struct ath10k *ar, int id, void *buf, size_t buf_len), + TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len), TP_ARGS(ar, id, buf, buf_len), @@ -201,7 +211,7 @@ TRACE_EVENT(ath10k_wmi_event, ); TRACE_EVENT(ath10k_htt_stats, - TP_PROTO(struct ath10k *ar, void *buf, size_t buf_len), + TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len), TP_ARGS(ar, buf, buf_len), @@ -228,7 +238,7 @@ TRACE_EVENT(ath10k_htt_stats, ); TRACE_EVENT(ath10k_wmi_dbglog, - TP_PROTO(struct ath10k *ar, void *buf, size_t buf_len), + TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len), TP_ARGS(ar, buf, buf_len), @@ -254,6 +264,195 @@ TRACE_EVENT(ath10k_wmi_dbglog, ) ); +TRACE_EVENT(ath10k_htt_pktlog, + TP_PROTO(struct ath10k *ar, const void *buf, u16 buf_len), + + TP_ARGS(ar, buf, buf_len), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(u16, buf_len) + __dynamic_array(u8, pktlog, buf_len) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ar->dev)); + __assign_str(driver, dev_driver_string(ar->dev)); + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(pktlog), buf, buf_len); + ), + + TP_printk( + "%s %s size %hu", + __get_str(driver), + __get_str(device), + __entry->buf_len + ) +); + +TRACE_EVENT(ath10k_htt_tx, + TP_PROTO(struct ath10k *ar, u16 msdu_id, u16 msdu_len, + u8 vdev_id, u8 tid), + + TP_ARGS(ar, msdu_id, msdu_len, vdev_id, tid), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(u16, msdu_id) + __field(u16, msdu_len) + __field(u8, vdev_id) + __field(u8, tid) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ar->dev)); + __assign_str(driver, dev_driver_string(ar->dev)); + __entry->msdu_id = msdu_id; + __entry->msdu_len = msdu_len; + __entry->vdev_id = vdev_id; + __entry->tid = tid; + ), + + TP_printk( + "%s %s msdu_id %d msdu_len %d vdev_id %d tid %d", + __get_str(driver), + __get_str(device), + __entry->msdu_id, + __entry->msdu_len, + __entry->vdev_id, + __entry->tid + ) +); + +TRACE_EVENT(ath10k_txrx_tx_unref, + TP_PROTO(struct ath10k *ar, u16 msdu_id), + + TP_ARGS(ar, msdu_id), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(u16, msdu_id) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ar->dev)); + __assign_str(driver, dev_driver_string(ar->dev)); + __entry->msdu_id = msdu_id; + ), + + TP_printk( + "%s %s msdu_id %d", + __get_str(driver), + __get_str(device), + __entry->msdu_id + ) +); + +DECLARE_EVENT_CLASS(ath10k_hdr_event, + TP_PROTO(struct ath10k *ar, const void *data, size_t len), + + TP_ARGS(ar, data, len), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(size_t, len) + __dynamic_array(u8, data, ath10k_frm_hdr_len(data)) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ar->dev)); + __assign_str(driver, dev_driver_string(ar->dev)); + __entry->len = ath10k_frm_hdr_len(data); + memcpy(__get_dynamic_array(data), data, __entry->len); + ), + + TP_printk( + "%s %s len %zu\n", + __get_str(driver), + __get_str(device), + __entry->len + ) +); + 
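Note: the two event classes (ath10k_hdr_event above, ath10k_payload_event below) split a single frame at the 802.11 header boundary computed by ath10k_frm_hdr_len(), so headers can be captured cheaply while full payloads remain a separate opt-in. A sketch of the intended call pattern, matching the wmi.c hunks later in this patch (the wrapper function itself is illustrative):

/* Both tracepoints take the complete frame; the hdr event records only
 * the 802.11 header, the payload event records the remainder.
 */
static void example_trace_tx_frame(struct ath10k *ar, struct sk_buff *skb)
{
	trace_ath10k_tx_hdr(ar, skb->data, skb->len);
	trace_ath10k_tx_payload(ar, skb->data, skb->len);
}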
+DECLARE_EVENT_CLASS(ath10k_payload_event, + TP_PROTO(struct ath10k *ar, const void *data, size_t len), + + TP_ARGS(ar, data, len), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(size_t, len) + __dynamic_array(u8, payload, (len - ath10k_frm_hdr_len(data))) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ar->dev)); + __assign_str(driver, dev_driver_string(ar->dev)); + __entry->len = len - ath10k_frm_hdr_len(data); + memcpy(__get_dynamic_array(payload), + data + ath10k_frm_hdr_len(data), __entry->len); + ), + + TP_printk( + "%s %s len %zu\n", + __get_str(driver), + __get_str(device), + __entry->len + ) +); + +DEFINE_EVENT(ath10k_hdr_event, ath10k_tx_hdr, + TP_PROTO(struct ath10k *ar, const void *data, size_t len), + TP_ARGS(ar, data, len) +); + +DEFINE_EVENT(ath10k_payload_event, ath10k_tx_payload, + TP_PROTO(struct ath10k *ar, const void *data, size_t len), + TP_ARGS(ar, data, len) +); + +DEFINE_EVENT(ath10k_hdr_event, ath10k_rx_hdr, + TP_PROTO(struct ath10k *ar, const void *data, size_t len), + TP_ARGS(ar, data, len) +); + +DEFINE_EVENT(ath10k_payload_event, ath10k_rx_payload, + TP_PROTO(struct ath10k *ar, const void *data, size_t len), + TP_ARGS(ar, data, len) +); + +TRACE_EVENT(ath10k_htt_rx_desc, + TP_PROTO(struct ath10k *ar, const void *data, size_t len), + + TP_ARGS(ar, data, len), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(u16, len) + __dynamic_array(u8, rxdesc, len) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ar->dev)); + __assign_str(driver, dev_driver_string(ar->dev)); + __entry->len = len; + memcpy(__get_dynamic_array(rxdesc), data, len); + ), + + TP_printk( + "%s %s rxdesc len %d", + __get_str(driver), + __get_str(device), + __entry->len + ) +); + #endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/ /* we don't want to use include/trace/events */ diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index a0cbc21d0d4b9146ff4a4f578f29be2f88bcdc13..7579de8e7a8ccf9a41beafa9f3dc4dedcc7b004e 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -78,6 +78,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, info = IEEE80211_SKB_CB(msdu); memset(&info->status, 0, sizeof(info->status)); + trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id); if (tx_done->discard) { ieee80211_free_txskb(htt->ar->hw, msdu); @@ -145,7 +146,8 @@ static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id, mapped = !!ath10k_peer_find(ar, vdev_id, addr); spin_unlock_bh(&ar->data_lock); - mapped == expect_mapped; + (mapped == expect_mapped || + test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)); }), 3*HZ); if (ret <= 0) diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 2c42bd504b792cce4bcf23e74c0184bcf6ba9569..c0f3e4d09263e38a39b5e471faba6fc0d321a7e0 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -609,6 +609,40 @@ static struct wmi_cmd_map wmi_10_2_cmd_map = { .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID, }; +static void +ath10k_wmi_put_wmi_channel(struct wmi_channel *ch, + const struct wmi_channel_arg *arg) +{ + u32 flags = 0; + + memset(ch, 0, sizeof(*ch)); + + if (arg->passive) + flags |= WMI_CHAN_FLAG_PASSIVE; + if (arg->allow_ibss) + flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED; + if (arg->allow_ht) + flags |= WMI_CHAN_FLAG_ALLOW_HT; + if (arg->allow_vht) + flags 
|= WMI_CHAN_FLAG_ALLOW_VHT; + if (arg->ht40plus) + flags |= WMI_CHAN_FLAG_HT40_PLUS; + if (arg->chan_radar) + flags |= WMI_CHAN_FLAG_DFS; + + ch->mhz = __cpu_to_le32(arg->freq); + ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1); + ch->band_center_freq2 = 0; + ch->min_power = arg->min_power; + ch->max_power = arg->max_power; + ch->reg_power = arg->max_reg_power; + ch->antenna_max = arg->max_antenna_gain; + + /* mode & flags share storage */ + ch->mode = arg->mode; + ch->flags |= __cpu_to_le32(flags); +} + int ath10k_wmi_wait_for_service_ready(struct ath10k *ar) { int ret; @@ -745,6 +779,10 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id) ath10k_wmi_tx_beacons_nowait(ar); ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id); + + if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) + ret = -ESHUTDOWN; + (ret != -EAGAIN); }), 3*HZ); @@ -800,6 +838,8 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb) ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n", wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); + trace_ath10k_tx_hdr(ar, skb->data, skb->len); + trace_ath10k_tx_payload(ar, skb->data, skb->len); /* Send the management frame buffer to the target */ ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid); @@ -1073,13 +1113,46 @@ static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band) return rate_idx; } +/* If keys are configured, HW decrypts all frames + * with protected bit set. Mark such frames as decrypted. + */ +static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar, + struct sk_buff *skb, + struct ieee80211_rx_status *status) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + unsigned int hdrlen; + bool peer_key; + u8 *addr, keyidx; + + if (!ieee80211_is_auth(hdr->frame_control) || + !ieee80211_has_protected(hdr->frame_control)) + return; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN)) + return; + + keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT; + addr = ieee80211_get_SA(hdr); + + spin_lock_bh(&ar->data_lock); + peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx); + spin_unlock_bh(&ar->data_lock); + + if (peer_key) { + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac wep key present for peer %pM\n", addr); + status->flag |= RX_FLAG_DECRYPTED; + } +} + static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) { struct wmi_mgmt_rx_event_v1 *ev_v1; struct wmi_mgmt_rx_event_v2 *ev_v2; struct wmi_mgmt_rx_hdr_v1 *ev_hdr; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); - struct ieee80211_channel *ch; struct ieee80211_hdr *hdr; u32 rx_status; u32 channel; @@ -1127,30 +1200,34 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) return 0; } - if (rx_status & WMI_RX_STATUS_ERR_CRC) - status->flag |= RX_FLAG_FAILED_FCS_CRC; + if (rx_status & WMI_RX_STATUS_ERR_CRC) { + dev_kfree_skb(skb); + return 0; + } + if (rx_status & WMI_RX_STATUS_ERR_MIC) status->flag |= RX_FLAG_MMIC_ERROR; - /* HW can Rx CCK rates on 5GHz. In that case phy_mode is set to + /* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to * MODE_11B. This means phy_mode is not a reliable source for the band - * of mgmt rx. 
*/ - - ch = ar->scan_channel; - if (!ch) - ch = ar->rx_channel; - - if (ch) { - status->band = ch->band; - - if (phy_mode == MODE_11B && - status->band == IEEE80211_BAND_5GHZ) - ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n"); + * of mgmt rx. + */ + if (channel >= 1 && channel <= 14) { + status->band = IEEE80211_BAND_2GHZ; + } else if (channel >= 36 && channel <= 165) { + status->band = IEEE80211_BAND_5GHZ; } else { - ath10k_warn(ar, "using (unreliable) phy_mode to extract band for mgmt rx\n"); - status->band = phy_mode_to_band(phy_mode); + /* Shouldn't happen unless list of advertised channels to + * mac80211 has been changed. + */ + WARN_ON_ONCE(1); + dev_kfree_skb(skb); + return 0; } + if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ) + ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n"); + status->freq = ieee80211_channel_to_frequency(channel, status->band); status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR; status->rate_idx = get_rate_idx(rate, status->band); @@ -1160,6 +1237,8 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) hdr = (struct ieee80211_hdr *)skb->data; fc = le16_to_cpu(hdr->frame_control); + ath10k_wmi_handle_wep_reauth(ar, skb, status); + /* FW delivers WEP Shared Auth frame with Protected Bit set and * encrypted payload. However in case of PMF it delivers decrypted * frames with Protected Bit set. */ @@ -1295,14 +1374,196 @@ static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb) return 0; } +static void ath10k_wmi_pull_pdev_stats(const struct wmi_pdev_stats *src, + struct ath10k_fw_stats_pdev *dst) +{ + const struct wal_dbg_tx_stats *tx = &src->wal.tx; + const struct wal_dbg_rx_stats *rx = &src->wal.rx; + + dst->ch_noise_floor = __le32_to_cpu(src->chan_nf); + dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count); + dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count); + dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count); + dst->cycle_count = __le32_to_cpu(src->cycle_count); + dst->phy_err_count = __le32_to_cpu(src->phy_err_count); + dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr); + + dst->comp_queued = __le32_to_cpu(tx->comp_queued); + dst->comp_delivered = __le32_to_cpu(tx->comp_delivered); + dst->msdu_enqued = __le32_to_cpu(tx->msdu_enqued); + dst->mpdu_enqued = __le32_to_cpu(tx->mpdu_enqued); + dst->wmm_drop = __le32_to_cpu(tx->wmm_drop); + dst->local_enqued = __le32_to_cpu(tx->local_enqued); + dst->local_freed = __le32_to_cpu(tx->local_freed); + dst->hw_queued = __le32_to_cpu(tx->hw_queued); + dst->hw_reaped = __le32_to_cpu(tx->hw_reaped); + dst->underrun = __le32_to_cpu(tx->underrun); + dst->tx_abort = __le32_to_cpu(tx->tx_abort); + dst->mpdus_requed = __le32_to_cpu(tx->mpdus_requed); + dst->tx_ko = __le32_to_cpu(tx->tx_ko); + dst->data_rc = __le32_to_cpu(tx->data_rc); + dst->self_triggers = __le32_to_cpu(tx->self_triggers); + dst->sw_retry_failure = __le32_to_cpu(tx->sw_retry_failure); + dst->illgl_rate_phy_err = __le32_to_cpu(tx->illgl_rate_phy_err); + dst->pdev_cont_xretry = __le32_to_cpu(tx->pdev_cont_xretry); + dst->pdev_tx_timeout = __le32_to_cpu(tx->pdev_tx_timeout); + dst->pdev_resets = __le32_to_cpu(tx->pdev_resets); + dst->phy_underrun = __le32_to_cpu(tx->phy_underrun); + dst->txop_ovf = __le32_to_cpu(tx->txop_ovf); + + dst->mid_ppdu_route_change = __le32_to_cpu(rx->mid_ppdu_route_change); + dst->status_rcvd = __le32_to_cpu(rx->status_rcvd); + dst->r0_frags = __le32_to_cpu(rx->r0_frags); + dst->r1_frags = __le32_to_cpu(rx->r1_frags); 
+ dst->r2_frags = __le32_to_cpu(rx->r2_frags); + dst->r3_frags = __le32_to_cpu(rx->r3_frags); + dst->htt_msdus = __le32_to_cpu(rx->htt_msdus); + dst->htt_mpdus = __le32_to_cpu(rx->htt_mpdus); + dst->loc_msdus = __le32_to_cpu(rx->loc_msdus); + dst->loc_mpdus = __le32_to_cpu(rx->loc_mpdus); + dst->oversize_amsdu = __le32_to_cpu(rx->oversize_amsdu); + dst->phy_errs = __le32_to_cpu(rx->phy_errs); + dst->phy_err_drop = __le32_to_cpu(rx->phy_err_drop); + dst->mpdu_errs = __le32_to_cpu(rx->mpdu_errs); +} + +static void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src, + struct ath10k_fw_stats_peer *dst) +{ + ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr); + dst->peer_rssi = __le32_to_cpu(src->peer_rssi); + dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate); +} + +static int ath10k_wmi_main_pull_fw_stats(struct ath10k *ar, + struct sk_buff *skb, + struct ath10k_fw_stats *stats) +{ + const struct wmi_stats_event *ev = (void *)skb->data; + u32 num_pdev_stats, num_vdev_stats, num_peer_stats; + int i; + + if (!skb_pull(skb, sizeof(*ev))) + return -EPROTO; + + num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); + num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); + num_peer_stats = __le32_to_cpu(ev->num_peer_stats); + + for (i = 0; i < num_pdev_stats; i++) { + const struct wmi_pdev_stats *src; + struct ath10k_fw_stats_pdev *dst; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_pull_pdev_stats(src, dst); + list_add_tail(&dst->list, &stats->pdevs); + } + + /* fw doesn't implement vdev stats */ + + for (i = 0; i < num_peer_stats; i++) { + const struct wmi_peer_stats *src; + struct ath10k_fw_stats_peer *dst; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_pull_peer_stats(src, dst); + list_add_tail(&dst->list, &stats->peers); + } + + return 0; +} + +static int ath10k_wmi_10x_pull_fw_stats(struct ath10k *ar, + struct sk_buff *skb, + struct ath10k_fw_stats *stats) +{ + const struct wmi_stats_event *ev = (void *)skb->data; + u32 num_pdev_stats, num_vdev_stats, num_peer_stats; + int i; + + if (!skb_pull(skb, sizeof(*ev))) + return -EPROTO; + + num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); + num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); + num_peer_stats = __le32_to_cpu(ev->num_peer_stats); + + for (i = 0; i < num_pdev_stats; i++) { + const struct wmi_10x_pdev_stats *src; + struct ath10k_fw_stats_pdev *dst; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_pull_pdev_stats(&src->old, dst); + + dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad); + dst->rts_bad = __le32_to_cpu(src->rts_bad); + dst->rts_good = __le32_to_cpu(src->rts_good); + dst->fcs_bad = __le32_to_cpu(src->fcs_bad); + dst->no_beacons = __le32_to_cpu(src->no_beacons); + dst->mib_int_count = __le32_to_cpu(src->mib_int_count); + + list_add_tail(&dst->list, &stats->pdevs); + } + + /* fw doesn't implement vdev stats */ + + for (i = 0; i < num_peer_stats; i++) { + const struct wmi_10x_peer_stats *src; + struct ath10k_fw_stats_peer *dst; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_pull_peer_stats(&src->old, dst); + + dst->peer_rx_rate = 
__le32_to_cpu(src->peer_rx_rate); + + list_add_tail(&dst->list, &stats->peers); + } + + return 0; +} + +int ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb, + struct ath10k_fw_stats *stats) +{ + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) + return ath10k_wmi_10x_pull_fw_stats(ar, skb, stats); + else + return ath10k_wmi_main_pull_fw_stats(ar, skb, stats); +} + static void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb) { - struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data; - ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n"); - - ath10k_debug_read_target_stats(ar, ev); + ath10k_debug_fw_stats_process(ar, skb); } static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, @@ -1579,6 +1840,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) struct wmi_bcn_info *bcn_info; struct ath10k_vif *arvif; struct sk_buff *bcn; + dma_addr_t paddr; int ret, vdev_id = 0; ev = (struct wmi_host_swba_event *)skb->data; @@ -1647,27 +1909,37 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) ath10k_warn(ar, "SWBA overrun on vdev %d\n", arvif->vdev_id); - dma_unmap_single(arvif->ar->dev, - ATH10K_SKB_CB(arvif->beacon)->paddr, - arvif->beacon->len, DMA_TO_DEVICE); - dev_kfree_skb_any(arvif->beacon); - arvif->beacon = NULL; + ath10k_mac_vif_beacon_free(arvif); } - ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev, - bcn->data, bcn->len, - DMA_TO_DEVICE); - ret = dma_mapping_error(arvif->ar->dev, - ATH10K_SKB_CB(bcn)->paddr); - if (ret) { - ath10k_warn(ar, "failed to map beacon: %d\n", ret); - dev_kfree_skb_any(bcn); - goto skip; + if (!arvif->beacon_buf) { + paddr = dma_map_single(arvif->ar->dev, bcn->data, + bcn->len, DMA_TO_DEVICE); + ret = dma_mapping_error(arvif->ar->dev, paddr); + if (ret) { + ath10k_warn(ar, "failed to map beacon: %d\n", + ret); + dev_kfree_skb_any(bcn); + goto skip; + } + + ATH10K_SKB_CB(bcn)->paddr = paddr; + } else { + if (bcn->len > IEEE80211_MAX_FRAME_LEN) { + ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n", + bcn->len, IEEE80211_MAX_FRAME_LEN); + skb_trim(bcn, IEEE80211_MAX_FRAME_LEN); + } + memcpy(arvif->beacon_buf, bcn->data, bcn->len); + ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr; } arvif->beacon = bcn; arvif->beacon_sent = false; + trace_ath10k_tx_hdr(ar, bcn->data, bcn->len); + trace_ath10k_tx_payload(ar, bcn->data, bcn->len); + ath10k_wmi_tx_beacon_nowait(arvif); skip: spin_unlock_bh(&ar->data_lock); @@ -1681,8 +1953,8 @@ static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, } static void ath10k_dfs_radar_report(struct ath10k *ar, - struct wmi_single_phyerr_rx_event *event, - struct phyerr_radar_report *rr, + const struct wmi_phyerr *phyerr, + const struct phyerr_radar_report *rr, u64 tsf) { u32 reg0, reg1, tsf32l; @@ -1715,12 +1987,12 @@ static void ath10k_dfs_radar_report(struct ath10k *ar, return; /* report event to DFS pattern detector */ - tsf32l = __le32_to_cpu(event->hdr.tsf_timestamp); + tsf32l = __le32_to_cpu(phyerr->tsf_timestamp); tsf64 = tsf & (~0xFFFFFFFFULL); tsf64 |= tsf32l; width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR); - rssi = event->hdr.rssi_combined; + rssi = phyerr->rssi_combined; /* hardware store this as 8 bit signed value, * set to zero if negative number @@ -1759,8 +2031,8 @@ static void ath10k_dfs_radar_report(struct ath10k *ar, } static int ath10k_dfs_fft_report(struct ath10k *ar, - struct wmi_single_phyerr_rx_event *event, - struct phyerr_fft_report *fftr, + const struct wmi_phyerr 
*phyerr, + const struct phyerr_fft_report *fftr, u64 tsf) { u32 reg0, reg1; @@ -1768,7 +2040,7 @@ static int ath10k_dfs_fft_report(struct ath10k *ar, reg0 = __le32_to_cpu(fftr->reg0); reg1 = __le32_to_cpu(fftr->reg1); - rssi = event->hdr.rssi_combined; + rssi = phyerr->rssi_combined; ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n", @@ -1797,20 +2069,20 @@ static int ath10k_dfs_fft_report(struct ath10k *ar, } static void ath10k_wmi_event_dfs(struct ath10k *ar, - struct wmi_single_phyerr_rx_event *event, + const struct wmi_phyerr *phyerr, u64 tsf) { int buf_len, tlv_len, res, i = 0; - struct phyerr_tlv *tlv; - struct phyerr_radar_report *rr; - struct phyerr_fft_report *fftr; - u8 *tlv_buf; + const struct phyerr_tlv *tlv; + const struct phyerr_radar_report *rr; + const struct phyerr_fft_report *fftr; + const u8 *tlv_buf; - buf_len = __le32_to_cpu(event->hdr.buf_len); + buf_len = __le32_to_cpu(phyerr->buf_len); ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n", - event->hdr.phy_err_code, event->hdr.rssi_combined, - __le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len); + phyerr->phy_err_code, phyerr->rssi_combined, + __le32_to_cpu(phyerr->tsf_timestamp), tsf, buf_len); /* Skip event if DFS disabled */ if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) @@ -1825,9 +2097,9 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar, return; } - tlv = (struct phyerr_tlv *)&event->bufp[i]; + tlv = (struct phyerr_tlv *)&phyerr->buf[i]; tlv_len = __le16_to_cpu(tlv->len); - tlv_buf = &event->bufp[i + sizeof(*tlv)]; + tlv_buf = &phyerr->buf[i + sizeof(*tlv)]; ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n", tlv_len, tlv->tag, tlv->sig); @@ -1841,7 +2113,7 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar, } rr = (struct phyerr_radar_report *)tlv_buf; - ath10k_dfs_radar_report(ar, event, rr, tsf); + ath10k_dfs_radar_report(ar, phyerr, rr, tsf); break; case PHYERR_TLV_TAG_SEARCH_FFT_REPORT: if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) { @@ -1851,7 +2123,7 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar, } fftr = (struct phyerr_fft_report *)tlv_buf; - res = ath10k_dfs_fft_report(ar, event, fftr, tsf); + res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf); if (res) return; break; @@ -1863,16 +2135,16 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar, static void ath10k_wmi_event_spectral_scan(struct ath10k *ar, - struct wmi_single_phyerr_rx_event *event, + const struct wmi_phyerr *phyerr, u64 tsf) { int buf_len, tlv_len, res, i = 0; struct phyerr_tlv *tlv; - u8 *tlv_buf; - struct phyerr_fft_report *fftr; + const void *tlv_buf; + const struct phyerr_fft_report *fftr; size_t fftr_len; - buf_len = __le32_to_cpu(event->hdr.buf_len); + buf_len = __le32_to_cpu(phyerr->buf_len); while (i < buf_len) { if (i + sizeof(*tlv) > buf_len) { @@ -1881,9 +2153,9 @@ ath10k_wmi_event_spectral_scan(struct ath10k *ar, return; } - tlv = (struct phyerr_tlv *)&event->bufp[i]; + tlv = (struct phyerr_tlv *)&phyerr->buf[i]; tlv_len = __le16_to_cpu(tlv->len); - tlv_buf = &event->bufp[i + sizeof(*tlv)]; + tlv_buf = &phyerr->buf[i + sizeof(*tlv)]; if (i + sizeof(*tlv) + tlv_len > buf_len) { ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n", @@ -1900,8 +2172,8 @@ ath10k_wmi_event_spectral_scan(struct ath10k *ar, } fftr_len = tlv_len - sizeof(*fftr); - fftr = (struct phyerr_fft_report *)tlv_buf; - res = ath10k_spectral_process_fft(ar, 
event, + fftr = tlv_buf; + res = ath10k_spectral_process_fft(ar, phyerr, fftr, fftr_len, tsf); if (res < 0) { @@ -1918,8 +2190,8 @@ ath10k_wmi_event_spectral_scan(struct ath10k *ar, static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) { - struct wmi_comb_phyerr_rx_event *comb_event; - struct wmi_single_phyerr_rx_event *event; + const struct wmi_phyerr_event *ev; + const struct wmi_phyerr *phyerr; u32 count, i, buf_len, phy_err_code; u64 tsf; int left_len = skb->len; @@ -1927,38 +2199,38 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) ATH10K_DFS_STAT_INC(ar, phy_errors); /* Check if combined event available */ - if (left_len < sizeof(*comb_event)) { + if (left_len < sizeof(*ev)) { ath10k_warn(ar, "wmi phyerr combined event wrong len\n"); return; } - left_len -= sizeof(*comb_event); + left_len -= sizeof(*ev); /* Check number of included events */ - comb_event = (struct wmi_comb_phyerr_rx_event *)skb->data; - count = __le32_to_cpu(comb_event->hdr.num_phyerr_events); + ev = (const struct wmi_phyerr_event *)skb->data; + count = __le32_to_cpu(ev->num_phyerrs); - tsf = __le32_to_cpu(comb_event->hdr.tsf_u32); + tsf = __le32_to_cpu(ev->tsf_u32); tsf <<= 32; - tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32); + tsf |= __le32_to_cpu(ev->tsf_l32); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event phyerr count %d tsf64 0x%llX\n", count, tsf); - event = (struct wmi_single_phyerr_rx_event *)comb_event->bufp; + phyerr = ev->phyerrs; for (i = 0; i < count; i++) { /* Check if we can read event header */ - if (left_len < sizeof(*event)) { + if (left_len < sizeof(*phyerr)) { ath10k_warn(ar, "single event (%d) wrong head len\n", i); return; } - left_len -= sizeof(*event); + left_len -= sizeof(*phyerr); - buf_len = __le32_to_cpu(event->hdr.buf_len); - phy_err_code = event->hdr.phy_err_code; + buf_len = __le32_to_cpu(phyerr->buf_len); + phy_err_code = phyerr->phy_err_code; if (left_len < buf_len) { ath10k_warn(ar, "single event (%d) wrong buf len\n", i); @@ -1969,20 +2241,20 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) switch (phy_err_code) { case PHY_ERROR_RADAR: - ath10k_wmi_event_dfs(ar, event, tsf); + ath10k_wmi_event_dfs(ar, phyerr, tsf); break; case PHY_ERROR_SPECTRAL_SCAN: - ath10k_wmi_event_spectral_scan(ar, event, tsf); + ath10k_wmi_event_spectral_scan(ar, phyerr, tsf); break; case PHY_ERROR_FALSE_RADAR_EXT: - ath10k_wmi_event_dfs(ar, event, tsf); - ath10k_wmi_event_spectral_scan(ar, event, tsf); + ath10k_wmi_event_dfs(ar, phyerr, tsf); + ath10k_wmi_event_spectral_scan(ar, phyerr, tsf); break; default: break; } - event += sizeof(*event) + buf_len; + phyerr = (void *)phyerr + sizeof(*phyerr) + buf_len; } } @@ -2028,7 +2300,7 @@ static void ath10k_wmi_event_debug_print(struct ath10k *ar, /* the last byte is always reserved for the null character */ buf[i] = '\0'; - ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf); + ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf); } static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb) @@ -2163,30 +2435,117 @@ static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id, return 0; } -static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, - struct sk_buff *skb) +static int ath10k_wmi_main_pull_svc_rdy_ev(struct sk_buff *skb, + struct wmi_svc_rdy_ev_arg *arg) +{ + struct wmi_service_ready_event *ev; + size_t i, n; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + ev = (void *)skb->data; + skb_pull(skb, sizeof(*ev)); + 
arg->min_tx_power = ev->hw_min_tx_power; + arg->max_tx_power = ev->hw_max_tx_power; + arg->ht_cap = ev->ht_cap_info; + arg->vht_cap = ev->vht_cap_info; + arg->sw_ver0 = ev->sw_version; + arg->sw_ver1 = ev->sw_version_1; + arg->phy_capab = ev->phy_capability; + arg->num_rf_chains = ev->num_rf_chains; + arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd; + arg->num_mem_reqs = ev->num_mem_reqs; + arg->service_map = ev->wmi_service_bitmap; + arg->service_map_len = sizeof(ev->wmi_service_bitmap); + + n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs), + ARRAY_SIZE(arg->mem_reqs)); + for (i = 0; i < n; i++) + arg->mem_reqs[i] = &ev->mem_reqs[i]; + + if (skb->len < + __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0])) + return -EPROTO; + + return 0; +} + +static int ath10k_wmi_10x_pull_svc_rdy_ev(struct sk_buff *skb, + struct wmi_svc_rdy_ev_arg *arg) +{ + struct wmi_10x_service_ready_event *ev; + int i, n; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + ev = (void *)skb->data; + skb_pull(skb, sizeof(*ev)); + arg->min_tx_power = ev->hw_min_tx_power; + arg->max_tx_power = ev->hw_max_tx_power; + arg->ht_cap = ev->ht_cap_info; + arg->vht_cap = ev->vht_cap_info; + arg->sw_ver0 = ev->sw_version; + arg->phy_capab = ev->phy_capability; + arg->num_rf_chains = ev->num_rf_chains; + arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd; + arg->num_mem_reqs = ev->num_mem_reqs; + arg->service_map = ev->wmi_service_bitmap; + arg->service_map_len = sizeof(ev->wmi_service_bitmap); + + n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs), + ARRAY_SIZE(arg->mem_reqs)); + for (i = 0; i < n; i++) + arg->mem_reqs[i] = &ev->mem_reqs[i]; + + if (skb->len < + __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0])) + return -EPROTO; + + return 0; +} + +static void ath10k_wmi_event_service_ready(struct ath10k *ar, + struct sk_buff *skb) { - struct wmi_service_ready_event *ev = (void *)skb->data; - DECLARE_BITMAP(svc_bmap, WMI_SERVICE_MAX) = {}; + struct wmi_svc_rdy_ev_arg arg = {}; + u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i; + int ret; - if (skb->len < sizeof(*ev)) { - ath10k_warn(ar, "Service ready event was %d B but expected %zu B. 
Wrong firmware version?\n", - skb->len, sizeof(*ev)); + memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map)); + + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + ret = ath10k_wmi_10x_pull_svc_rdy_ev(skb, &arg); + wmi_10x_svc_map(arg.service_map, ar->wmi.svc_map, + arg.service_map_len); + } else { + ret = ath10k_wmi_main_pull_svc_rdy_ev(skb, &arg); + wmi_main_svc_map(arg.service_map, ar->wmi.svc_map, + arg.service_map_len); + } + + if (ret) { + ath10k_warn(ar, "failed to parse service ready: %d\n", ret); return; } - ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power); - ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power); - ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info); - ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info); + ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power); + ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power); + ar->ht_cap_info = __le32_to_cpu(arg.ht_cap); + ar->vht_cap_info = __le32_to_cpu(arg.vht_cap); ar->fw_version_major = - (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24; - ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff); + (__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24; + ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff); ar->fw_version_release = - (__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16; - ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff); - ar->phy_capability = __le32_to_cpu(ev->phy_capability); - ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains); + (__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16; + ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff); + ar->phy_capability = __le32_to_cpu(arg.phy_capab); + ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains); + ar->ath_common.regulatory.current_rd = __le32_to_cpu(arg.eeprom_rd); + + ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ", + arg.service_map, arg.service_map_len); /* only manually set fw features when not using FW IE format */ if (ar->fw_api == 1 && ar->fw_version_build > 636) @@ -2198,13 +2557,8 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; } - ar->ath_common.regulatory.current_rd = - __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd); - - wmi_main_svc_map(ev->wmi_service_bitmap, svc_bmap); - ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap)); - ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ", - ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap)); + ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1; + ar->supp_rx_chainmask = (1 << ar->num_rf_chains) - 1; if (strlen(ar->hw->wiphy->fw_version) == 0) { snprintf(ar->hw->wiphy->fw_version, @@ -2216,93 +2570,18 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, ar->fw_version_build); } - /* FIXME: it probably should be better to support this */ - if (__le32_to_cpu(ev->num_mem_reqs) > 0) { - ath10k_warn(ar, "target requested %d memory chunks; ignoring\n", - __le32_to_cpu(ev->num_mem_reqs)); - } - - ath10k_dbg(ar, ATH10K_DBG_WMI, - "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n", - __le32_to_cpu(ev->sw_version), - __le32_to_cpu(ev->sw_version_1), - __le32_to_cpu(ev->abi_version), - __le32_to_cpu(ev->phy_capability), - __le32_to_cpu(ev->ht_cap_info), - __le32_to_cpu(ev->vht_cap_info), - __le32_to_cpu(ev->vht_supp_mcs), - __le32_to_cpu(ev->sys_cap_info), - __le32_to_cpu(ev->num_mem_reqs), - 
__le32_to_cpu(ev->num_rf_chains)); - - complete(&ar->wmi.service_ready); -} - -static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, - struct sk_buff *skb) -{ - u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i; - int ret; - struct wmi_service_ready_event_10x *ev = (void *)skb->data; - DECLARE_BITMAP(svc_bmap, WMI_SERVICE_MAX) = {}; - - if (skb->len < sizeof(*ev)) { - ath10k_warn(ar, "Service ready event was %d B but expected %zu B. Wrong firmware version?\n", - skb->len, sizeof(*ev)); - return; - } - - ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power); - ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power); - ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info); - ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info); - ar->fw_version_major = - (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24; - ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff); - ar->phy_capability = __le32_to_cpu(ev->phy_capability); - ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains); - - if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { - ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n", - ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); - ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; - } - - ar->ath_common.regulatory.current_rd = - __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd); - - wmi_10x_svc_map(ev->wmi_service_bitmap, svc_bmap); - ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap)); - ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ", - ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap)); - - if (strlen(ar->hw->wiphy->fw_version) == 0) { - snprintf(ar->hw->wiphy->fw_version, - sizeof(ar->hw->wiphy->fw_version), - "%u.%u", - ar->fw_version_major, - ar->fw_version_minor); - } - - num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs); - - if (num_mem_reqs > ATH10K_MAX_MEM_REQS) { + num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs); + if (num_mem_reqs > WMI_MAX_MEM_REQS) { ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n", num_mem_reqs); return; } - if (!num_mem_reqs) - goto exit; - - ath10k_dbg(ar, ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n", - num_mem_reqs); - for (i = 0; i < num_mem_reqs; ++i) { - req_id = __le32_to_cpu(ev->mem_reqs[i].req_id); - num_units = __le32_to_cpu(ev->mem_reqs[i].num_units); - unit_size = __le32_to_cpu(ev->mem_reqs[i].unit_size); - num_unit_info = __le32_to_cpu(ev->mem_reqs[i].num_unit_info); + req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id); + num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units); + unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size); + num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info); if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) /* number of units to allocate is number of @@ -2316,7 +2595,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n", req_id, - __le32_to_cpu(ev->mem_reqs[i].num_units), + __le32_to_cpu(arg.mem_reqs[i]->num_units), num_unit_info, unit_size, num_units); @@ -2327,23 +2606,23 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, return; } -exit: ath10k_dbg(ar, ATH10K_DBG_WMI, - "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n", - __le32_to_cpu(ev->sw_version), - __le32_to_cpu(ev->abi_version), - __le32_to_cpu(ev->phy_capability), 
- __le32_to_cpu(ev->ht_cap_info), - __le32_to_cpu(ev->vht_cap_info), - __le32_to_cpu(ev->vht_supp_mcs), - __le32_to_cpu(ev->sys_cap_info), - __le32_to_cpu(ev->num_mem_reqs), - __le32_to_cpu(ev->num_rf_chains)); + "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n", + __le32_to_cpu(arg.min_tx_power), + __le32_to_cpu(arg.max_tx_power), + __le32_to_cpu(arg.ht_cap), + __le32_to_cpu(arg.vht_cap), + __le32_to_cpu(arg.sw_ver0), + __le32_to_cpu(arg.sw_ver1), + __le32_to_cpu(arg.phy_capab), + __le32_to_cpu(arg.num_rf_chains), + __le32_to_cpu(arg.eeprom_rd), + __le32_to_cpu(arg.num_mem_reqs)); complete(&ar->wmi.service_ready); } -static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb) +static int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb) { struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data; @@ -2466,10 +2745,10 @@ static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_vdev_install_key_complete(ar, skb); break; case WMI_SERVICE_READY_EVENTID: - ath10k_wmi_service_ready_event_rx(ar, skb); + ath10k_wmi_event_service_ready(ar, skb); break; case WMI_READY_EVENTID: - ath10k_wmi_ready_event_rx(ar, skb); + ath10k_wmi_event_ready(ar, skb); break; default: ath10k_warn(ar, "Unknown eventid: %d\n", id); @@ -2586,10 +2865,10 @@ static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_vdev_resume_req(ar, skb); break; case WMI_10X_SERVICE_READY_EVENTID: - ath10k_wmi_10x_service_ready_event_rx(ar, skb); + ath10k_wmi_event_service_ready(ar, skb); break; case WMI_10X_READY_EVENTID: - ath10k_wmi_ready_event_rx(ar, skb); + ath10k_wmi_event_ready(ar, skb); break; case WMI_10X_PDEV_UTF_EVENTID: /* ignore utf events */ @@ -2697,10 +2976,10 @@ static void ath10k_wmi_10_2_process_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_vdev_resume_req(ar, skb); break; case WMI_10_2_SERVICE_READY_EVENTID: - ath10k_wmi_10x_service_ready_event_rx(ar, skb); + ath10k_wmi_event_service_ready(ar, skb); break; case WMI_10_2_READY_EVENTID: - ath10k_wmi_ready_event_rx(ar, skb); + ath10k_wmi_event_ready(ar, skb); break; case WMI_10_2_RTT_KEEPALIVE_EVENTID: case WMI_10_2_GPIO_INPUT_EVENTID: @@ -2732,45 +3011,6 @@ static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb) } } -/* WMI Initialization functions */ -int ath10k_wmi_attach(struct ath10k *ar) -{ - if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { - if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) - ar->wmi.cmd = &wmi_10_2_cmd_map; - else - ar->wmi.cmd = &wmi_10x_cmd_map; - - ar->wmi.vdev_param = &wmi_10x_vdev_param_map; - ar->wmi.pdev_param = &wmi_10x_pdev_param_map; - } else { - ar->wmi.cmd = &wmi_cmd_map; - ar->wmi.vdev_param = &wmi_vdev_param_map; - ar->wmi.pdev_param = &wmi_pdev_param_map; - } - - init_completion(&ar->wmi.service_ready); - init_completion(&ar->wmi.unified_ready); - init_waitqueue_head(&ar->wmi.tx_credits_wq); - - return 0; -} - -void ath10k_wmi_detach(struct ath10k *ar) -{ - int i; - - /* free the host memory chunks requested by firmware */ - for (i = 0; i < ar->wmi.num_mem_chunks; i++) { - dma_free_coherent(ar->dev, - ar->wmi.mem_chunks[i].len, - ar->wmi.mem_chunks[i].vaddr, - ar->wmi.mem_chunks[i].paddr); - } - - ar->wmi.num_mem_chunks = 0; -} - int ath10k_wmi_connect(struct ath10k *ar) { int status; @@ -2865,42 +3105,6 @@ int 
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, ctl2g, ctl5g); } -int ath10k_wmi_pdev_set_channel(struct ath10k *ar, - const struct wmi_channel_arg *arg) -{ - struct wmi_set_channel_cmd *cmd; - struct sk_buff *skb; - u32 ch_flags = 0; - - if (arg->passive) - return -EINVAL; - - skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); - if (!skb) - return -ENOMEM; - - if (arg->chan_radar) - ch_flags |= WMI_CHAN_FLAG_DFS; - - cmd = (struct wmi_set_channel_cmd *)skb->data; - cmd->chan.mhz = __cpu_to_le32(arg->freq); - cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq); - cmd->chan.mode = arg->mode; - cmd->chan.flags |= __cpu_to_le32(ch_flags); - cmd->chan.min_power = arg->min_power; - cmd->chan.max_power = arg->max_power; - cmd->chan.reg_power = arg->max_reg_power; - cmd->chan.reg_classid = arg->reg_class_id; - cmd->chan.antenna_max = arg->max_antenna_gain; - - ath10k_dbg(ar, ATH10K_DBG_WMI, - "wmi set channel mode %d freq %d\n", - arg->mode, arg->freq); - - return ath10k_wmi_cmd_send(ar, skb, - ar->wmi.cmd->pdev_set_channel_cmdid); -} - int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt) { struct wmi_pdev_suspend_cmd *cmd; @@ -2951,16 +3155,37 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value) return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid); } +static void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar, + struct wmi_host_mem_chunks *chunks) +{ + struct host_memory_chunk *chunk; + int i; + + chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks); + + for (i = 0; i < ar->wmi.num_mem_chunks; i++) { + chunk = &chunks->items[i]; + chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); + chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len); + chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi chunk %d len %d requested, addr 0x%llx\n", + i, + ar->wmi.mem_chunks[i].len, + (unsigned long long)ar->wmi.mem_chunks[i].paddr); + } +} + static int ath10k_wmi_main_cmd_init(struct ath10k *ar) { struct wmi_init_cmd *cmd; struct sk_buff *buf; struct wmi_resource_config config = {}; u32 len, val; - int i; config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS); - config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS); + config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS); config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS); config.num_offload_reorder_bufs = @@ -3019,32 +3244,8 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar) cmd = (struct wmi_init_cmd *)buf->data; - if (ar->wmi.num_mem_chunks == 0) { - cmd->num_host_mem_chunks = 0; - goto out; - } - - ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", - ar->wmi.num_mem_chunks); - - cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); - - for (i = 0; i < ar->wmi.num_mem_chunks; i++) { - cmd->host_mem_chunks[i].ptr = - __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); - cmd->host_mem_chunks[i].size = - __cpu_to_le32(ar->wmi.mem_chunks[i].len); - cmd->host_mem_chunks[i].req_id = - __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); - - ath10k_dbg(ar, ATH10K_DBG_WMI, - "wmi chunk %d len %d requested, addr 0x%llx\n", - i, - ar->wmi.mem_chunks[i].len, - (unsigned long long)ar->wmi.mem_chunks[i].paddr); - } -out: memcpy(&cmd->resource_config, &config, sizeof(config)); + ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n"); return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); @@ -3056,7 +3257,6 @@ static int 
ath10k_wmi_10x_cmd_init(struct ath10k *ar) struct sk_buff *buf; struct wmi_resource_config_10x config = {}; u32 len, val; - int i; config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); @@ -3110,32 +3310,8 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar) cmd = (struct wmi_init_cmd_10x *)buf->data; - if (ar->wmi.num_mem_chunks == 0) { - cmd->num_host_mem_chunks = 0; - goto out; - } - - ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", - ar->wmi.num_mem_chunks); - - cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); - - for (i = 0; i < ar->wmi.num_mem_chunks; i++) { - cmd->host_mem_chunks[i].ptr = - __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); - cmd->host_mem_chunks[i].size = - __cpu_to_le32(ar->wmi.mem_chunks[i].len); - cmd->host_mem_chunks[i].req_id = - __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); - - ath10k_dbg(ar, ATH10K_DBG_WMI, - "wmi chunk %d len %d requested, addr 0x%llx\n", - i, - ar->wmi.mem_chunks[i].len, - (unsigned long long)ar->wmi.mem_chunks[i].paddr); - } -out: memcpy(&cmd->resource_config, &config, sizeof(config)); + ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n"); return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); @@ -3147,7 +3323,6 @@ static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar) struct sk_buff *buf; struct wmi_resource_config_10x config = {}; u32 len, val; - int i; config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); @@ -3201,32 +3376,8 @@ static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar) cmd = (struct wmi_init_cmd_10_2 *)buf->data; - if (ar->wmi.num_mem_chunks == 0) { - cmd->num_host_mem_chunks = 0; - goto out; - } - - ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", - ar->wmi.num_mem_chunks); - - cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); - - for (i = 0; i < ar->wmi.num_mem_chunks; i++) { - cmd->host_mem_chunks[i].ptr = - __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); - cmd->host_mem_chunks[i].size = - __cpu_to_le32(ar->wmi.mem_chunks[i].len); - cmd->host_mem_chunks[i].req_id = - __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); - - ath10k_dbg(ar, ATH10K_DBG_WMI, - "wmi chunk %d len %d requested, addr 0x%llx\n", - i, - ar->wmi.mem_chunks[i].len, - (unsigned long long)ar->wmi.mem_chunks[i].paddr); - } -out: memcpy(&cmd->resource_config.common, &config, sizeof(config)); + ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n"); return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); @@ -3248,52 +3399,50 @@ int ath10k_wmi_cmd_init(struct ath10k *ar) return ret; } -static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar, - const struct wmi_start_scan_arg *arg) +static int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg) { - int len; + if (arg->ie_len && !arg->ie) + return -EINVAL; + if (arg->n_channels && !arg->channels) + return -EINVAL; + if (arg->n_ssids && !arg->ssids) + return -EINVAL; + if (arg->n_bssids && !arg->bssids) + return -EINVAL; - if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) - len = sizeof(struct wmi_start_scan_cmd_10x); - else - len = sizeof(struct wmi_start_scan_cmd); + if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN) + return -EINVAL; + if (arg->n_channels > ARRAY_SIZE(arg->channels)) + return -EINVAL; + if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID) + return -EINVAL; + if (arg->n_bssids > 
WLAN_SCAN_PARAMS_MAX_BSSID) + return -EINVAL; - if (arg->ie_len) { - if (!arg->ie) - return -EINVAL; - if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN) - return -EINVAL; + return 0; +} +static size_t +ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg) +{ + int len = 0; + + if (arg->ie_len) { len += sizeof(struct wmi_ie_data); len += roundup(arg->ie_len, 4); } if (arg->n_channels) { - if (!arg->channels) - return -EINVAL; - if (arg->n_channels > ARRAY_SIZE(arg->channels)) - return -EINVAL; - len += sizeof(struct wmi_chan_list); len += sizeof(__le32) * arg->n_channels; } if (arg->n_ssids) { - if (!arg->ssids) - return -EINVAL; - if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID) - return -EINVAL; - len += sizeof(struct wmi_ssid_list); len += sizeof(struct wmi_ssid) * arg->n_ssids; } if (arg->n_bssids) { - if (!arg->bssids) - return -EINVAL; - if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID) - return -EINVAL; - len += sizeof(struct wmi_bssid_list); len += sizeof(struct wmi_mac_addr) * arg->n_bssids; } @@ -3301,28 +3450,12 @@ static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar, return len; } -int ath10k_wmi_start_scan(struct ath10k *ar, - const struct wmi_start_scan_arg *arg) +static void +ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn, + const struct wmi_start_scan_arg *arg) { - struct wmi_start_scan_cmd *cmd; - struct sk_buff *skb; - struct wmi_ie_data *ie; - struct wmi_chan_list *channels; - struct wmi_ssid_list *ssids; - struct wmi_bssid_list *bssids; u32 scan_id; u32 scan_req_id; - int off; - int len = 0; - int i; - - len = ath10k_wmi_start_scan_calc_len(ar, arg); - if (len < 0) - return len; /* len contains error code here */ - - skb = ath10k_wmi_alloc_skb(ar, len); - if (!skb) - return -ENOMEM; scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX; scan_id |= arg->scan_id; @@ -3330,35 +3463,36 @@ int ath10k_wmi_start_scan(struct ath10k *ar, scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX; scan_req_id |= arg->scan_req_id; - cmd = (struct wmi_start_scan_cmd *)skb->data; - cmd->scan_id = __cpu_to_le32(scan_id); - cmd->scan_req_id = __cpu_to_le32(scan_req_id); - cmd->vdev_id = __cpu_to_le32(arg->vdev_id); - cmd->scan_priority = __cpu_to_le32(arg->scan_priority); - cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events); - cmd->dwell_time_active = __cpu_to_le32(arg->dwell_time_active); - cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive); - cmd->min_rest_time = __cpu_to_le32(arg->min_rest_time); - cmd->max_rest_time = __cpu_to_le32(arg->max_rest_time); - cmd->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time); - cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time); - cmd->idle_time = __cpu_to_le32(arg->idle_time); - cmd->max_scan_time = __cpu_to_le32(arg->max_scan_time); - cmd->probe_delay = __cpu_to_le32(arg->probe_delay); - cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags); - - /* TLV list starts after fields included in the struct */ - /* There's just one filed that differes the two start_scan - * structures - burst_duration, which we are not using btw, - no point to make the split here, just shift the buffer to fit with - given FW */ - if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) - off = sizeof(struct wmi_start_scan_cmd_10x); - else - off = sizeof(struct wmi_start_scan_cmd); + cmn->scan_id = __cpu_to_le32(scan_id); + cmn->scan_req_id = __cpu_to_le32(scan_req_id); + cmn->vdev_id = __cpu_to_le32(arg->vdev_id); + cmn->scan_priority = __cpu_to_le32(arg->scan_priority); + cmn->notify_scan_events = 
__cpu_to_le32(arg->notify_scan_events); + cmn->dwell_time_active = __cpu_to_le32(arg->dwell_time_active); + cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive); + cmn->min_rest_time = __cpu_to_le32(arg->min_rest_time); + cmn->max_rest_time = __cpu_to_le32(arg->max_rest_time); + cmn->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time); + cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time); + cmn->idle_time = __cpu_to_le32(arg->idle_time); + cmn->max_scan_time = __cpu_to_le32(arg->max_scan_time); + cmn->probe_delay = __cpu_to_le32(arg->probe_delay); + cmn->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags); +} + +static void +ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs, + const struct wmi_start_scan_arg *arg) +{ + struct wmi_ie_data *ie; + struct wmi_chan_list *channels; + struct wmi_ssid_list *ssids; + struct wmi_bssid_list *bssids; + void *ptr = tlvs->tlvs; + int i; if (arg->n_channels) { - channels = (void *)skb->data + off; + channels = ptr; channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG); channels->num_chan = __cpu_to_le32(arg->n_channels); @@ -3366,12 +3500,12 @@ int ath10k_wmi_start_scan(struct ath10k *ar, channels->channel_list[i].freq = __cpu_to_le16(arg->channels[i]); - off += sizeof(*channels); - off += sizeof(__le32) * arg->n_channels; + ptr += sizeof(*channels); + ptr += sizeof(__le32) * arg->n_channels; } if (arg->n_ssids) { - ssids = (void *)skb->data + off; + ssids = ptr; ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG); ssids->num_ssids = __cpu_to_le32(arg->n_ssids); @@ -3383,12 +3517,12 @@ int ath10k_wmi_start_scan(struct ath10k *ar, arg->ssids[i].len); } - off += sizeof(*ssids); - off += sizeof(struct wmi_ssid) * arg->n_ssids; + ptr += sizeof(*ssids); + ptr += sizeof(struct wmi_ssid) * arg->n_ssids; } if (arg->n_bssids) { - bssids = (void *)skb->data + off; + bssids = ptr; bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG); bssids->num_bssid = __cpu_to_le32(arg->n_bssids); @@ -3397,23 +3531,57 @@ int ath10k_wmi_start_scan(struct ath10k *ar, arg->bssids[i].bssid, ETH_ALEN); - off += sizeof(*bssids); - off += sizeof(struct wmi_mac_addr) * arg->n_bssids; + ptr += sizeof(*bssids); + ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids; } if (arg->ie_len) { - ie = (void *)skb->data + off; + ie = ptr; ie->tag = __cpu_to_le32(WMI_IE_TAG); ie->ie_len = __cpu_to_le32(arg->ie_len); memcpy(ie->ie_data, arg->ie, arg->ie_len); - off += sizeof(*ie); - off += roundup(arg->ie_len, 4); + ptr += sizeof(*ie); + ptr += roundup(arg->ie_len, 4); } +} - if (off != skb->len) { - dev_kfree_skb(skb); - return -EINVAL; +int ath10k_wmi_start_scan(struct ath10k *ar, + const struct wmi_start_scan_arg *arg) +{ + struct sk_buff *skb; + size_t len; + int ret; + + ret = ath10k_wmi_start_scan_verify(arg); + if (ret) + return ret; + + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) + len = sizeof(struct wmi_10x_start_scan_cmd) + + ath10k_wmi_start_scan_tlvs_len(arg); + else + len = sizeof(struct wmi_start_scan_cmd) + + ath10k_wmi_start_scan_tlvs_len(arg); + + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return -ENOMEM; + + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + struct wmi_10x_start_scan_cmd *cmd; + + cmd = (struct wmi_10x_start_scan_cmd *)skb->data; + ath10k_wmi_put_start_scan_common(&cmd->common, arg); + ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg); + } else { + struct wmi_start_scan_cmd *cmd; + + cmd = (struct wmi_start_scan_cmd *)skb->data; + cmd->burst_duration_ms = __cpu_to_le32(0); + + 
ath10k_wmi_put_start_scan_common(&cmd->common, arg); + ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg); } ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n"); @@ -3532,7 +3700,6 @@ ath10k_wmi_vdev_start_restart(struct ath10k *ar, struct sk_buff *skb; const char *cmdname; u32 flags = 0; - u32 ch_flags = 0; if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid && cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid) @@ -3559,8 +3726,6 @@ ath10k_wmi_vdev_start_restart(struct ath10k *ar, flags |= WMI_VDEV_START_HIDDEN_SSID; if (arg->pmf_enabled) flags |= WMI_VDEV_START_PMF_ENABLED; - if (arg->channel.chan_radar) - ch_flags |= WMI_CHAN_FLAG_DFS; cmd = (struct wmi_vdev_start_request_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(arg->vdev_id); @@ -3576,18 +3741,7 @@ ath10k_wmi_vdev_start_restart(struct ath10k *ar, memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len); } - cmd->chan.mhz = __cpu_to_le32(arg->channel.freq); - - cmd->chan.band_center_freq1 = - __cpu_to_le32(arg->channel.band_center_freq1); - - cmd->chan.mode = arg->channel.mode; - cmd->chan.flags |= __cpu_to_le32(ch_flags); - cmd->chan.min_power = arg->channel.min_power; - cmd->chan.max_power = arg->channel.max_power; - cmd->chan.reg_power = arg->channel.max_reg_power; - cmd->chan.reg_classid = arg->channel.reg_class_id; - cmd->chan.antenna_max = arg->channel.max_antenna_gain; + ath10k_wmi_put_wmi_channel(&cmd->chan, &arg->channel); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n", @@ -3968,35 +4122,10 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar, cmd->num_scan_chans = __cpu_to_le32(arg->n_channels); for (i = 0; i < arg->n_channels; i++) { - u32 flags = 0; - ch = &arg->channels[i]; ci = &cmd->chan_info[i]; - if (ch->passive) - flags |= WMI_CHAN_FLAG_PASSIVE; - if (ch->allow_ibss) - flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED; - if (ch->allow_ht) - flags |= WMI_CHAN_FLAG_ALLOW_HT; - if (ch->allow_vht) - flags |= WMI_CHAN_FLAG_ALLOW_VHT; - if (ch->ht40plus) - flags |= WMI_CHAN_FLAG_HT40_PLUS; - if (ch->chan_radar) - flags |= WMI_CHAN_FLAG_DFS; - - ci->mhz = __cpu_to_le32(ch->freq); - ci->band_center_freq1 = __cpu_to_le32(ch->freq); - ci->band_center_freq2 = 0; - ci->min_power = ch->min_power; - ci->max_power = ch->max_power; - ci->reg_power = ch->max_reg_power; - ci->antenna_max = ch->max_antenna_gain; - - /* mode & flags share storage */ - ci->mode = ch->mode; - ci->flags |= __cpu_to_le32(flags); + ath10k_wmi_put_wmi_channel(ci, ch); } return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid); @@ -4108,9 +4237,9 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar, if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) - ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg); - else ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg); + else + ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg); } else { ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg); } @@ -4267,3 +4396,73 @@ int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable) return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid); } + +int ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 ev_bitmap) +{ + struct wmi_pdev_pktlog_enable_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + ev_bitmap &= ATH10K_PKTLOG_ANY; + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi enable pktlog filter:%x\n", ev_bitmap); + + cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data; + 
cmd->ev_bitmap = __cpu_to_le32(ev_bitmap); + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->pdev_pktlog_enable_cmdid); +} + +int ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar) +{ + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, 0); + if (!skb) + return -ENOMEM; + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n"); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->pdev_pktlog_disable_cmdid); +} + +int ath10k_wmi_attach(struct ath10k *ar) +{ + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) + ar->wmi.cmd = &wmi_10_2_cmd_map; + else + ar->wmi.cmd = &wmi_10x_cmd_map; + + ar->wmi.vdev_param = &wmi_10x_vdev_param_map; + ar->wmi.pdev_param = &wmi_10x_pdev_param_map; + } else { + ar->wmi.cmd = &wmi_cmd_map; + ar->wmi.vdev_param = &wmi_vdev_param_map; + ar->wmi.pdev_param = &wmi_pdev_param_map; + } + + init_completion(&ar->wmi.service_ready); + init_completion(&ar->wmi.unified_ready); + + return 0; +} + +void ath10k_wmi_detach(struct ath10k *ar) +{ + int i; + + /* free the host memory chunks requested by firmware */ + for (i = 0; i < ar->wmi.num_mem_chunks; i++) { + dma_free_coherent(ar->dev, + ar->wmi.mem_chunks[i].len, + ar->wmi.mem_chunks[i].vaddr, + ar->wmi.mem_chunks[i].paddr); + } + + ar->wmi.num_mem_chunks = 0; +} diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 86f5ebccfe79c26a248935de47c6cc13f1b2cbd8..21391929d318c8c3d1a10932952326dc0e9f6438 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -222,128 +222,131 @@ static inline char *wmi_service_name(int service_id) #undef SVCSTR } -#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id) \ - (__le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \ +#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \ + ((svc_id) < (len) && \ + __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \ BIT((svc_id)%(sizeof(u32)))) -#define SVCMAP(x, y) \ +#define SVCMAP(x, y, len) \ do { \ - if (WMI_SERVICE_IS_ENABLED((in), (x))) \ + if (WMI_SERVICE_IS_ENABLED((in), (x), (len))) \ __set_bit(y, out); \ } while (0) -static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out) +static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out, + size_t len) { SVCMAP(WMI_10X_SERVICE_BEACON_OFFLOAD, - WMI_SERVICE_BEACON_OFFLOAD); + WMI_SERVICE_BEACON_OFFLOAD, len); SVCMAP(WMI_10X_SERVICE_SCAN_OFFLOAD, - WMI_SERVICE_SCAN_OFFLOAD); + WMI_SERVICE_SCAN_OFFLOAD, len); SVCMAP(WMI_10X_SERVICE_ROAM_OFFLOAD, - WMI_SERVICE_ROAM_OFFLOAD); + WMI_SERVICE_ROAM_OFFLOAD, len); SVCMAP(WMI_10X_SERVICE_BCN_MISS_OFFLOAD, - WMI_SERVICE_BCN_MISS_OFFLOAD); + WMI_SERVICE_BCN_MISS_OFFLOAD, len); SVCMAP(WMI_10X_SERVICE_STA_PWRSAVE, - WMI_SERVICE_STA_PWRSAVE); + WMI_SERVICE_STA_PWRSAVE, len); SVCMAP(WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE, - WMI_SERVICE_STA_ADVANCED_PWRSAVE); + WMI_SERVICE_STA_ADVANCED_PWRSAVE, len); SVCMAP(WMI_10X_SERVICE_AP_UAPSD, - WMI_SERVICE_AP_UAPSD); + WMI_SERVICE_AP_UAPSD, len); SVCMAP(WMI_10X_SERVICE_AP_DFS, - WMI_SERVICE_AP_DFS); + WMI_SERVICE_AP_DFS, len); SVCMAP(WMI_10X_SERVICE_11AC, - WMI_SERVICE_11AC); + WMI_SERVICE_11AC, len); SVCMAP(WMI_10X_SERVICE_BLOCKACK, - WMI_SERVICE_BLOCKACK); + WMI_SERVICE_BLOCKACK, len); SVCMAP(WMI_10X_SERVICE_PHYERR, - WMI_SERVICE_PHYERR); + WMI_SERVICE_PHYERR, len); SVCMAP(WMI_10X_SERVICE_BCN_FILTER, - WMI_SERVICE_BCN_FILTER); + WMI_SERVICE_BCN_FILTER, len); SVCMAP(WMI_10X_SERVICE_RTT, - WMI_SERVICE_RTT); + 
WMI_SERVICE_RTT, len); SVCMAP(WMI_10X_SERVICE_RATECTRL, - WMI_SERVICE_RATECTRL); + WMI_SERVICE_RATECTRL, len); SVCMAP(WMI_10X_SERVICE_WOW, - WMI_SERVICE_WOW); + WMI_SERVICE_WOW, len); SVCMAP(WMI_10X_SERVICE_RATECTRL_CACHE, - WMI_SERVICE_RATECTRL_CACHE); + WMI_SERVICE_RATECTRL_CACHE, len); SVCMAP(WMI_10X_SERVICE_IRAM_TIDS, - WMI_SERVICE_IRAM_TIDS); + WMI_SERVICE_IRAM_TIDS, len); SVCMAP(WMI_10X_SERVICE_BURST, - WMI_SERVICE_BURST); + WMI_SERVICE_BURST, len); SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT, - WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT); + WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len); SVCMAP(WMI_10X_SERVICE_FORCE_FW_HANG, - WMI_SERVICE_FORCE_FW_HANG); + WMI_SERVICE_FORCE_FW_HANG, len); SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT, - WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT); + WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len); } -static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out) +static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out, + size_t len) { SVCMAP(WMI_MAIN_SERVICE_BEACON_OFFLOAD, - WMI_SERVICE_BEACON_OFFLOAD); + WMI_SERVICE_BEACON_OFFLOAD, len); SVCMAP(WMI_MAIN_SERVICE_SCAN_OFFLOAD, - WMI_SERVICE_SCAN_OFFLOAD); + WMI_SERVICE_SCAN_OFFLOAD, len); SVCMAP(WMI_MAIN_SERVICE_ROAM_OFFLOAD, - WMI_SERVICE_ROAM_OFFLOAD); + WMI_SERVICE_ROAM_OFFLOAD, len); SVCMAP(WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD, - WMI_SERVICE_BCN_MISS_OFFLOAD); + WMI_SERVICE_BCN_MISS_OFFLOAD, len); SVCMAP(WMI_MAIN_SERVICE_STA_PWRSAVE, - WMI_SERVICE_STA_PWRSAVE); + WMI_SERVICE_STA_PWRSAVE, len); SVCMAP(WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE, - WMI_SERVICE_STA_ADVANCED_PWRSAVE); + WMI_SERVICE_STA_ADVANCED_PWRSAVE, len); SVCMAP(WMI_MAIN_SERVICE_AP_UAPSD, - WMI_SERVICE_AP_UAPSD); + WMI_SERVICE_AP_UAPSD, len); SVCMAP(WMI_MAIN_SERVICE_AP_DFS, - WMI_SERVICE_AP_DFS); + WMI_SERVICE_AP_DFS, len); SVCMAP(WMI_MAIN_SERVICE_11AC, - WMI_SERVICE_11AC); + WMI_SERVICE_11AC, len); SVCMAP(WMI_MAIN_SERVICE_BLOCKACK, - WMI_SERVICE_BLOCKACK); + WMI_SERVICE_BLOCKACK, len); SVCMAP(WMI_MAIN_SERVICE_PHYERR, - WMI_SERVICE_PHYERR); + WMI_SERVICE_PHYERR, len); SVCMAP(WMI_MAIN_SERVICE_BCN_FILTER, - WMI_SERVICE_BCN_FILTER); + WMI_SERVICE_BCN_FILTER, len); SVCMAP(WMI_MAIN_SERVICE_RTT, - WMI_SERVICE_RTT); + WMI_SERVICE_RTT, len); SVCMAP(WMI_MAIN_SERVICE_RATECTRL, - WMI_SERVICE_RATECTRL); + WMI_SERVICE_RATECTRL, len); SVCMAP(WMI_MAIN_SERVICE_WOW, - WMI_SERVICE_WOW); + WMI_SERVICE_WOW, len); SVCMAP(WMI_MAIN_SERVICE_RATECTRL_CACHE, - WMI_SERVICE_RATECTRL_CACHE); + WMI_SERVICE_RATECTRL_CACHE, len); SVCMAP(WMI_MAIN_SERVICE_IRAM_TIDS, - WMI_SERVICE_IRAM_TIDS); + WMI_SERVICE_IRAM_TIDS, len); SVCMAP(WMI_MAIN_SERVICE_ARPNS_OFFLOAD, - WMI_SERVICE_ARPNS_OFFLOAD); + WMI_SERVICE_ARPNS_OFFLOAD, len); SVCMAP(WMI_MAIN_SERVICE_NLO, - WMI_SERVICE_NLO); + WMI_SERVICE_NLO, len); SVCMAP(WMI_MAIN_SERVICE_GTK_OFFLOAD, - WMI_SERVICE_GTK_OFFLOAD); + WMI_SERVICE_GTK_OFFLOAD, len); SVCMAP(WMI_MAIN_SERVICE_SCAN_SCH, - WMI_SERVICE_SCAN_SCH); + WMI_SERVICE_SCAN_SCH, len); SVCMAP(WMI_MAIN_SERVICE_CSA_OFFLOAD, - WMI_SERVICE_CSA_OFFLOAD); + WMI_SERVICE_CSA_OFFLOAD, len); SVCMAP(WMI_MAIN_SERVICE_CHATTER, - WMI_SERVICE_CHATTER); + WMI_SERVICE_CHATTER, len); SVCMAP(WMI_MAIN_SERVICE_COEX_FREQAVOID, - WMI_SERVICE_COEX_FREQAVOID); + WMI_SERVICE_COEX_FREQAVOID, len); SVCMAP(WMI_MAIN_SERVICE_PACKET_POWER_SAVE, - WMI_SERVICE_PACKET_POWER_SAVE); + WMI_SERVICE_PACKET_POWER_SAVE, len); SVCMAP(WMI_MAIN_SERVICE_FORCE_FW_HANG, - WMI_SERVICE_FORCE_FW_HANG); + WMI_SERVICE_FORCE_FW_HANG, len); SVCMAP(WMI_MAIN_SERVICE_GPIO, - WMI_SERVICE_GPIO); + WMI_SERVICE_GPIO, len); 
SVCMAP(WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM, - WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM); + WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, len); SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, - WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG); + WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len); SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, - WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG); + WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len); SVCMAP(WMI_MAIN_SERVICE_STA_KEEP_ALIVE, - WMI_SERVICE_STA_KEEP_ALIVE); + WMI_SERVICE_STA_KEEP_ALIVE, len); SVCMAP(WMI_MAIN_SERVICE_TX_ENCAP, - WMI_SERVICE_TX_ENCAP); + WMI_SERVICE_TX_ENCAP, len); } #undef SVCMAP @@ -1428,11 +1431,11 @@ struct wmi_service_ready_event { * where FW can access this memory directly (or) by DMA. */ __le32 num_mem_reqs; - struct wlan_host_mem_req mem_reqs[1]; + struct wlan_host_mem_req mem_reqs[0]; } __packed; /* This is the definition from 10.X firmware branch */ -struct wmi_service_ready_event_10x { +struct wmi_10x_service_ready_event { __le32 sw_version; __le32 abi_version; @@ -1467,7 +1470,7 @@ struct wmi_service_ready_event_10x { */ __le32 num_mem_reqs; - struct wlan_host_mem_req mem_reqs[1]; + struct wlan_host_mem_req mem_reqs[0]; } __packed; #define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ) @@ -1883,38 +1886,26 @@ struct host_memory_chunk { __le32 size; } __packed; +struct wmi_host_mem_chunks { + __le32 count; + /* some fw revisions require at least 1 chunk regardless of count */ + struct host_memory_chunk items[1]; +} __packed; + struct wmi_init_cmd { struct wmi_resource_config resource_config; - __le32 num_host_mem_chunks; - - /* - * variable number of host memory chunks. - * This should be the last element in the structure - */ - struct host_memory_chunk host_mem_chunks[1]; + struct wmi_host_mem_chunks mem_chunks; } __packed; /* _10x stucture is from 10.X FW API */ struct wmi_init_cmd_10x { struct wmi_resource_config_10x resource_config; - __le32 num_host_mem_chunks; - - /* - * variable number of host memory chunks. - * This should be the last element in the structure - */ - struct host_memory_chunk host_mem_chunks[1]; + struct wmi_host_mem_chunks mem_chunks; } __packed; struct wmi_init_cmd_10_2 { struct wmi_resource_config_10_2 resource_config; - __le32 num_host_mem_chunks; - - /* - * variable number of host memory chunks. - * This should be the last element in the structure - */ - struct host_memory_chunk host_mem_chunks[1]; + struct wmi_host_mem_chunks mem_chunks; } __packed; struct wmi_chan_list_entry { @@ -1964,6 +1955,11 @@ struct wmi_ssid_list { #define WLAN_SCAN_PARAMS_MAX_BSSID 4 #define WLAN_SCAN_PARAMS_MAX_IE_LEN 256 +/* Values lower than this may be refused by some firmware revisions with a scan + * completion with a timedout reason. + */ +#define WMI_SCAN_CHAN_MIN_TIME_MSEC 40 + /* Scan priority numbers must be sequential, starting with 0 */ enum wmi_scan_priority { WMI_SCAN_PRIORITY_VERY_LOW = 0, @@ -1974,7 +1970,7 @@ enum wmi_scan_priority { WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */ }; -struct wmi_start_scan_cmd { +struct wmi_start_scan_common { /* Scan ID */ __le32 scan_id; /* Scan requestor ID */ @@ -2032,95 +2028,25 @@ struct wmi_start_scan_cmd { __le32 probe_delay; /* Scan control flags */ __le32 scan_ctrl_flags; - - /* Burst duration time in msecs */ - __le32 burst_duration; - /* - * TLV (tag length value ) paramerters follow the scan_cmd structure. - * TLV can contain channel list, bssid list, ssid list and - * ie. 
the TLV tags are defined above; - */ } __packed; -/* This is the definition from 10.X firmware branch */ -struct wmi_start_scan_cmd_10x { - /* Scan ID */ - __le32 scan_id; - - /* Scan requestor ID */ - __le32 scan_req_id; - - /* VDEV id(interface) that is requesting scan */ - __le32 vdev_id; - - /* Scan Priority, input to scan scheduler */ - __le32 scan_priority; - - /* Scan events subscription */ - __le32 notify_scan_events; - - /* dwell time in msec on active channels */ - __le32 dwell_time_active; - - /* dwell time in msec on passive channels */ - __le32 dwell_time_passive; - - /* - * min time in msec on the BSS channel,only valid if atleast one - * VDEV is active - */ - __le32 min_rest_time; - - /* - * max rest time in msec on the BSS channel,only valid if at least - * one VDEV is active - */ - /* - * the scanner will rest on the bss channel at least min_rest_time - * after min_rest_time the scanner will start checking for tx/rx - * activity on all VDEVs. if there is no activity the scanner will - * switch to off channel. if there is activity the scanner will let - * the radio on the bss channel until max_rest_time expires.at - * max_rest_time scanner will switch to off channel irrespective of - * activity. activity is determined by the idle_time parameter. - */ - __le32 max_rest_time; - - /* - * time before sending next set of probe requests. - * The scanner keeps repeating probe requests transmission with - * period specified by repeat_probe_time. - * The number of probe requests specified depends on the ssid_list - * and bssid_list - */ - __le32 repeat_probe_time; - - /* time in msec between 2 consequetive probe requests with in a set. */ - __le32 probe_spacing_time; - - /* - * data inactivity time in msec on bss channel that will be used by - * scanner for measuring the inactivity. - */ - __le32 idle_time; - - /* maximum time in msec allowed for scan */ - __le32 max_scan_time; - - /* - * delay in msec before sending first probe request after switching - * to a channel +struct wmi_start_scan_tlvs { + /* TLV parameters. These include channel list, ssid list, bssid list, + * extra ies. */ - __le32 probe_delay; + u8 tlvs[0]; +} __packed; - /* Scan control flags */ - __le32 scan_ctrl_flags; +struct wmi_start_scan_cmd { + struct wmi_start_scan_common common; + __le32 burst_duration_ms; + struct wmi_start_scan_tlvs tlvs; +} __packed; - /* - * TLV (tag length value ) paramerters follow the scan_cmd structure. - * TLV can contain channel list, bssid list, ssid list and - * ie. 
the TLV tags are defined above; - */ +/* This is the definition from 10.X firmware branch */ +struct wmi_10x_start_scan_cmd { + struct wmi_start_scan_common common; + struct wmi_start_scan_tlvs tlvs; } __packed; struct wmi_ssid_arg { @@ -2306,94 +2232,25 @@ struct wmi_mgmt_rx_event_v2 { #define PHY_ERROR_FALSE_RADAR_EXT 0x24 #define PHY_ERROR_RADAR 0x05 -struct wmi_single_phyerr_rx_hdr { - /* TSF timestamp */ +struct wmi_phyerr { __le32 tsf_timestamp; - - /* - * Current freq1, freq2 - * - * [7:0]: freq1[lo] - * [15:8] : freq1[hi] - * [23:16]: freq2[lo] - * [31:24]: freq2[hi] - */ __le16 freq1; __le16 freq2; - - /* - * Combined RSSI over all chains and channel width for this PHY error - * - * [7:0]: RSSI combined - * [15:8]: Channel width (MHz) - * [23:16]: PHY error code - * [24:16]: reserved (future use) - */ u8 rssi_combined; u8 chan_width_mhz; u8 phy_err_code; u8 rsvd0; - - /* - * RSSI on chain 0 through 3 - * - * This is formatted the same as the PPDU_START RX descriptor - * field: - * - * [7:0]: pri20 - * [15:8]: sec20 - * [23:16]: sec40 - * [31:24]: sec80 - */ - - __le32 rssi_chain0; - __le32 rssi_chain1; - __le32 rssi_chain2; - __le32 rssi_chain3; - - /* - * Last calibrated NF value for chain 0 through 3 - * - * nf_list_1: - * - * + [15:0] - chain 0 - * + [31:16] - chain 1 - * - * nf_list_2: - * - * + [15:0] - chain 2 - * + [31:16] - chain 3 - */ - __le32 nf_list_1; - __le32 nf_list_2; - - /* Length of the frame */ + __le32 rssi_chains[4]; + __le16 nf_chains[4]; __le32 buf_len; + u8 buf[0]; } __packed; -struct wmi_single_phyerr_rx_event { - /* Phy error event header */ - struct wmi_single_phyerr_rx_hdr hdr; - /* frame buffer */ - u8 bufp[0]; -} __packed; - -struct wmi_comb_phyerr_rx_hdr { - /* Phy error phy error count */ - __le32 num_phyerr_events; +struct wmi_phyerr_event { + __le32 num_phyerrs; __le32 tsf_l32; __le32 tsf_u32; -} __packed; - -struct wmi_comb_phyerr_rx_event { - /* Phy error phy error count */ - struct wmi_comb_phyerr_rx_hdr hdr; - /* - * frame buffer - contains multiple payloads in the order: - * header - payload, header - payload... 
- * (The header is of type: wmi_single_phyerr_rx_hdr) - */ - u8 bufp[0]; + struct wmi_phyerr phyerrs[0]; } __packed; #define PHYERR_TLV_SIG 0xBB @@ -2908,11 +2765,6 @@ enum wmi_tp_scale { WMI_TP_SCALE_SIZE = 5, /* max num of enum */ }; -struct wmi_set_channel_cmd { - /* channel (only frequency and mode info are used) */ - struct wmi_channel chan; -} __packed; - struct wmi_pdev_chanlist_update_event { /* number of channels */ __le32 num_chan; @@ -2943,6 +2795,10 @@ struct wmi_pdev_set_channel_cmd { struct wmi_channel chan; } __packed; +struct wmi_pdev_pktlog_enable_cmd { + __le32 ev_bitmap; +} __packed; + /* Customize the DSCP (bit) to TID (0-7) mapping for QOS */ #define WMI_DSCP_MAP_MAX (64) struct wmi_pdev_set_dscp_tid_map_cmd { @@ -3177,7 +3033,7 @@ struct wmi_stats_event { * PDEV statistics * TODO: add all PDEV stats here */ -struct wmi_pdev_stats_old { +struct wmi_pdev_stats { __le32 chan_nf; /* Channel noise floor */ __le32 tx_frame_count; /* TX frame count */ __le32 rx_frame_count; /* RX frame count */ @@ -3188,15 +3044,8 @@ struct wmi_pdev_stats_old { struct wal_dbg_stats wal; /* WAL dbg stats */ } __packed; -struct wmi_pdev_stats_10x { - __le32 chan_nf; /* Channel noise floor */ - __le32 tx_frame_count; /* TX frame count */ - __le32 rx_frame_count; /* RX frame count */ - __le32 rx_clear_count; /* rx clear count */ - __le32 cycle_count; /* cycle count */ - __le32 phy_err_count; /* Phy error count */ - __le32 chan_tx_pwr; /* channel tx power */ - struct wal_dbg_stats wal; /* WAL dbg stats */ +struct wmi_10x_pdev_stats { + struct wmi_pdev_stats old; __le32 ack_rx_bad; __le32 rts_bad; __le32 rts_good; @@ -3217,16 +3066,14 @@ struct wmi_vdev_stats { * peer statistics. * TODO: add more stats */ -struct wmi_peer_stats_old { +struct wmi_peer_stats { struct wmi_mac_addr peer_macaddr; __le32 peer_rssi; __le32 peer_tx_rate; } __packed; -struct wmi_peer_stats_10x { - struct wmi_mac_addr peer_macaddr; - __le32 peer_rssi; - __le32 peer_tx_rate; +struct wmi_10x_peer_stats { + struct wmi_peer_stats old; __le32 peer_rx_rate; } __packed; @@ -4708,7 +4555,6 @@ struct wmi_dbglog_cfg_cmd { __le32 config_valid; } __packed; -#define ATH10K_RTS_MAX 2347 #define ATH10K_FRAGMT_THRESHOLD_MIN 540 #define ATH10K_FRAGMT_THRESHOLD_MAX 2346 @@ -4719,8 +4565,27 @@ struct wmi_dbglog_cfg_cmd { /* By default disable power save for IBSS */ #define ATH10K_DEFAULT_ATIM 0 +#define WMI_MAX_MEM_REQS 16 + +struct wmi_svc_rdy_ev_arg { + __le32 min_tx_power; + __le32 max_tx_power; + __le32 ht_cap; + __le32 vht_cap; + __le32 sw_ver0; + __le32 sw_ver1; + __le32 phy_capab; + __le32 num_rf_chains; + __le32 eeprom_rd; + __le32 num_mem_reqs; + const __le32 *service_map; + size_t service_map_len; + const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS]; +}; + struct ath10k; struct ath10k_vif; +struct ath10k_fw_stats; int ath10k_wmi_attach(struct ath10k *ar); void ath10k_wmi_detach(struct ath10k *ar); @@ -4732,8 +4597,6 @@ int ath10k_wmi_connect(struct ath10k *ar); struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len); int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id); -int ath10k_wmi_pdev_set_channel(struct ath10k *ar, - const struct wmi_channel_arg *); int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt); int ath10k_wmi_pdev_resume_target(struct ath10k *ar); int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, @@ -4794,5 +4657,9 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar, enum wmi_force_fw_hang_type type, u32 delay_ms); int 
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb); int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable); +int ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb, + struct ath10k_fw_stats *stats); +int ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 ev_list); +int ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar); #endif /* _WMI_H_ */ diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index ab2709a437686ecec7d6a858756c340184ad10b6..19eab2a69ad5b6830261d4edade793b657ed62ea 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ -547,7 +547,9 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, static void -ath5k_sw_scan_start(struct ieee80211_hw *hw) +ath5k_sw_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *mac_addr) { struct ath5k_hw *ah = hw->priv; if (!ah->assoc) @@ -556,7 +558,7 @@ ath5k_sw_scan_start(struct ieee80211_hw *hw) static void -ath5k_sw_scan_complete(struct ieee80211_hw *hw) +ath5k_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath5k_hw *ah = hw->priv; ath5k_hw_set_ledstate(ah, ah->assoc ? diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c index 0583c69d26dbb4a95b99fa82704e8af14f27b9b8..ddaad712c59a814b52370f4b1a46ad8309b58c48 100644 --- a/drivers/net/wireless/ath/ath5k/qcu.c +++ b/drivers/net/wireless/ath/ath5k/qcu.c @@ -225,13 +225,7 @@ ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type, } else { switch (queue_type) { case AR5K_TX_QUEUE_DATA: - for (queue = AR5K_TX_QUEUE_ID_DATA_MIN; - ah->ah_txq[queue].tqi_type != - AR5K_TX_QUEUE_INACTIVE; queue++) { - - if (queue > AR5K_TX_QUEUE_ID_DATA_MAX) - return -EINVAL; - } + queue = queue_info->tqi_subtype; break; case AR5K_TX_QUEUE_UAPSD: queue = AR5K_TX_QUEUE_ID_UAPSD; diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index ba60e37213eb1d289fa983e8eacbce48c1616b0a..7a5337877a0c507d5c94b2002b2c4e7e9577d3f3 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -2976,11 +2976,11 @@ static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev) static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; static int ath6kl_del_station(struct wiphy *wiphy, struct net_device *dev, - const u8 *mac) + struct station_del_parameters *params) { struct ath6kl *ar = ath6kl_priv(dev); struct ath6kl_vif *vif = netdev_priv(dev); - const u8 *addr = mac ? mac : bcast_addr; + const u8 *addr = params->mac ? 
params->mac : bcast_addr; return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx, WMI_AP_DEAUTH, addr, WLAN_REASON_PREV_AUTH_NOT_VALID); diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h index 05debf700a846db00f55e0071df53e207d2e6c63..4f82e8632d37ba10a321e3182b6bb8ffa7d594f7 100644 --- a/drivers/net/wireless/ath/ath6kl/common.h +++ b/drivers/net/wireless/ath/ath6kl/common.h @@ -22,7 +22,7 @@ #define ATH6KL_MAX_IE 256 -__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...); +__printf(2, 3) void ath6kl_printk(const char *level, const char *fmt, ...); /* * Reflects the version of binary interface exposed by ATH6KL target diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c index 55c4064dd5067f26b8dfaf54689f85c9eb67a88e..81ba48d2938b2cfc3f11d22e6fb723a8f03f82e5 100644 --- a/drivers/net/wireless/ath/ath6kl/debug.c +++ b/drivers/net/wireless/ath/ath6kl/debug.c @@ -37,76 +37,64 @@ struct ath6kl_fwlog_slot { #define ATH6KL_FWLOG_VALID_MASK 0x1ffff -int ath6kl_printk(const char *level, const char *fmt, ...) +void ath6kl_printk(const char *level, const char *fmt, ...) { struct va_format vaf; va_list args; - int rtn; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; - rtn = printk("%sath6kl: %pV", level, &vaf); + printk("%sath6kl: %pV", level, &vaf); va_end(args); - - return rtn; } EXPORT_SYMBOL(ath6kl_printk); -int ath6kl_info(const char *fmt, ...) +void ath6kl_info(const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args; - int ret; va_start(args, fmt); vaf.va = &args; - ret = ath6kl_printk(KERN_INFO, "%pV", &vaf); + ath6kl_printk(KERN_INFO, "%pV", &vaf); trace_ath6kl_log_info(&vaf); va_end(args); - - return ret; } EXPORT_SYMBOL(ath6kl_info); -int ath6kl_err(const char *fmt, ...) +void ath6kl_err(const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args; - int ret; va_start(args, fmt); vaf.va = &args; - ret = ath6kl_printk(KERN_ERR, "%pV", &vaf); + ath6kl_printk(KERN_ERR, "%pV", &vaf); trace_ath6kl_log_err(&vaf); va_end(args); - - return ret; } EXPORT_SYMBOL(ath6kl_err); -int ath6kl_warn(const char *fmt, ...) +void ath6kl_warn(const char *fmt, ...) 
{ struct va_format vaf = { .fmt = fmt, }; va_list args; - int ret; va_start(args, fmt); vaf.va = &args; - ret = ath6kl_printk(KERN_WARNING, "%pV", &vaf); + ath6kl_printk(KERN_WARNING, "%pV", &vaf); trace_ath6kl_log_warn(&vaf); va_end(args); - - return ret; } EXPORT_SYMBOL(ath6kl_warn); diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h index e194c10d9f0071725c1c5eba917347ae8209b0a0..19106ed28961643870d36d95456ee80b3af6ec18 100644 --- a/drivers/net/wireless/ath/ath6kl/debug.h +++ b/drivers/net/wireless/ath/ath6kl/debug.h @@ -50,10 +50,10 @@ enum ATH6K_DEBUG_MASK { }; extern unsigned int debug_mask; -__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...); -__printf(1, 2) int ath6kl_info(const char *fmt, ...); -__printf(1, 2) int ath6kl_err(const char *fmt, ...); -__printf(1, 2) int ath6kl_warn(const char *fmt, ...); +__printf(2, 3) void ath6kl_printk(const char *level, const char *fmt, ...); +__printf(1, 2) void ath6kl_info(const char *fmt, ...); +__printf(1, 2) void ath6kl_err(const char *fmt, ...); +__printf(1, 2) void ath6kl_warn(const char *fmt, ...); enum ath6kl_war { ATH6KL_WAR_INVALID_RATE, @@ -81,10 +81,9 @@ int ath6kl_debug_init_fs(struct ath6kl *ar); void ath6kl_debug_cleanup(struct ath6kl *ar); #else -static inline int ath6kl_dbg(enum ATH6K_DEBUG_MASK dbg_mask, - const char *fmt, ...) +static inline void ath6kl_dbg(enum ATH6K_DEBUG_MASK dbg_mask, + const char *fmt, ...) { - return 0; } static inline void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask, diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c index a6a5e40b3e981453b72233e930e712c8412bf60e..9da3594fd010d08a64994c848443076f4745a7a8 100644 --- a/drivers/net/wireless/ath/ath6kl/usb.c +++ b/drivers/net/wireless/ath/ath6kl/usb.c @@ -1193,18 +1193,10 @@ static int ath6kl_usb_pm_resume(struct usb_interface *interface) return 0; } -static int ath6kl_usb_pm_reset_resume(struct usb_interface *intf) -{ - if (usb_get_intfdata(intf)) - ath6kl_usb_remove(intf); - return 0; -} - #else #define ath6kl_usb_pm_suspend NULL #define ath6kl_usb_pm_resume NULL -#define ath6kl_usb_pm_reset_resume NULL #endif @@ -1222,7 +1214,6 @@ static struct usb_driver ath6kl_usb_driver = { .probe = ath6kl_usb_probe, .suspend = ath6kl_usb_pm_suspend, .resume = ath6kl_usb_pm_resume, - .reset_resume = ath6kl_usb_pm_reset_resume, .disconnect = ath6kl_usb_remove, .id_table = ath6kl_usb_ids, .supports_autosuspend = true, diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index 896e63281b3b8464b9bbdbd5bab6d9d1f09b4302..fee0cadb0f5ed592fba313f1040b2e1f5ed1ebde 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -3,6 +3,8 @@ config ATH9K_HW config ATH9K_COMMON tristate select ATH_COMMON + select DEBUG_FS + select RELAY config ATH9K_DFS_DEBUGFS def_bool y depends on ATH9K_DEBUGFS && ATH9K_DFS_CERTIFIED @@ -148,6 +150,11 @@ config ATH9K_CHANNEL_CONTEXT for multi-channel concurrency. Enable this if P2P PowerSave support is required. 
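For reference, a minimal .config sketch of how these Kconfig symbols would typically resolve on a non-EXPERT build (illustrative only; the exact option set for a given platform is an assumption, and ATH9K_PCOEM added below is "default y" with its prompt only visible under CONFIG_EXPERT):

	CONFIG_ATH9K=m
	CONFIG_ATH9K_CHANNEL_CONTEXT=y
	# forced to y unless CONFIG_EXPERT is set
	CONFIG_ATH9K_PCOEM=y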
+config ATH9K_PCOEM + bool "Atheros ath9k support for PC OEM cards" if EXPERT + depends on ATH9K + default y + config ATH9K_HTC tristate "Atheros HTC based wireless cards support" depends on USB && MAC80211 diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile index 73704c1be736f188dd5cb9f9c746a51dc4d35a94..473972288a84a901a69cc555802b2d8228e274f7 100644 --- a/drivers/net/wireless/ath/ath9k/Makefile +++ b/drivers/net/wireless/ath/ath9k/Makefile @@ -16,8 +16,7 @@ ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o ath9k-$(CONFIG_ATH9K_TX99) += tx99.o ath9k-$(CONFIG_ATH9K_WOW) += wow.o -ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o \ - spectral.o +ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o ath9k-$(CONFIG_ATH9K_STATION_STATISTICS) += debug_sta.o @@ -32,7 +31,6 @@ ath9k_hw-y:= \ ar5008_phy.o \ ar9002_calib.o \ ar9003_calib.o \ - ar9003_rtt.o \ calib.o \ eeprom.o \ eeprom_def.o \ @@ -50,6 +48,8 @@ ath9k_hw-$(CONFIG_ATH9K_WOW) += ar9003_wow.o ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \ ar9003_mci.o +ath9k_hw-$(CONFIG_ATH9K_PCOEM) += ar9003_rtt.o + ath9k_hw-$(CONFIG_ATH9K_DYNACK) += dynack.o obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o @@ -58,7 +58,8 @@ obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o ath9k_common-y:= common.o \ common-init.o \ common-beacon.o \ - common-debug.o + common-debug.o \ + common-spectral.o ath9k_htc-y += htc_hst.o \ hif_usb.o \ diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index b72d0be716dbdc91c56f822f1ec3d352f82e4e80..5829074208fa8173c343c6eaee050c86c8d30c41 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -1190,7 +1190,7 @@ static void ar5008_hw_set_nf_limits(struct ath_hw *ah) static void ar5008_hw_set_radar_params(struct ath_hw *ah, struct ath_hw_radar_conf *conf) { - u32 radar_0 = 0, radar_1 = 0; + u32 radar_0 = 0, radar_1; if (!conf) { REG_CLR_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_ENA); @@ -1204,6 +1204,9 @@ static void ar5008_hw_set_radar_params(struct ath_hw *ah, radar_0 |= SM(conf->pulse_rssi, AR_PHY_RADAR_0_PRSSI); radar_0 |= SM(conf->pulse_inband, AR_PHY_RADAR_0_INBAND); + radar_1 = REG_READ(ah, AR_PHY_RADAR_1); + radar_1 &= ~(AR_PHY_RADAR_1_MAXLEN | AR_PHY_RADAR_1_RELSTEP_THRESH | + AR_PHY_RADAR_1_RELPWR_THRESH); radar_1 |= AR_PHY_RADAR_1_MAX_RRSSI; radar_1 |= AR_PHY_RADAR_1_BLOCK_CHECK; radar_1 |= SM(conf->pulse_maxlen, AR_PHY_RADAR_1_MAXLEN); @@ -1225,7 +1228,7 @@ static void ar5008_hw_set_radar_conf(struct ath_hw *ah) conf->fir_power = -33; conf->radar_rssi = 20; conf->pulse_height = 10; - conf->pulse_rssi = 24; + conf->pulse_rssi = 15; conf->pulse_inband = 15; conf->pulse_maxlen = 255; conf->pulse_inband_step = 12; diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c index cdc74005650ce9848147ee0e051c64f9df989ffd..42190b67c6719594bcbe57b9d70e02186645bace 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c @@ -657,31 +657,29 @@ static void ar9002_hw_olc_temp_compensation(struct ath_hw *ah) ar9280_hw_olc_temp_compensation(ah); } -static bool ar9002_hw_calibrate(struct ath_hw *ah, - struct ath9k_channel *chan, - u8 rxchainmask, - bool longcal) +static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan, + u8 rxchainmask, bool longcal) { - bool iscaldone = true; struct ath9k_cal_list *currCal = ah->cal_list_curr; - bool nfcal, nfcal_pending = false; + bool nfcal, nfcal_pending = false, 
percal_pending; + int ret; nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF); if (ah->caldata) nfcal_pending = test_bit(NFCAL_PENDING, &ah->caldata->cal_flags); - if (currCal && !nfcal && - (currCal->calState == CAL_RUNNING || - currCal->calState == CAL_WAITING)) { - iscaldone = ar9002_hw_per_calibration(ah, chan, - rxchainmask, currCal); - if (iscaldone) { - ah->cal_list_curr = currCal = currCal->calNext; - - if (currCal->calState == CAL_WAITING) { - iscaldone = false; - ath9k_hw_reset_calibration(ah, currCal); - } + percal_pending = (currCal && + (currCal->calState == CAL_RUNNING || + currCal->calState == CAL_WAITING)); + + if (percal_pending && !nfcal) { + if (!ar9002_hw_per_calibration(ah, chan, rxchainmask, currCal)) + return 0; + + ah->cal_list_curr = currCal = currCal->calNext; + if (currCal->calState == CAL_WAITING) { + ath9k_hw_reset_calibration(ah, currCal); + return 0; } } @@ -698,7 +696,9 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah, * NF is slow time-variant, so it is OK to use a * historical value. */ - ath9k_hw_loadnf(ah, ah->curchan); + ret = ath9k_hw_loadnf(ah, ah->curchan); + if (ret < 0) + return ret; } if (longcal) { @@ -709,7 +709,7 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah, } } - return iscaldone; + return !percal_pending; } /* Carrier leakage Calibration fix */ @@ -856,6 +856,8 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan) /* Do PA Calibration */ ar9002_hw_pa_cal(ah, true); + ath9k_hw_loadnf(ah, chan); + ath9k_hw_start_nfcal(ah, true); if (ah->caldata) set_bit(NFCAL_PENDING, &ah->caldata->cal_flags); diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c index 2a93519f4bdf43479a423a7f9ad5e8773f4b9ab9..f816909d9474e204c36abe20e73c16e0c259956b 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c @@ -281,7 +281,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen) | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) - | SM(i->txpower, AR_XmitPower0) + | SM(i->txpower[0], AR_XmitPower0) | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) | (i->flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0) | (i->keyix != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0) @@ -307,9 +307,9 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) | set11nRateFlags(i->rates, 3) | SM(i->rtscts_rate, AR_RTSCTSRate); - ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower, AR_XmitPower1); - ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower, AR_XmitPower2); - ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower, AR_XmitPower3); + ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1); + ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2); + ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3); } static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds, diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c index 9a2afa2c690b761bef1e9203c60b2a3bb0e9e86f..fc08162b58200143d5324e9825cab8384b6b5118 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c @@ -643,9 +643,12 @@ static void ar9002_hw_spectral_scan_config(struct ath_hw *ah, * and fix otherwise. 
*/ count = param->count; - if (param->endless) - count = 0x80; - else if (count & 0x80) + if (param->endless) { + if (AR_SREV_9271(ah)) + count = 0; + else + count = 0x80; + } else if (count & 0x80) count = 0x7f; REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index ac8301ef52420a00009f4d78b82e700987ea7508..06ab71db6e804b76a867cc036cb27887a2ac5b62 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -121,13 +121,12 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah, return iscaldone; } -static bool ar9003_hw_calibrate(struct ath_hw *ah, - struct ath9k_channel *chan, - u8 rxchainmask, - bool longcal) +static int ar9003_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan, + u8 rxchainmask, bool longcal) { bool iscaldone = true; struct ath9k_cal_list *currCal = ah->cal_list_curr; + int ret; /* * For given calibration: @@ -163,7 +162,9 @@ static bool ar9003_hw_calibrate(struct ath_hw *ah, * NF is slow time-variant, so it is OK to use a historical * value. */ - ath9k_hw_loadnf(ah, ah->curchan); + ret = ath9k_hw_loadnf(ah, ah->curchan); + if (ret < 0) + return ret; /* start NF calibration, without updating BB NF register */ ath9k_hw_start_nfcal(ah, false); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 80c6eacbda53349727fe4bfba72e32eb2abe28fd..08225a0067c2d8b75bddd1021359c654b5ffb012 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -4079,27 +4079,28 @@ static int ar9003_hw_get_thermometer(struct ath_hw *ah) static void ar9003_hw_thermometer_apply(struct ath_hw *ah) { + struct ath9k_hw_capabilities *pCap = &ah->caps; int thermometer = ar9003_hw_get_thermometer(ah); u8 therm_on = (thermometer < 0) ? 0 : 1; REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4, AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on); - if (ah->caps.tx_chainmask & BIT(1)) + if (pCap->chip_chainmask & BIT(1)) REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4, AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on); - if (ah->caps.tx_chainmask & BIT(2)) + if (pCap->chip_chainmask & BIT(2)) REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4, AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on); therm_on = (thermometer < 0) ? 0 : (thermometer == 0); REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4, AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on); - if (ah->caps.tx_chainmask & BIT(1)) { + if (pCap->chip_chainmask & BIT(1)) { therm_on = (thermometer < 0) ? 0 : (thermometer == 1); REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4, AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on); } - if (ah->caps.tx_chainmask & BIT(2)) { + if (pCap->chip_chainmask & BIT(2)) { therm_on = (thermometer < 0) ? 
0 : (thermometer == 2); REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4, AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on); @@ -4376,6 +4377,25 @@ static u8 ar9003_hw_eeprom_get_cck_tgt_pwr(struct ath_hw *ah, targetPowerArray, numPiers); } +static void ar9003_hw_selfgen_tpc_txpower(struct ath_hw *ah, + struct ath9k_channel *chan, + u8 *pwr_array) +{ + u32 val; + + /* target power values for self generated frames (ACK,RTS/CTS) */ + if (IS_CHAN_2GHZ(chan)) { + val = SM(pwr_array[ALL_TARGET_LEGACY_1L_5L], AR_TPC_ACK) | + SM(pwr_array[ALL_TARGET_LEGACY_1L_5L], AR_TPC_CTS) | + SM(0x3f, AR_TPC_CHIRP) | SM(0x3f, AR_TPC_RPT); + } else { + val = SM(pwr_array[ALL_TARGET_LEGACY_6_24], AR_TPC_ACK) | + SM(pwr_array[ALL_TARGET_LEGACY_6_24], AR_TPC_CTS) | + SM(0x3f, AR_TPC_CHIRP) | SM(0x3f, AR_TPC_RPT); + } + REG_WRITE(ah, AR_TPC, val); +} + /* Set tx power registers to array of values passed in */ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) { @@ -5311,6 +5331,7 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, struct ar9300_modal_eep_header *modal_hdr; u8 targetPowerValT2[ar9300RateSize]; u8 target_power_val_t2_eep[ar9300RateSize]; + u8 targetPowerValT2_tpc[ar9300RateSize]; unsigned int i = 0, paprd_scale_factor = 0; u8 pwr_idx, min_pwridx = 0; @@ -5362,6 +5383,9 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, twiceAntennaReduction, powerLimit); + memcpy(targetPowerValT2_tpc, targetPowerValT2, + sizeof(targetPowerValT2)); + if (ar9003_is_paprd_enabled(ah)) { for (i = 0; i < ar9300RateSize; i++) { if ((ah->paprd_ratemask & (1 << i)) && @@ -5395,6 +5419,30 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, ar9003_hw_tx_power_regwrite(ah, targetPowerValT2); ar9003_hw_calibration_apply(ah, chan->channel); ar9003_paprd_set_txpower(ah, chan, targetPowerValT2); + + ar9003_hw_selfgen_tpc_txpower(ah, chan, targetPowerValT2); + + /* TPC initializations */ + if (ah->tpc_enabled) { + u32 val; + + ar9003_hw_init_rate_txpower(ah, targetPowerValT2_tpc, chan); + + /* Enable TPC */ + REG_WRITE(ah, AR_PHY_PWRTX_MAX, + AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE); + /* Disable per chain power reduction */ + val = REG_READ(ah, AR_PHY_POWER_TX_SUB); + if (AR_SREV_9340(ah)) + REG_WRITE(ah, AR_PHY_POWER_TX_SUB, + val & 0xFFFFFFC0); + else + REG_WRITE(ah, AR_PHY_POWER_TX_SUB, + val & 0xFFFFF000); + } else { + /* Disable TPC */ + REG_WRITE(ah, AR_PHY_PWRTX_MAX, 0); + } } static u16 ath9k_hw_ar9300_get_spur_channel(struct ath_hw *ah, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c index ddef9eedbac6f0d5e0b2ea2d0bfacda4f96e63c1..06ad2172030e7d63f55390b191bb2e7f9adb1c78 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c @@ -333,12 +333,29 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah) qca953x_1p0_soc_preamble); INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST], qca953x_1p0_soc_postamble); - INIT_INI_ARRAY(&ah->iniModesRxGain, - qca953x_1p0_common_wo_xlna_rx_gain_table); - INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds, - qca953x_1p0_common_wo_xlna_rx_gain_bounds); - INIT_INI_ARRAY(&ah->iniModesTxGain, - qca953x_1p0_modes_no_xpa_tx_gain_table); + + if (AR_SREV_9531_20(ah)) { + INIT_INI_ARRAY(&ah->iniModesRxGain, + qca953x_2p0_common_wo_xlna_rx_gain_table); + INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds, + qca953x_2p0_common_wo_xlna_rx_gain_bounds); + } else { + INIT_INI_ARRAY(&ah->iniModesRxGain, + qca953x_1p0_common_wo_xlna_rx_gain_table); + 
INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds, + qca953x_1p0_common_wo_xlna_rx_gain_bounds); + } + + if (AR_SREV_9531_20(ah)) + INIT_INI_ARRAY(&ah->iniModesTxGain, + qca953x_2p0_modes_no_xpa_tx_gain_table); + else if (AR_SREV_9531_11(ah)) + INIT_INI_ARRAY(&ah->iniModesTxGain, + qca953x_1p1_modes_no_xpa_tx_gain_table); + else + INIT_INI_ARRAY(&ah->iniModesTxGain, + qca953x_1p0_modes_no_xpa_tx_gain_table); + INIT_INI_ARRAY(&ah->iniModesFastClock, qca953x_1p0_modes_fast_clock); } else if (AR_SREV_9580(ah)) { @@ -518,9 +535,15 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah) else if (AR_SREV_9550(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar955x_1p0_modes_xpa_tx_gain_table); - else if (AR_SREV_9531(ah)) + else if (AR_SREV_9531_10(ah)) + INIT_INI_ARRAY(&ah->iniModesTxGain, + qca953x_1p0_modes_xpa_tx_gain_table); + else if (AR_SREV_9531_11(ah)) + INIT_INI_ARRAY(&ah->iniModesTxGain, + qca953x_1p1_modes_xpa_tx_gain_table); + else if (AR_SREV_9531_20(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, - qca953x_1p0_modes_xpa_tx_gain_table); + qca953x_2p0_modes_xpa_tx_gain_table); else if (AR_SREV_9580(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9580_1p0_lowest_ob_db_tx_gain_table); @@ -562,7 +585,10 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah) INIT_INI_ARRAY(&ah->iniModesTxGain, ar955x_1p0_modes_no_xpa_tx_gain_table); else if (AR_SREV_9531(ah)) { - if (AR_SREV_9531_11(ah)) + if (AR_SREV_9531_20(ah)) + INIT_INI_ARRAY(&ah->iniModesTxGain, + qca953x_2p0_modes_no_xpa_tx_gain_table); + else if (AR_SREV_9531_11(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, qca953x_1p1_modes_no_xpa_tx_gain_table); else @@ -670,9 +696,6 @@ static void ar9003_tx_gain_table_mode5(struct ath_hw *ah) if (AR_SREV_9485_11_OR_LATER(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9485Modes_green_ob_db_tx_gain_1_1); - else if (AR_SREV_9340(ah)) - INIT_INI_ARRAY(&ah->iniModesTxGain, - ar9340Modes_ub124_tx_gain_table_1p0); else if (AR_SREV_9580(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9580_1p0_type5_tx_gain_table); @@ -792,11 +815,16 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah) ar955x_1p0_common_wo_xlna_rx_gain_table); INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds, ar955x_1p0_common_wo_xlna_rx_gain_bounds); - } else if (AR_SREV_9531(ah)) { + } else if (AR_SREV_9531_10(ah) || AR_SREV_9531_11(ah)) { INIT_INI_ARRAY(&ah->iniModesRxGain, qca953x_1p0_common_wo_xlna_rx_gain_table); INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds, qca953x_1p0_common_wo_xlna_rx_gain_bounds); + } else if (AR_SREV_9531_20(ah)) { + INIT_INI_ARRAY(&ah->iniModesRxGain, + qca953x_2p0_common_wo_xlna_rx_gain_table); + INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds, + qca953x_2p0_common_wo_xlna_rx_gain_bounds); } else if (AR_SREV_9580(ah)) INIT_INI_ARRAY(&ah->iniModesRxGain, ar9580_1p0_wo_xlna_rx_gain_table); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c index 057b1657c4287f1418058ac3c4f8ec6a305619f9..da84b705cbcdc476734ac5f2c41dc82443e744d6 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -101,7 +101,7 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen) | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) - | SM(i->txpower, AR_XmitPower0) + | SM(i->txpower[0], AR_XmitPower0) | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) | (i->keyix != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0) | (i->flags & ATH9K_TXDESC_LOWRXCHAIN ? 
AR_LowRxChain : 0) @@ -152,9 +152,9 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding; - ACCESS_ONCE(ads->ctl20) = SM(i->txpower, AR_XmitPower1); - ACCESS_ONCE(ads->ctl21) = SM(i->txpower, AR_XmitPower2); - ACCESS_ONCE(ads->ctl22) = SM(i->txpower, AR_XmitPower3); + ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1); + ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2); + ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3); } static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 1e8ea5e4d4ca71ecd7598aea30d5a76f4749e85a..ae6cde273414cfff67e0c455770f75c2b9243f16 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -18,6 +18,21 @@ #include "hw.h" #include "ar9003_phy.h" +#define AR9300_OFDM_RATES 8 +#define AR9300_HT_SS_RATES 8 +#define AR9300_HT_DS_RATES 8 +#define AR9300_HT_TS_RATES 8 + +#define AR9300_11NA_OFDM_SHIFT 0 +#define AR9300_11NA_HT_SS_SHIFT 8 +#define AR9300_11NA_HT_DS_SHIFT 16 +#define AR9300_11NA_HT_TS_SHIFT 24 + +#define AR9300_11NG_OFDM_SHIFT 4 +#define AR9300_11NG_HT_SS_SHIFT 12 +#define AR9300_11NG_HT_DS_SHIFT 20 +#define AR9300_11NG_HT_TS_SHIFT 28 + static const int firstep_table[] = /* level: 0 1 2 3 4 5 6 7 8 */ { -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */ @@ -40,6 +55,71 @@ static const int m2ThreshLowExt_off = 127; static const int m1ThreshExt_off = 127; static const int m2ThreshExt_off = 127; +static const u8 ofdm2pwr[] = { + ALL_TARGET_LEGACY_6_24, + ALL_TARGET_LEGACY_6_24, + ALL_TARGET_LEGACY_6_24, + ALL_TARGET_LEGACY_6_24, + ALL_TARGET_LEGACY_6_24, + ALL_TARGET_LEGACY_36, + ALL_TARGET_LEGACY_48, + ALL_TARGET_LEGACY_54 +}; + +static const u8 mcs2pwr_ht20[] = { + ALL_TARGET_HT20_0_8_16, + ALL_TARGET_HT20_1_3_9_11_17_19, + ALL_TARGET_HT20_1_3_9_11_17_19, + ALL_TARGET_HT20_1_3_9_11_17_19, + ALL_TARGET_HT20_4, + ALL_TARGET_HT20_5, + ALL_TARGET_HT20_6, + ALL_TARGET_HT20_7, + ALL_TARGET_HT20_0_8_16, + ALL_TARGET_HT20_1_3_9_11_17_19, + ALL_TARGET_HT20_1_3_9_11_17_19, + ALL_TARGET_HT20_1_3_9_11_17_19, + ALL_TARGET_HT20_12, + ALL_TARGET_HT20_13, + ALL_TARGET_HT20_14, + ALL_TARGET_HT20_15, + ALL_TARGET_HT20_0_8_16, + ALL_TARGET_HT20_1_3_9_11_17_19, + ALL_TARGET_HT20_1_3_9_11_17_19, + ALL_TARGET_HT20_1_3_9_11_17_19, + ALL_TARGET_HT20_20, + ALL_TARGET_HT20_21, + ALL_TARGET_HT20_22, + ALL_TARGET_HT20_23 +}; + +static const u8 mcs2pwr_ht40[] = { + ALL_TARGET_HT40_0_8_16, + ALL_TARGET_HT40_1_3_9_11_17_19, + ALL_TARGET_HT40_1_3_9_11_17_19, + ALL_TARGET_HT40_1_3_9_11_17_19, + ALL_TARGET_HT40_4, + ALL_TARGET_HT40_5, + ALL_TARGET_HT40_6, + ALL_TARGET_HT40_7, + ALL_TARGET_HT40_0_8_16, + ALL_TARGET_HT40_1_3_9_11_17_19, + ALL_TARGET_HT40_1_3_9_11_17_19, + ALL_TARGET_HT40_1_3_9_11_17_19, + ALL_TARGET_HT40_12, + ALL_TARGET_HT40_13, + ALL_TARGET_HT40_14, + ALL_TARGET_HT40_15, + ALL_TARGET_HT40_0_8_16, + ALL_TARGET_HT40_1_3_9_11_17_19, + ALL_TARGET_HT40_1_3_9_11_17_19, + ALL_TARGET_HT40_1_3_9_11_17_19, + ALL_TARGET_HT40_20, + ALL_TARGET_HT40_21, + ALL_TARGET_HT40_22, + ALL_TARGET_HT40_23, +}; + /** * ar9003_hw_set_channel - set channel on single-chip device * @ah: atheros hardware structure @@ -1361,7 +1441,7 @@ static void ar9003_hw_set_radar_params(struct ath_hw *ah, struct ath_hw_radar_conf *conf) { unsigned int regWrites = 0; - u32 radar_0 = 0, radar_1 = 0; + u32 radar_0 = 0, radar_1; if (!conf) { REG_CLR_BIT(ah, 
AR_PHY_RADAR_0, AR_PHY_RADAR_0_ENA); @@ -1375,6 +1455,9 @@ static void ar9003_hw_set_radar_params(struct ath_hw *ah, radar_0 |= SM(conf->pulse_rssi, AR_PHY_RADAR_0_PRSSI); radar_0 |= SM(conf->pulse_inband, AR_PHY_RADAR_0_INBAND); + radar_1 = REG_READ(ah, AR_PHY_RADAR_1); + radar_1 &= ~(AR_PHY_RADAR_1_MAXLEN | AR_PHY_RADAR_1_RELSTEP_THRESH | + AR_PHY_RADAR_1_RELPWR_THRESH); radar_1 |= AR_PHY_RADAR_1_MAX_RRSSI; radar_1 |= AR_PHY_RADAR_1_BLOCK_CHECK; radar_1 |= SM(conf->pulse_maxlen, AR_PHY_RADAR_1_MAXLEN); @@ -1401,7 +1484,7 @@ static void ar9003_hw_set_radar_conf(struct ath_hw *ah) conf->fir_power = -28; conf->radar_rssi = 0; conf->pulse_height = 10; - conf->pulse_rssi = 24; + conf->pulse_rssi = 15; conf->pulse_inband = 8; conf->pulse_maxlen = 255; conf->pulse_inband_step = 12; @@ -1796,6 +1879,100 @@ static void ar9003_hw_tx99_set_txpower(struct ath_hw *ah, u8 txpower) ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_14], 0)); } +static void ar9003_hw_init_txpower_cck(struct ath_hw *ah, u8 *rate_array) +{ + ah->tx_power[0] = rate_array[ALL_TARGET_LEGACY_1L_5L]; + ah->tx_power[1] = rate_array[ALL_TARGET_LEGACY_1L_5L]; + ah->tx_power[2] = min(rate_array[ALL_TARGET_LEGACY_1L_5L], + rate_array[ALL_TARGET_LEGACY_5S]); + ah->tx_power[3] = min(rate_array[ALL_TARGET_LEGACY_11L], + rate_array[ALL_TARGET_LEGACY_11S]); +} + +static void ar9003_hw_init_txpower_ofdm(struct ath_hw *ah, u8 *rate_array, + int offset) +{ + int i, j; + + for (i = offset; i < offset + AR9300_OFDM_RATES; i++) { + /* OFDM rate to power table idx */ + j = ofdm2pwr[i - offset]; + ah->tx_power[i] = rate_array[j]; + } +} + +static void ar9003_hw_init_txpower_ht(struct ath_hw *ah, u8 *rate_array, + int ss_offset, int ds_offset, + int ts_offset, bool is_40) +{ + int i, j, mcs_idx = 0; + const u8 *mcs2pwr = (is_40) ? 
mcs2pwr_ht40 : mcs2pwr_ht20; + + for (i = ss_offset; i < ss_offset + AR9300_HT_SS_RATES; i++) { + j = mcs2pwr[mcs_idx]; + ah->tx_power[i] = rate_array[j]; + mcs_idx++; + } + + for (i = ds_offset; i < ds_offset + AR9300_HT_DS_RATES; i++) { + j = mcs2pwr[mcs_idx]; + ah->tx_power[i] = rate_array[j]; + mcs_idx++; + } + + for (i = ts_offset; i < ts_offset + AR9300_HT_TS_RATES; i++) { + j = mcs2pwr[mcs_idx]; + ah->tx_power[i] = rate_array[j]; + mcs_idx++; + } +} + +static void ar9003_hw_init_txpower_stbc(struct ath_hw *ah, int ss_offset, + int ds_offset, int ts_offset) +{ + memcpy(&ah->tx_power_stbc[ss_offset], &ah->tx_power[ss_offset], + AR9300_HT_SS_RATES); + memcpy(&ah->tx_power_stbc[ds_offset], &ah->tx_power[ds_offset], + AR9300_HT_DS_RATES); + memcpy(&ah->tx_power_stbc[ts_offset], &ah->tx_power[ts_offset], + AR9300_HT_TS_RATES); +} + +void ar9003_hw_init_rate_txpower(struct ath_hw *ah, u8 *rate_array, + struct ath9k_channel *chan) +{ + if (IS_CHAN_5GHZ(chan)) { + ar9003_hw_init_txpower_ofdm(ah, rate_array, + AR9300_11NA_OFDM_SHIFT); + if (IS_CHAN_HT20(chan) || IS_CHAN_HT40(chan)) { + ar9003_hw_init_txpower_ht(ah, rate_array, + AR9300_11NA_HT_SS_SHIFT, + AR9300_11NA_HT_DS_SHIFT, + AR9300_11NA_HT_TS_SHIFT, + IS_CHAN_HT40(chan)); + ar9003_hw_init_txpower_stbc(ah, + AR9300_11NA_HT_SS_SHIFT, + AR9300_11NA_HT_DS_SHIFT, + AR9300_11NA_HT_TS_SHIFT); + } + } else { + ar9003_hw_init_txpower_cck(ah, rate_array); + ar9003_hw_init_txpower_ofdm(ah, rate_array, + AR9300_11NG_OFDM_SHIFT); + if (IS_CHAN_HT20(chan) || IS_CHAN_HT40(chan)) { + ar9003_hw_init_txpower_ht(ah, rate_array, + AR9300_11NG_HT_SS_SHIFT, + AR9300_11NG_HT_DS_SHIFT, + AR9300_11NG_HT_TS_SHIFT, + IS_CHAN_HT40(chan)); + ar9003_hw_init_txpower_stbc(ah, + AR9300_11NG_HT_SS_SHIFT, + AR9300_11NG_HT_DS_SHIFT, + AR9300_11NG_HT_TS_SHIFT); + } + } +} + void ar9003_hw_attach_phy_ops(struct ath_hw *ah) { struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.h b/drivers/net/wireless/ath/ath9k/ar9003_rtt.h index a43b30d723a426bc1a2b3d8aac67b99942ea3df0..6290467a75a04ab94e7779f674a1c1ceb751bd3e 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_rtt.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.h @@ -17,6 +17,7 @@ #ifndef AR9003_RTT_H #define AR9003_RTT_H +#ifdef CONFIG_ATH9K_PCOEM void ar9003_hw_rtt_enable(struct ath_hw *ah); void ar9003_hw_rtt_disable(struct ath_hw *ah); void ar9003_hw_rtt_set_mask(struct ath_hw *ah, u32 rtt_mask); @@ -25,5 +26,40 @@ void ar9003_hw_rtt_load_hist(struct ath_hw *ah); void ar9003_hw_rtt_fill_hist(struct ath_hw *ah); void ar9003_hw_rtt_clear_hist(struct ath_hw *ah); bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan); +#else +static inline void ar9003_hw_rtt_enable(struct ath_hw *ah) +{ +} + +static inline void ar9003_hw_rtt_disable(struct ath_hw *ah) +{ +} + +static inline void ar9003_hw_rtt_set_mask(struct ath_hw *ah, u32 rtt_mask) +{ +} + +static inline bool ar9003_hw_rtt_force_restore(struct ath_hw *ah) +{ + return false; +} + +static inline void ar9003_hw_rtt_load_hist(struct ath_hw *ah) +{ +} + +static inline void ar9003_hw_rtt_fill_hist(struct ath_hw *ah) +{ +} + +static inline void ar9003_hw_rtt_clear_hist(struct ath_hw *ah) +{ +} + +static inline bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan) +{ + return false; +} +#endif #endif diff --git a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h index 
812a9d787bf353236ebc906450583abce3c58a97..159cc6fd2362b4ac454895eed67a0c0f090ad3c5 100644 --- a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h @@ -20,6 +20,8 @@ #define qca953x_1p0_mac_postamble ar9300_2p2_mac_postamble +#define qca953x_1p0_soc_preamble ar955x_1p0_soc_preamble + #define qca953x_1p0_soc_postamble ar9300_2p2_soc_postamble #define qca953x_1p0_common_rx_gain_table ar9300Common_rx_gain_table_2p2 @@ -28,6 +30,10 @@ #define qca953x_1p0_modes_fast_clock ar9300Modes_fast_clock_2p2 +#define qca953x_1p0_common_wo_xlna_rx_gain_bounds ar955x_1p0_common_wo_xlna_rx_gain_bounds + +#define qca953x_1p0_common_rx_gain_bounds ar955x_1p0_common_rx_gain_bounds + static const u32 qca953x_1p0_mac_core[][2] = { /* Addr allmodes */ {0x00000008, 0x00000000}, @@ -490,35 +496,6 @@ static const u32 qca953x_1p0_radio_postamble[][5] = { {0x00016540, 0x10804008, 0x10804008, 0x50804000, 0x50804000}, }; -static const u32 qca953x_1p0_soc_preamble[][2] = { - /* Addr allmodes */ - {0x00007000, 0x00000000}, - {0x00007004, 0x00000000}, - {0x00007008, 0x00000000}, - {0x0000700c, 0x00000000}, - {0x0000701c, 0x00000000}, - {0x00007020, 0x00000000}, - {0x00007024, 0x00000000}, - {0x00007028, 0x00000000}, - {0x0000702c, 0x00000000}, - {0x00007030, 0x00000000}, - {0x00007034, 0x00000002}, - {0x00007038, 0x000004c2}, - {0x00007048, 0x00000000}, -}; - -static const u32 qca953x_1p0_common_rx_gain_bounds[][5] = { - /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ - {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27}, - {0x00009e48, 0x5030201a, 0x5030201a, 0x50302018, 0x50302018}, -}; - -static const u32 qca953x_1p0_common_wo_xlna_rx_gain_bounds[][5] = { - /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ - {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27}, - {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, -}; - static const u32 qca953x_1p0_modes_xpa_tx_gain_table[][2] = { /* Addr allmodes */ {0x0000a2dc, 0xfffd5aaa}, @@ -715,8 +692,73 @@ static const u32 qca953x_1p1_modes_no_xpa_tx_gain_table[][2] = { {0x00016448, 0x6c927a70}, }; +static const u32 qca953x_1p1_modes_xpa_tx_gain_table[][2] = { + /* Addr allmodes */ + {0x0000a2dc, 0xfffb52aa}, + {0x0000a2e0, 0xfffd64cc}, + {0x0000a2e4, 0xfffe80f0}, + {0x0000a2e8, 0xffffff00}, + {0x0000a410, 0x000050d5}, + {0x0000a500, 0x00000000}, + {0x0000a504, 0x04000002}, + {0x0000a508, 0x08000004}, + {0x0000a50c, 0x0c000006}, + {0x0000a510, 0x1000000a}, + {0x0000a514, 0x1400000c}, + {0x0000a518, 0x1800000e}, + {0x0000a51c, 0x1c000048}, + {0x0000a520, 0x2000004a}, + {0x0000a524, 0x2400004c}, + {0x0000a528, 0x2800004e}, + {0x0000a52c, 0x2b00024a}, + {0x0000a530, 0x2f00024c}, + {0x0000a534, 0x3300024e}, + {0x0000a538, 0x36000668}, + {0x0000a53c, 0x38000669}, + {0x0000a540, 0x3a000868}, + {0x0000a544, 0x3d00086a}, + {0x0000a548, 0x4000086c}, + {0x0000a54c, 0x4200086e}, + {0x0000a550, 0x43000a6e}, + {0x0000a554, 0x43000a6e}, + {0x0000a558, 0x43000a6e}, + {0x0000a55c, 0x43000a6e}, + {0x0000a560, 0x43000a6e}, + {0x0000a564, 0x43000a6e}, + {0x0000a568, 0x43000a6e}, + {0x0000a56c, 0x43000a6e}, + {0x0000a570, 0x43000a6e}, + {0x0000a574, 0x43000a6e}, + {0x0000a578, 0x43000a6e}, + {0x0000a57c, 0x43000a6e}, + {0x0000a600, 0x00000000}, + {0x0000a604, 0x00000000}, + {0x0000a608, 0x00000000}, + {0x0000a60c, 0x03804000}, + {0x0000a610, 0x03804e01}, + {0x0000a614, 0x03804e01}, + {0x0000a618, 0x03804e01}, + {0x0000a61c, 0x04009002}, + {0x0000a620, 0x04009002}, + {0x0000a624, 0x04009002}, + {0x0000a628, 0x04009002}, + {0x0000a62c, 
0x04009002}, + {0x0000a630, 0x04009002}, + {0x0000a634, 0x04009002}, + {0x0000a638, 0x04009002}, + {0x0000a63c, 0x04009002}, + {0x0000b2dc, 0xfffb52aa}, + {0x0000b2e0, 0xfffd64cc}, + {0x0000b2e4, 0xfffe80f0}, + {0x0000b2e8, 0xffffff00}, + {0x00016044, 0x024922db}, + {0x00016048, 0x6c927a70}, + {0x00016444, 0x024922db}, + {0x00016448, 0x6c927a70}, +}; + static const u32 qca953x_2p0_baseband_core[][2] = { - /* Addr allmodes */ + /* Addr allmodes */ {0x00009800, 0xafe68e30}, {0x00009804, 0xfd14e000}, {0x00009808, 0x9c0a9f6b}, @@ -914,4 +956,400 @@ static const u32 qca953x_2p0_baseband_postamble[][5] = { {0x0000b284, 0x00000000, 0x00000000, 0x00000010, 0x00000010}, }; +static const u32 qca953x_2p0_common_wo_xlna_rx_gain_table[][2] = { + /* Addr allmodes */ + {0x0000a000, 0x00010000}, + {0x0000a004, 0x00030002}, + {0x0000a008, 0x00050004}, + {0x0000a00c, 0x00810080}, + {0x0000a010, 0x00830082}, + {0x0000a014, 0x01810180}, + {0x0000a018, 0x01830182}, + {0x0000a01c, 0x01850184}, + {0x0000a020, 0x01890188}, + {0x0000a024, 0x018b018a}, + {0x0000a028, 0x018d018c}, + {0x0000a02c, 0x03820190}, + {0x0000a030, 0x03840383}, + {0x0000a034, 0x03880385}, + {0x0000a038, 0x038a0389}, + {0x0000a03c, 0x038c038b}, + {0x0000a040, 0x0390038d}, + {0x0000a044, 0x03920391}, + {0x0000a048, 0x03940393}, + {0x0000a04c, 0x03960395}, + {0x0000a050, 0x00000000}, + {0x0000a054, 0x00000000}, + {0x0000a058, 0x00000000}, + {0x0000a05c, 0x00000000}, + {0x0000a060, 0x00000000}, + {0x0000a064, 0x00000000}, + {0x0000a068, 0x00000000}, + {0x0000a06c, 0x00000000}, + {0x0000a070, 0x00000000}, + {0x0000a074, 0x00000000}, + {0x0000a078, 0x00000000}, + {0x0000a07c, 0x00000000}, + {0x0000a080, 0x29292929}, + {0x0000a084, 0x29292929}, + {0x0000a088, 0x29292929}, + {0x0000a08c, 0x29292929}, + {0x0000a090, 0x22292929}, + {0x0000a094, 0x1d1d2222}, + {0x0000a098, 0x0c111117}, + {0x0000a09c, 0x00030303}, + {0x0000a0a0, 0x00000000}, + {0x0000a0a4, 0x00000000}, + {0x0000a0a8, 0x00000000}, + {0x0000a0ac, 0x00000000}, + {0x0000a0b0, 0x00000000}, + {0x0000a0b4, 0x00000000}, + {0x0000a0b8, 0x00000000}, + {0x0000a0bc, 0x00000000}, + {0x0000a0c0, 0x001f0000}, + {0x0000a0c4, 0x01000101}, + {0x0000a0c8, 0x011e011f}, + {0x0000a0cc, 0x011c011d}, + {0x0000a0d0, 0x02030204}, + {0x0000a0d4, 0x02010202}, + {0x0000a0d8, 0x021f0200}, + {0x0000a0dc, 0x0302021e}, + {0x0000a0e0, 0x03000301}, + {0x0000a0e4, 0x031e031f}, + {0x0000a0e8, 0x0402031d}, + {0x0000a0ec, 0x04000401}, + {0x0000a0f0, 0x041e041f}, + {0x0000a0f4, 0x0502041d}, + {0x0000a0f8, 0x05000501}, + {0x0000a0fc, 0x051e051f}, + {0x0000a100, 0x06010602}, + {0x0000a104, 0x061f0600}, + {0x0000a108, 0x061d061e}, + {0x0000a10c, 0x07020703}, + {0x0000a110, 0x07000701}, + {0x0000a114, 0x00000000}, + {0x0000a118, 0x00000000}, + {0x0000a11c, 0x00000000}, + {0x0000a120, 0x00000000}, + {0x0000a124, 0x00000000}, + {0x0000a128, 0x00000000}, + {0x0000a12c, 0x00000000}, + {0x0000a130, 0x00000000}, + {0x0000a134, 0x00000000}, + {0x0000a138, 0x00000000}, + {0x0000a13c, 0x00000000}, + {0x0000a140, 0x001f0000}, + {0x0000a144, 0x01000101}, + {0x0000a148, 0x011e011f}, + {0x0000a14c, 0x011c011d}, + {0x0000a150, 0x02030204}, + {0x0000a154, 0x02010202}, + {0x0000a158, 0x021f0200}, + {0x0000a15c, 0x0302021e}, + {0x0000a160, 0x03000301}, + {0x0000a164, 0x031e031f}, + {0x0000a168, 0x0402031d}, + {0x0000a16c, 0x04000401}, + {0x0000a170, 0x041e041f}, + {0x0000a174, 0x0502041d}, + {0x0000a178, 0x05000501}, + {0x0000a17c, 0x051e051f}, + {0x0000a180, 0x06010602}, + {0x0000a184, 0x061f0600}, + {0x0000a188, 0x061d061e}, + {0x0000a18c, 
0x07020703}, + {0x0000a190, 0x07000701}, + {0x0000a194, 0x00000000}, + {0x0000a198, 0x00000000}, + {0x0000a19c, 0x00000000}, + {0x0000a1a0, 0x00000000}, + {0x0000a1a4, 0x00000000}, + {0x0000a1a8, 0x00000000}, + {0x0000a1ac, 0x00000000}, + {0x0000a1b0, 0x00000000}, + {0x0000a1b4, 0x00000000}, + {0x0000a1b8, 0x00000000}, + {0x0000a1bc, 0x00000000}, + {0x0000a1c0, 0x00000000}, + {0x0000a1c4, 0x00000000}, + {0x0000a1c8, 0x00000000}, + {0x0000a1cc, 0x00000000}, + {0x0000a1d0, 0x00000000}, + {0x0000a1d4, 0x00000000}, + {0x0000a1d8, 0x00000000}, + {0x0000a1dc, 0x00000000}, + {0x0000a1e0, 0x00000000}, + {0x0000a1e4, 0x00000000}, + {0x0000a1e8, 0x00000000}, + {0x0000a1ec, 0x00000000}, + {0x0000a1f0, 0x00000396}, + {0x0000a1f4, 0x00000396}, + {0x0000a1f8, 0x00000396}, + {0x0000a1fc, 0x00000196}, + {0x0000b000, 0x00010000}, + {0x0000b004, 0x00030002}, + {0x0000b008, 0x00050004}, + {0x0000b00c, 0x00810080}, + {0x0000b010, 0x00830082}, + {0x0000b014, 0x01810180}, + {0x0000b018, 0x01830182}, + {0x0000b01c, 0x01850184}, + {0x0000b020, 0x02810280}, + {0x0000b024, 0x02830282}, + {0x0000b028, 0x02850284}, + {0x0000b02c, 0x02890288}, + {0x0000b030, 0x028b028a}, + {0x0000b034, 0x0388028c}, + {0x0000b038, 0x038a0389}, + {0x0000b03c, 0x038c038b}, + {0x0000b040, 0x0390038d}, + {0x0000b044, 0x03920391}, + {0x0000b048, 0x03940393}, + {0x0000b04c, 0x03960395}, + {0x0000b050, 0x00000000}, + {0x0000b054, 0x00000000}, + {0x0000b058, 0x00000000}, + {0x0000b05c, 0x00000000}, + {0x0000b060, 0x00000000}, + {0x0000b064, 0x00000000}, + {0x0000b068, 0x00000000}, + {0x0000b06c, 0x00000000}, + {0x0000b070, 0x00000000}, + {0x0000b074, 0x00000000}, + {0x0000b078, 0x00000000}, + {0x0000b07c, 0x00000000}, + {0x0000b080, 0x32323232}, + {0x0000b084, 0x2f2f3232}, + {0x0000b088, 0x23282a2d}, + {0x0000b08c, 0x1c1e2123}, + {0x0000b090, 0x14171919}, + {0x0000b094, 0x0e0e1214}, + {0x0000b098, 0x03050707}, + {0x0000b09c, 0x00030303}, + {0x0000b0a0, 0x00000000}, + {0x0000b0a4, 0x00000000}, + {0x0000b0a8, 0x00000000}, + {0x0000b0ac, 0x00000000}, + {0x0000b0b0, 0x00000000}, + {0x0000b0b4, 0x00000000}, + {0x0000b0b8, 0x00000000}, + {0x0000b0bc, 0x00000000}, + {0x0000b0c0, 0x003f0020}, + {0x0000b0c4, 0x00400041}, + {0x0000b0c8, 0x0140005f}, + {0x0000b0cc, 0x0160015f}, + {0x0000b0d0, 0x017e017f}, + {0x0000b0d4, 0x02410242}, + {0x0000b0d8, 0x025f0240}, + {0x0000b0dc, 0x027f0260}, + {0x0000b0e0, 0x0341027e}, + {0x0000b0e4, 0x035f0340}, + {0x0000b0e8, 0x037f0360}, + {0x0000b0ec, 0x04400441}, + {0x0000b0f0, 0x0460045f}, + {0x0000b0f4, 0x0541047f}, + {0x0000b0f8, 0x055f0540}, + {0x0000b0fc, 0x057f0560}, + {0x0000b100, 0x06400641}, + {0x0000b104, 0x0660065f}, + {0x0000b108, 0x067e067f}, + {0x0000b10c, 0x07410742}, + {0x0000b110, 0x075f0740}, + {0x0000b114, 0x077f0760}, + {0x0000b118, 0x07800781}, + {0x0000b11c, 0x07a0079f}, + {0x0000b120, 0x07c107bf}, + {0x0000b124, 0x000007c0}, + {0x0000b128, 0x00000000}, + {0x0000b12c, 0x00000000}, + {0x0000b130, 0x00000000}, + {0x0000b134, 0x00000000}, + {0x0000b138, 0x00000000}, + {0x0000b13c, 0x00000000}, + {0x0000b140, 0x003f0020}, + {0x0000b144, 0x00400041}, + {0x0000b148, 0x0140005f}, + {0x0000b14c, 0x0160015f}, + {0x0000b150, 0x017e017f}, + {0x0000b154, 0x02410242}, + {0x0000b158, 0x025f0240}, + {0x0000b15c, 0x027f0260}, + {0x0000b160, 0x0341027e}, + {0x0000b164, 0x035f0340}, + {0x0000b168, 0x037f0360}, + {0x0000b16c, 0x04400441}, + {0x0000b170, 0x0460045f}, + {0x0000b174, 0x0541047f}, + {0x0000b178, 0x055f0540}, + {0x0000b17c, 0x057f0560}, + {0x0000b180, 0x06400641}, + {0x0000b184, 0x0660065f}, + 
{0x0000b188, 0x067e067f}, + {0x0000b18c, 0x07410742}, + {0x0000b190, 0x075f0740}, + {0x0000b194, 0x077f0760}, + {0x0000b198, 0x07800781}, + {0x0000b19c, 0x07a0079f}, + {0x0000b1a0, 0x07c107bf}, + {0x0000b1a4, 0x000007c0}, + {0x0000b1a8, 0x00000000}, + {0x0000b1ac, 0x00000000}, + {0x0000b1b0, 0x00000000}, + {0x0000b1b4, 0x00000000}, + {0x0000b1b8, 0x00000000}, + {0x0000b1bc, 0x00000000}, + {0x0000b1c0, 0x00000000}, + {0x0000b1c4, 0x00000000}, + {0x0000b1c8, 0x00000000}, + {0x0000b1cc, 0x00000000}, + {0x0000b1d0, 0x00000000}, + {0x0000b1d4, 0x00000000}, + {0x0000b1d8, 0x00000000}, + {0x0000b1dc, 0x00000000}, + {0x0000b1e0, 0x00000000}, + {0x0000b1e4, 0x00000000}, + {0x0000b1e8, 0x00000000}, + {0x0000b1ec, 0x00000000}, + {0x0000b1f0, 0x00000396}, + {0x0000b1f4, 0x00000396}, + {0x0000b1f8, 0x00000396}, + {0x0000b1fc, 0x00000196}, +}; + +static const u32 qca953x_2p0_common_wo_xlna_rx_gain_bounds[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27}, + {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, +}; + +static const u32 qca953x_2p0_modes_xpa_tx_gain_table[][2] = { + /* Addr allmodes */ + {0x0000a2dc, 0xfffb52aa}, + {0x0000a2e0, 0xfffd64cc}, + {0x0000a2e4, 0xfffe80f0}, + {0x0000a2e8, 0xffffff00}, + {0x0000a410, 0x000050d5}, + {0x0000a500, 0x00000000}, + {0x0000a504, 0x04000002}, + {0x0000a508, 0x08000004}, + {0x0000a50c, 0x0c000006}, + {0x0000a510, 0x1000000a}, + {0x0000a514, 0x1400000c}, + {0x0000a518, 0x1800000e}, + {0x0000a51c, 0x1c000048}, + {0x0000a520, 0x2000004a}, + {0x0000a524, 0x2400004c}, + {0x0000a528, 0x2800004e}, + {0x0000a52c, 0x2b00024a}, + {0x0000a530, 0x2f00024c}, + {0x0000a534, 0x3300024e}, + {0x0000a538, 0x36000668}, + {0x0000a53c, 0x38000669}, + {0x0000a540, 0x3a000868}, + {0x0000a544, 0x3d00086a}, + {0x0000a548, 0x4000086c}, + {0x0000a54c, 0x4200086e}, + {0x0000a550, 0x43000a6e}, + {0x0000a554, 0x43000a6e}, + {0x0000a558, 0x43000a6e}, + {0x0000a55c, 0x43000a6e}, + {0x0000a560, 0x43000a6e}, + {0x0000a564, 0x43000a6e}, + {0x0000a568, 0x43000a6e}, + {0x0000a56c, 0x43000a6e}, + {0x0000a570, 0x43000a6e}, + {0x0000a574, 0x43000a6e}, + {0x0000a578, 0x43000a6e}, + {0x0000a57c, 0x43000a6e}, + {0x0000a600, 0x00000000}, + {0x0000a604, 0x00000000}, + {0x0000a608, 0x00000000}, + {0x0000a60c, 0x03804000}, + {0x0000a610, 0x03804e01}, + {0x0000a614, 0x03804e01}, + {0x0000a618, 0x03804e01}, + {0x0000a61c, 0x04009002}, + {0x0000a620, 0x04009002}, + {0x0000a624, 0x04009002}, + {0x0000a628, 0x04009002}, + {0x0000a62c, 0x04009002}, + {0x0000a630, 0x04009002}, + {0x0000a634, 0x04009002}, + {0x0000a638, 0x04009002}, + {0x0000a63c, 0x04009002}, + {0x0000b2dc, 0xfffb52aa}, + {0x0000b2e0, 0xfffd64cc}, + {0x0000b2e4, 0xfffe80f0}, + {0x0000b2e8, 0xffffff00}, + {0x00016044, 0x024922db}, + {0x00016048, 0x6c927a70}, + {0x00016444, 0x024922db}, + {0x00016448, 0x6c927a70}, +}; + +static const u32 qca953x_2p0_modes_no_xpa_tx_gain_table[][2] = { + /* Addr allmodes */ + {0x0000a2dc, 0xffd5f552}, + {0x0000a2e0, 0xffe60664}, + {0x0000a2e4, 0xfff80780}, + {0x0000a2e8, 0xfffff800}, + {0x0000a410, 0x000050de}, + {0x0000a500, 0x00000061}, + {0x0000a504, 0x04000063}, + {0x0000a508, 0x08000065}, + {0x0000a50c, 0x0c000261}, + {0x0000a510, 0x10000263}, + {0x0000a514, 0x14000265}, + {0x0000a518, 0x18000482}, + {0x0000a51c, 0x1b000484}, + {0x0000a520, 0x1f000486}, + {0x0000a524, 0x240008c2}, + {0x0000a528, 0x28000cc1}, + {0x0000a52c, 0x2d000ce3}, + {0x0000a530, 0x31000ce5}, + {0x0000a534, 0x350010e5}, + {0x0000a538, 0x360012e5}, + {0x0000a53c, 
0x380014e5}, + {0x0000a540, 0x3b0018e5}, + {0x0000a544, 0x3d001d04}, + {0x0000a548, 0x3e001d05}, + {0x0000a54c, 0x40001d07}, + {0x0000a550, 0x42001f27}, + {0x0000a554, 0x43001f67}, + {0x0000a558, 0x46001fe7}, + {0x0000a55c, 0x47001f2b}, + {0x0000a560, 0x49001f0d}, + {0x0000a564, 0x4b001ed2}, + {0x0000a568, 0x4c001ed4}, + {0x0000a56c, 0x4e001f15}, + {0x0000a570, 0x4f001ff6}, + {0x0000a574, 0x4f001ff6}, + {0x0000a578, 0x4f001ff6}, + {0x0000a57c, 0x4f001ff6}, + {0x0000a600, 0x00000000}, + {0x0000a604, 0x00000000}, + {0x0000a608, 0x00000000}, + {0x0000a60c, 0x00804201}, + {0x0000a610, 0x01008201}, + {0x0000a614, 0x0180c402}, + {0x0000a618, 0x0180c603}, + {0x0000a61c, 0x0180c603}, + {0x0000a620, 0x01c10603}, + {0x0000a624, 0x01c10704}, + {0x0000a628, 0x02c18b05}, + {0x0000a62c, 0x02c14c07}, + {0x0000a630, 0x01008704}, + {0x0000a634, 0x01c10402}, + {0x0000a638, 0x0301cc07}, + {0x0000a63c, 0x0301cc07}, + {0x0000b2dc, 0xffd5f552}, + {0x0000b2e0, 0xffe60664}, + {0x0000b2e4, 0xfff80780}, + {0x0000b2e8, 0xfffff800}, + {0x00016044, 0x049242db}, + {0x00016048, 0x6c927a70}, + {0x00016444, 0x049242db}, + {0x00016448, 0x6c927a70}, +}; + #endif /* INITVALS_953X_H */ diff --git a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h index 74d8bc05b317af2818317b5d1e77dc30ec8d8e8c..fd6a84ccd49e71fc086a00dcd9831efca3a59071 100644 --- a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h @@ -507,7 +507,7 @@ static const u32 ar955x_1p0_baseband_core[][2] = { {0x00009d04, 0x40206c10}, {0x00009d08, 0x009c4060}, {0x00009d0c, 0x9883800a}, - {0x00009d10, 0x01834061}, + {0x00009d10, 0x01884061}, {0x00009d14, 0x00c0040b}, {0x00009d18, 0x00000000}, {0x00009e08, 0x0038230c}, @@ -545,9 +545,9 @@ static const u32 ar955x_1p0_baseband_core[][2] = { {0x0000a370, 0x00000000}, {0x0000a390, 0x00000001}, {0x0000a394, 0x00000444}, - {0x0000a398, 0x1f020503}, - {0x0000a39c, 0x29180c03}, - {0x0000a3a0, 0x9a8b6844}, + {0x0000a398, 0x001f0e0f}, + {0x0000a39c, 0x0075393f}, + {0x0000a3a0, 0xb79f6427}, {0x0000a3a4, 0x00000000}, {0x0000a3a8, 0xaaaaaaaa}, {0x0000a3ac, 0x3c466478}, diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h index a5ca65240af30b8980b5732a610d73ef155063c9..5d4629f96c15ab582d359f78105d17fc0258316b 100644 --- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h @@ -24,7 +24,149 @@ #define ar9580_1p0_soc_postamble ar9300_2p2_soc_postamble -#define ar9580_1p0_radio_core ar9300_2p2_radio_core +static const u32 ar9580_1p0_radio_core[][2] = { + /* Addr allmodes */ + {0x00016000, 0x36db2db6}, + {0x00016004, 0x6db6db40}, + {0x00016008, 0x73f00000}, + {0x0001600c, 0x00000000}, + {0x00016040, 0x7f80fff8}, + {0x0001604c, 0x76d005b5}, + {0x00016050, 0x556cf031}, + {0x00016054, 0x13449440}, + {0x00016058, 0x0c51c92c}, + {0x0001605c, 0x3db7fffc}, + {0x00016060, 0xfffffffc}, + {0x00016064, 0x000f0278}, + {0x0001606c, 0x6db60000}, + {0x00016080, 0x00000000}, + {0x00016084, 0x0e48048c}, + {0x00016088, 0x54214514}, + {0x0001608c, 0x119f481e}, + {0x00016090, 0x24926490}, + {0x00016098, 0xd2888888}, + {0x000160a0, 0x0a108ffe}, + {0x000160a4, 0x812fc370}, + {0x000160a8, 0x423c8000}, + {0x000160b4, 0x92480080}, + {0x000160c0, 0x00adb6d0}, + {0x000160c4, 0x6db6db60}, + {0x000160c8, 0x6db6db6c}, + {0x000160cc, 0x01e6c000}, + {0x00016100, 0x3fffbe01}, + {0x00016104, 0xfff80000}, + {0x00016108, 
0x00080010}, + {0x00016144, 0x02084080}, + {0x00016148, 0x00000000}, + {0x00016280, 0x058a0001}, + {0x00016284, 0x3d840208}, + {0x00016288, 0x05a20408}, + {0x0001628c, 0x00038c07}, + {0x00016290, 0x00000004}, + {0x00016294, 0x458a214f}, + {0x00016380, 0x00000000}, + {0x00016384, 0x00000000}, + {0x00016388, 0x00800700}, + {0x0001638c, 0x00800700}, + {0x00016390, 0x00800700}, + {0x00016394, 0x00000000}, + {0x00016398, 0x00000000}, + {0x0001639c, 0x00000000}, + {0x000163a0, 0x00000001}, + {0x000163a4, 0x00000001}, + {0x000163a8, 0x00000000}, + {0x000163ac, 0x00000000}, + {0x000163b0, 0x00000000}, + {0x000163b4, 0x00000000}, + {0x000163b8, 0x00000000}, + {0x000163bc, 0x00000000}, + {0x000163c0, 0x000000a0}, + {0x000163c4, 0x000c0000}, + {0x000163c8, 0x14021402}, + {0x000163cc, 0x00001402}, + {0x000163d0, 0x00000000}, + {0x000163d4, 0x00000000}, + {0x00016400, 0x36db2db6}, + {0x00016404, 0x6db6db40}, + {0x00016408, 0x73f00000}, + {0x0001640c, 0x00000000}, + {0x00016440, 0x7f80fff8}, + {0x0001644c, 0x76d005b5}, + {0x00016450, 0x556cf031}, + {0x00016454, 0x13449440}, + {0x00016458, 0x0c51c92c}, + {0x0001645c, 0x3db7fffc}, + {0x00016460, 0xfffffffc}, + {0x00016464, 0x000f0278}, + {0x0001646c, 0x6db60000}, + {0x00016500, 0x3fffbe01}, + {0x00016504, 0xfff80000}, + {0x00016508, 0x00080010}, + {0x00016544, 0x02084080}, + {0x00016548, 0x00000000}, + {0x00016780, 0x00000000}, + {0x00016784, 0x00000000}, + {0x00016788, 0x00800700}, + {0x0001678c, 0x00800700}, + {0x00016790, 0x00800700}, + {0x00016794, 0x00000000}, + {0x00016798, 0x00000000}, + {0x0001679c, 0x00000000}, + {0x000167a0, 0x00000001}, + {0x000167a4, 0x00000001}, + {0x000167a8, 0x00000000}, + {0x000167ac, 0x00000000}, + {0x000167b0, 0x00000000}, + {0x000167b4, 0x00000000}, + {0x000167b8, 0x00000000}, + {0x000167bc, 0x00000000}, + {0x000167c0, 0x000000a0}, + {0x000167c4, 0x000c0000}, + {0x000167c8, 0x14021402}, + {0x000167cc, 0x00001402}, + {0x000167d0, 0x00000000}, + {0x000167d4, 0x00000000}, + {0x00016800, 0x36db2db6}, + {0x00016804, 0x6db6db40}, + {0x00016808, 0x73f00000}, + {0x0001680c, 0x00000000}, + {0x00016840, 0x7f80fff8}, + {0x0001684c, 0x76d005b5}, + {0x00016850, 0x556cf031}, + {0x00016854, 0x13449440}, + {0x00016858, 0x0c51c92c}, + {0x0001685c, 0x3db7fffc}, + {0x00016860, 0xfffffffc}, + {0x00016864, 0x000f0278}, + {0x0001686c, 0x6db60000}, + {0x00016900, 0x3fffbe01}, + {0x00016904, 0xfff80000}, + {0x00016908, 0x00080010}, + {0x00016944, 0x02084080}, + {0x00016948, 0x00000000}, + {0x00016b80, 0x00000000}, + {0x00016b84, 0x00000000}, + {0x00016b88, 0x00800700}, + {0x00016b8c, 0x00800700}, + {0x00016b90, 0x00800700}, + {0x00016b94, 0x00000000}, + {0x00016b98, 0x00000000}, + {0x00016b9c, 0x00000000}, + {0x00016ba0, 0x00000001}, + {0x00016ba4, 0x00000001}, + {0x00016ba8, 0x00000000}, + {0x00016bac, 0x00000000}, + {0x00016bb0, 0x00000000}, + {0x00016bb4, 0x00000000}, + {0x00016bb8, 0x00000000}, + {0x00016bbc, 0x00000000}, + {0x00016bc0, 0x000000a0}, + {0x00016bc4, 0x000c0000}, + {0x00016bc8, 0x14021402}, + {0x00016bcc, 0x00001402}, + {0x00016bd0, 0x00000000}, + {0x00016bd4, 0x00000000}, +}; #define ar9580_1p0_mac_postamble ar9300_2p2_mac_postamble diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 01a7db061c6a2b8df0a8cac77a747c506d971705..1a9fe0983a6be647010a009bd1e7e7be3ff7fb6d 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -28,7 +28,6 @@ #include "debug.h" #include "mci.h" #include "dfs.h" -#include "spectral.h" struct ath_node; struct 
ath_vif; @@ -190,6 +189,7 @@ struct ath_frame_info { u8 rtscts_rate; u8 retries : 7; u8 baw_tracked : 1; + u8 tx_power; }; struct ath_rxbuf { @@ -345,7 +345,9 @@ struct ath_chanctx { u64 tsf_val; u32 last_beacon; + int flush_timeout; u16 txpower; + u16 cur_txpower; bool offchannel; bool stopped; bool active; @@ -362,7 +364,7 @@ enum ath_chanctx_event { ATH_CHANCTX_EVENT_BEACON_SENT, ATH_CHANCTX_EVENT_TSF_TIMER, ATH_CHANCTX_EVENT_BEACON_RECEIVED, - ATH_CHANCTX_EVENT_ASSOC, + ATH_CHANCTX_EVENT_AUTHORIZED, ATH_CHANCTX_EVENT_SWITCH, ATH_CHANCTX_EVENT_ASSIGN, ATH_CHANCTX_EVENT_UNASSIGN, @@ -380,10 +382,12 @@ enum ath_chanctx_state { struct ath_chanctx_sched { bool beacon_pending; + bool beacon_adjust; bool offchannel_pending; bool wait_switch; bool force_noa_update; bool extend_absence; + bool mgd_prepare_tx; enum ath_chanctx_state state; u8 beacon_miss; @@ -468,6 +472,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force); void ath_offchannel_next(struct ath_softc *sc); void ath_scan_complete(struct ath_softc *sc, bool abort); void ath_roc_complete(struct ath_softc *sc, bool abort); +struct ath_chanctx* ath_is_go_chanctx_present(struct ath_softc *sc); #else @@ -540,7 +545,6 @@ static inline void ath_chanctx_check_active(struct ath_softc *sc, #endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */ -int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan); void ath_startrecv(struct ath_softc *sc); bool ath_stoprecv(struct ath_softc *sc); u32 ath_calcrxfilter(struct ath_softc *sc); @@ -595,7 +599,7 @@ struct ath_vif { u16 seq_no; /* BSS info */ - u8 bssid[ETH_ALEN]; + u8 bssid[ETH_ALEN] __aligned(2); u16 aid; bool assoc; @@ -618,6 +622,7 @@ struct ath_vif { u32 noa_start; u32 noa_duration; bool periodic_noa; + bool oneshot_noa; }; struct ath9k_vif_iter_data { @@ -715,7 +720,8 @@ int ath_update_survey_stats(struct ath_softc *sc); void ath_update_survey_nf(struct ath_softc *sc, int channel); void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type); void ath_ps_full_sleep(unsigned long data); -void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop); +void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop, + bool sw_pending, bool timeout_override); /**********/ /* BTCOEX */ @@ -927,6 +933,7 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs); #define ATH9K_PCI_AR9565_2ANT 0x0100 #define ATH9K_PCI_NO_PLL_PWRSAVE 0x0200 #define ATH9K_PCI_KILLER 0x0400 +#define ATH9K_PCI_LED_ACT_HI 0x0800 /* * Default cache line size, in bytes. 
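A note on the ath_vif change above: the bssid array gains __aligned(2) because the MAC-address helpers in <linux/etherdevice.h>, such as ether_addr_equal(), compare addresses as three 16-bit loads and require both operands to be 2-byte aligned. The following is a minimal standalone sketch of that pattern, not driver code; the struct and function names are illustrative only:

#include <linux/types.h>
#include <linux/etherdevice.h>

/* Illustrative sketch: a MAC address buffer that will be passed to
 * ether_addr_equal() should guarantee 16-bit alignment explicitly.
 */
struct example_vif {
	u16 seq_no;
	u8 bssid[ETH_ALEN] __aligned(2);	/* alignment ether_addr_equal() expects */
};

static bool example_bssid_matches(const struct example_vif *avp,
				  const u8 *addr)
{
	/* addr is assumed 2-byte aligned as well, e.g. a field of an
	 * ieee80211_hdr, so the u16-wise compare is well-defined.
	 */
	return ether_addr_equal(avp->bssid, addr);
}
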
@@ -975,6 +982,7 @@ struct ath_softc { struct ath_chanctx_sched sched; struct ath_offchannel offchannel; struct ath_chanctx *next_chan; + struct completion go_beacon; #endif unsigned long driver_data; @@ -982,7 +990,6 @@ struct ath_softc { u8 gtt_cnt; u32 intrstatus; u16 ps_flags; /* PS_* */ - u16 curtxpow; bool ps_enabled; bool ps_idle; short nbcnvifs; @@ -1023,10 +1030,8 @@ struct ath_softc { struct dfs_pattern_detector *dfs_detector; u64 dfs_prev_pulse_ts; u32 wow_enabled; - /* relay(fs) channel for spectral scan */ - struct rchan *rfs_chan_spec_scan; - enum spectral_mode spectral_mode; - struct ath_spec_scan spec_config; + + struct ath_spec_scan_priv spec_priv; struct ieee80211_vif *tx99_vif; struct sk_buff *tx99_skb; @@ -1069,7 +1074,7 @@ void ath9k_tasklet(unsigned long data); int ath_cabq_update(struct ath_softc *); u8 ath9k_parse_mpdudensity(u8 mpdudensity); irqreturn_t ath_isr(int irq, void *dev); -int ath_reset(struct ath_softc *sc); +int ath_reset(struct ath_softc *sc, struct ath9k_channel *hchan); void ath_cancel_work(struct ath_softc *sc); void ath_restart_work(struct ath_softc *sc); int ath9k_init_device(u16 devid, struct ath_softc *sc, diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index ecb783beeec2a1257ccc3abafa8bced7818246b2..cb366adc820b17a667a908d79db82da8519f3783 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -78,7 +78,7 @@ static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif, struct ath_tx_info info; struct ieee80211_supported_band *sband; u8 chainmask = ah->txchainmask; - u8 rate = 0; + u8 i, rate = 0; sband = &common->sbands[sc->cur_chandef.chan->band]; rate = sband->bitrates[rateidx].hw_value; @@ -88,7 +88,8 @@ static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif, memset(&info, 0, sizeof(info)); info.pkt_len = skb->len + FCS_LEN; info.type = ATH9K_PKT_TYPE_BEACON; - info.txpower = MAX_RATE_POWER; + for (i = 0; i < 4; i++) + info.txpower[i] = MAX_RATE_POWER; info.keyix = ATH9K_TXKEYIX_INVALID; info.keytype = ATH9K_KEY_TYPE_CLEAR; info.flags = ATH9K_TXDESC_NOACK | ATH9K_TXDESC_CLRDMASK; diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c index 278365b8a8955dda362292b99949e63155be354c..e200a6e3aca5f4e4ce98354814358724cb0444d3 100644 --- a/drivers/net/wireless/ath/ath9k/calib.c +++ b/drivers/net/wireless/ath/ath9k/calib.c @@ -234,7 +234,7 @@ void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update) REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); } -void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) +int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) { struct ath9k_nfcal_hist *h = NULL; unsigned i, j; @@ -301,7 +301,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) ath_dbg(common, ANY, "Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n", REG_READ(ah, AR_PHY_AGC_CONTROL)); - return; + return -ETIMEDOUT; } /* @@ -322,6 +322,8 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) } } REGWRITE_BUFFER_FLUSH(ah); + + return 0; } diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h index b8ed95e9a335d90d86e8d0dd4031e5abc2e8dffc..87badf4bb8a4109bfeb9bdda6454fff2a0b2e630 100644 --- a/drivers/net/wireless/ath/ath9k/calib.h +++ b/drivers/net/wireless/ath/ath9k/calib.h @@ -109,7 +109,7 @@ struct ath9k_pacal_info{ bool ath9k_hw_reset_calvalid(struct 
ath_hw *ah); void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update); -void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan); +int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan); bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan); void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah, struct ath9k_channel *chan); diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c index 945c89826b14cc70f1dd5f36b4bcd2b9efc21b10..206665059d66a67b0bbd2a816ba4c2c4d187ee48 100644 --- a/drivers/net/wireless/ath/ath9k/channel.c +++ b/drivers/net/wireless/ath/ath9k/channel.c @@ -66,7 +66,7 @@ static int ath_set_channel(struct ath_softc *sc) } hchan = &sc->sc_ah->channels[pos]; - r = ath_reset_internal(sc, hchan); + r = ath_reset(sc, hchan); if (r) return r; @@ -92,8 +92,8 @@ static int ath_set_channel(struct ath_softc *sc) } else { /* perform spectral scan if requested. */ if (test_bit(ATH_OP_SCANNING, &common->op_flags) && - sc->spectral_mode == SPECTRAL_CHANSCAN) - ath9k_spectral_scan_trigger(hw); + sc->spec_priv.spectral_mode == SPECTRAL_CHANSCAN) + ath9k_cmn_spectral_scan_trigger(common, &sc->spec_priv); } return 0; @@ -117,6 +117,7 @@ void ath_chanctx_init(struct ath_softc *sc) cfg80211_chandef_create(&ctx->chandef, chan, NL80211_CHAN_HT20); INIT_LIST_HEAD(&ctx->vifs); ctx->txpower = ATH_TXPOWER_MAX; + ctx->flush_timeout = HZ / 5; /* 200ms */ for (j = 0; j < ARRAY_SIZE(ctx->acq); j++) INIT_LIST_HEAD(&ctx->acq[j]); } @@ -145,6 +146,36 @@ void ath_chanctx_set_channel(struct ath_softc *sc, struct ath_chanctx *ctx, #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT +/*************/ +/* Utilities */ +/*************/ + +struct ath_chanctx* ath_is_go_chanctx_present(struct ath_softc *sc) +{ + struct ath_chanctx *ctx; + struct ath_vif *avp; + struct ieee80211_vif *vif; + + spin_lock_bh(&sc->chan_lock); + + ath_for_each_chanctx(sc, ctx) { + if (!ctx->active) + continue; + + list_for_each_entry(avp, &ctx->vifs, list) { + vif = avp->vif; + + if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO) { + spin_unlock_bh(&sc->chan_lock); + return ctx; + } + } + } + + spin_unlock_bh(&sc->chan_lock); + return NULL; +} + /**********************************************************/ /* Functions to handle the channel context state machine. 
*/ /**********************************************************/ @@ -171,7 +202,7 @@ static const char *chanctx_event_string(enum ath_chanctx_event ev) case_rtn_string(ATH_CHANCTX_EVENT_BEACON_SENT); case_rtn_string(ATH_CHANCTX_EVENT_TSF_TIMER); case_rtn_string(ATH_CHANCTX_EVENT_BEACON_RECEIVED); - case_rtn_string(ATH_CHANCTX_EVENT_ASSOC); + case_rtn_string(ATH_CHANCTX_EVENT_AUTHORIZED); case_rtn_string(ATH_CHANCTX_EVENT_SWITCH); case_rtn_string(ATH_CHANCTX_EVENT_ASSIGN); case_rtn_string(ATH_CHANCTX_EVENT_UNASSIGN); @@ -198,6 +229,7 @@ static const char *chanctx_state_string(enum ath_chanctx_state state) void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_chanctx *ictx; struct ath_vif *avp; bool active = false; u8 n_active = 0; @@ -205,6 +237,28 @@ void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx) if (!ctx) return; + if (ctx == &sc->offchannel.chan) { + spin_lock_bh(&sc->chan_lock); + + if (likely(sc->sched.channel_switch_time)) + ctx->flush_timeout = + usecs_to_jiffies(sc->sched.channel_switch_time); + else + ctx->flush_timeout = + msecs_to_jiffies(10); + + spin_unlock_bh(&sc->chan_lock); + + /* + * There is no need to iterate over the + * active/assigned channel contexts if + * the current context is offchannel. + */ + return; + } + + ictx = ctx; + list_for_each_entry(avp, &ctx->vifs, list) { struct ieee80211_vif *vif = avp->vif; @@ -227,12 +281,23 @@ void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx) n_active++; } + spin_lock_bh(&sc->chan_lock); + if (n_active <= 1) { + ictx->flush_timeout = HZ / 5; clear_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags); + spin_unlock_bh(&sc->chan_lock); return; } - if (test_and_set_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags)) + + ictx->flush_timeout = usecs_to_jiffies(sc->sched.channel_switch_time); + + if (test_and_set_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags)) { + spin_unlock_bh(&sc->chan_lock); return; + } + + spin_unlock_bh(&sc->chan_lock); if (ath9k_is_chanctx_enabled()) { ath_chanctx_event(sc, NULL, @@ -301,6 +366,111 @@ static void ath_chanctx_setup_timer(struct ath_softc *sc, u32 tsf_time) "Setup chanctx timer with timeout: %d ms\n", jiffies_to_msecs(tsf_time)); } +static void ath_chanctx_handle_bmiss(struct ath_softc *sc, + struct ath_chanctx *ctx, + struct ath_vif *avp) +{ + /* + * Clear the extend_absence flag if it had been + * set during the previous beacon transmission, + * since we need to revert to the normal NoA + * schedule. + */ + if (ctx->active && sc->sched.extend_absence) { + avp->noa_duration = 0; + sc->sched.extend_absence = false; + } + + /* If at least two consecutive beacons were missed on the STA + * chanctx, stay on the STA channel for one extra beacon period, + * to resync the timer properly. 
+ */ + if (ctx->active && sc->sched.beacon_miss >= 2) { + avp->noa_duration = 0; + sc->sched.extend_absence = true; + } +} + +static void ath_chanctx_offchannel_noa(struct ath_softc *sc, + struct ath_chanctx *ctx, + struct ath_vif *avp, + u32 tsf_time) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + avp->noa_index++; + avp->offchannel_start = tsf_time; + avp->offchannel_duration = sc->sched.offchannel_duration; + + ath_dbg(common, CHAN_CTX, + "offchannel noa_duration: %d, noa_start: %d, noa_index: %d\n", + avp->offchannel_duration, + avp->offchannel_start, + avp->noa_index); + + /* + * When multiple contexts are active, the NoA + * has to be recalculated and advertised after + * an offchannel operation. + */ + if (ctx->active && avp->noa_duration) + avp->noa_duration = 0; +} + +static void ath_chanctx_set_periodic_noa(struct ath_softc *sc, + struct ath_vif *avp, + struct ath_beacon_config *cur_conf, + u32 tsf_time, + u32 beacon_int) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + avp->noa_index++; + avp->noa_start = tsf_time; + + if (sc->sched.extend_absence) + avp->noa_duration = (3 * beacon_int / 2) + + sc->sched.channel_switch_time; + else + avp->noa_duration = + TU_TO_USEC(cur_conf->beacon_interval) / 2 + + sc->sched.channel_switch_time; + + if (test_bit(ATH_OP_SCANNING, &common->op_flags) || + sc->sched.extend_absence) + avp->periodic_noa = false; + else + avp->periodic_noa = true; + + ath_dbg(common, CHAN_CTX, + "noa_duration: %d, noa_start: %d, noa_index: %d, periodic: %d\n", + avp->noa_duration, + avp->noa_start, + avp->noa_index, + avp->periodic_noa); +} + +static void ath_chanctx_set_oneshot_noa(struct ath_softc *sc, + struct ath_vif *avp, + u32 tsf_time, + u32 duration) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + avp->noa_index++; + avp->noa_start = tsf_time; + avp->periodic_noa = false; + avp->oneshot_noa = true; + avp->noa_duration = duration + sc->sched.channel_switch_time; + + ath_dbg(common, CHAN_CTX, + "oneshot noa_duration: %d, noa_start: %d, noa_index: %d, periodic: %d\n", + avp->noa_duration, + avp->noa_start, + avp->noa_index, + avp->periodic_noa); +} + void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, enum ath_chanctx_event ev) { @@ -327,6 +497,14 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, if (avp->offchannel_duration) avp->offchannel_duration = 0; + if (avp->oneshot_noa) { + avp->noa_duration = 0; + avp->oneshot_noa = false; + + ath_dbg(common, CHAN_CTX, + "Clearing oneshot NoA\n"); + } + if (avp->chanctx != sc->cur_chan) { ath_dbg(common, CHAN_CTX, "Contexts differ, not preparing beacon\n"); @@ -356,6 +534,24 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, "Move chanctx state from WAIT_FOR_TIMER to WAIT_FOR_BEACON\n"); } + if (sc->sched.mgd_prepare_tx) + sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON; + + /* + * When a context becomes inactive, for example, + * disassociation of a station context, the NoA + * attribute needs to be removed from subsequent + * beacons. + */ + if (!ctx->active && avp->noa_duration && + sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON) { + avp->noa_duration = 0; + avp->periodic_noa = false; + + ath_dbg(common, CHAN_CTX, + "Clearing NoA schedule\n"); + } + if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON) break; @@ -378,45 +574,22 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, * values and increment the index. 
*/ if (sc->next_chan == &sc->offchannel.chan) { - avp->noa_index++; - avp->offchannel_start = tsf_time; - avp->offchannel_duration = sc->sched.offchannel_duration; - - ath_dbg(common, CHAN_CTX, - "offchannel noa_duration: %d, noa_start: %d, noa_index: %d\n", - avp->offchannel_duration, - avp->offchannel_start, - avp->noa_index); - - /* - * When multiple contexts are active, the NoA - * has to be recalculated and advertised after - * an offchannel operation. - */ - if (ctx->active && avp->noa_duration) - avp->noa_duration = 0; - + ath_chanctx_offchannel_noa(sc, ctx, avp, tsf_time); break; } - /* - * Clear the extend_absence flag if it had been - * set during the previous beacon transmission, - * since we need to revert to the normal NoA - * schedule. - */ - if (ctx->active && sc->sched.extend_absence) { - avp->noa_duration = 0; - sc->sched.extend_absence = false; - } + ath_chanctx_handle_bmiss(sc, ctx, avp); - /* If at least two consecutive beacons were missed on the STA - * chanctx, stay on the STA channel for one extra beacon period, - * to resync the timer properly. + /* + * If mgd_prepare_tx() has been called by mac80211, + * a one-shot NoA needs to be sent. This can happen + * with one or more active channel contexts - in either + * case, a new NoA schedule has to be advertised. */ - if (ctx->active && sc->sched.beacon_miss >= 2) { - avp->noa_duration = 0; - sc->sched.extend_absence = true; + if (sc->sched.mgd_prepare_tx) { + ath_chanctx_set_oneshot_noa(sc, avp, tsf_time, + jiffies_to_usecs(HZ / 5)); + break; } /* Prevent wrap-around issues */ @@ -429,31 +602,9 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, * announcement. */ if (ctx->active && - (!avp->noa_duration || sc->sched.force_noa_update)) { - avp->noa_index++; - avp->noa_start = tsf_time; - - if (sc->sched.extend_absence) - avp->noa_duration = (3 * beacon_int / 2) + - sc->sched.channel_switch_time; - else - avp->noa_duration = - TU_TO_USEC(cur_conf->beacon_interval) / 2 + - sc->sched.channel_switch_time; - - if (test_bit(ATH_OP_SCANNING, &common->op_flags) || - sc->sched.extend_absence) - avp->periodic_noa = false; - else - avp->periodic_noa = true; - - ath_dbg(common, CHAN_CTX, - "noa_duration: %d, noa_start: %d, noa_index: %d, periodic: %d\n", - avp->noa_duration, - avp->noa_start, - avp->noa_index, - avp->periodic_noa); - } + (!avp->noa_duration || sc->sched.force_noa_update)) + ath_chanctx_set_periodic_noa(sc, avp, cur_conf, + tsf_time, beacon_int); if (ctx->active && sc->sched.force_noa_update) sc->sched.force_noa_update = false; @@ -467,6 +618,15 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, } sc->sched.beacon_pending = false; + + if (sc->sched.mgd_prepare_tx) { + sc->sched.mgd_prepare_tx = false; + complete(&sc->go_beacon); + ath_dbg(common, CHAN_CTX, + "Beacon sent, complete go_beacon\n"); + break; + } + if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON) break; @@ -495,10 +655,16 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, sc->cur_chan == &sc->offchannel.chan) break; - ath_chanctx_adjust_tbtt_delta(sc); sc->sched.beacon_pending = false; sc->sched.beacon_miss = 0; + if (sc->sched.state == ATH_CHANCTX_STATE_FORCE_ACTIVE || + !sc->sched.beacon_adjust || + !sc->cur_chan->tsf_val) + break; + + ath_chanctx_adjust_tbtt_delta(sc); + /* TSF time might have been updated by the incoming beacon, * need to update the channel switch timer to reflect the change.
*/ @@ -507,10 +673,10 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, NULL); tsf_time += ath9k_hw_gettsf32(ah); - + sc->sched.beacon_adjust = false; ath_chanctx_setup_timer(sc, tsf_time); break; - case ATH_CHANCTX_EVENT_ASSOC: + case ATH_CHANCTX_EVENT_AUTHORIZED: if (sc->sched.state != ATH_CHANCTX_STATE_FORCE_ACTIVE || avp->chanctx != sc->cur_chan) break; @@ -552,6 +718,7 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, ath_chanctx_setup_timer(sc, tsf_time); sc->sched.beacon_pending = true; + sc->sched.beacon_adjust = true; break; case ATH_CHANCTX_EVENT_ENABLE_MULTICHANNEL: if (sc->cur_chan == &sc->offchannel.chan || @@ -578,22 +745,6 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, ieee80211_queue_work(sc->hw, &sc->chanctx_work); break; case ATH_CHANCTX_EVENT_ASSIGN: - /* - * When adding a new channel context, check if a scan - * is in progress and abort it since the addition of - * a new channel context is usually followed by VIF - * assignment, in which case we have to start multi-channel - * operation. - */ - if (test_bit(ATH_OP_SCANNING, &common->op_flags)) { - ath_dbg(common, CHAN_CTX, - "Aborting HW scan to add new context\n"); - - spin_unlock_bh(&sc->chan_lock); - del_timer_sync(&sc->offchannel.timer); - ath_scan_complete(sc, true); - spin_lock_bh(&sc->chan_lock); - } break; case ATH_CHANCTX_EVENT_CHANGE: break; @@ -751,6 +902,11 @@ void ath_offchannel_next(struct ath_softc *sc) sc->offchannel.state = ATH_OFFCHANNEL_ROC_START; ath_chanctx_offchan_switch(sc, sc->offchannel.roc_chan); } else { + spin_lock_bh(&sc->chan_lock); + sc->sched.offchannel_pending = false; + sc->sched.wait_switch = false; + spin_unlock_bh(&sc->chan_lock); + ath_chanctx_switch(sc, ath_chanctx_get_oper_chan(sc, false), NULL); sc->offchannel.state = ATH_OFFCHANNEL_IDLE; @@ -770,8 +926,7 @@ void ath_roc_complete(struct ath_softc *sc, bool abort) sc->offchannel.roc_vif = NULL; sc->offchannel.roc_chan = NULL; - if (!abort) - ieee80211_remain_on_channel_expired(sc->hw); + ieee80211_remain_on_channel_expired(sc->hw); ath_offchannel_next(sc); ath9k_ps_restore(sc); } @@ -808,7 +963,7 @@ static void ath_scan_send_probe(struct ath_softc *sc, struct ieee80211_tx_info *info; int band = sc->offchannel.chan.chandef.chan->band; - skb = ieee80211_probereq_get(sc->hw, vif, + skb = ieee80211_probereq_get(sc->hw, vif->addr, ssid->ssid, ssid->ssid_len, req->ie_len); if (!skb) return; @@ -902,9 +1057,8 @@ static void ath_offchannel_timer(unsigned long data) break; case ATH_OFFCHANNEL_ROC_START: case ATH_OFFCHANNEL_ROC_WAIT: - ctx = ath_chanctx_get_oper_chan(sc, false); sc->offchannel.state = ATH_OFFCHANNEL_ROC_DONE; - ath_chanctx_switch(sc, ctx, NULL); + ath_roc_complete(sc, false); break; default: break; @@ -1034,7 +1188,6 @@ static void ath_offchannel_channel_change(struct ath_softc *sc) ieee80211_ready_on_channel(sc->hw); break; case ATH_OFFCHANNEL_ROC_DONE: - ath_roc_complete(sc, false); break; default: break; @@ -1082,10 +1235,11 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force) ath9k_chanctx_stop_queues(sc, sc->cur_chan); queues_stopped = true; - __ath9k_flush(sc->hw, ~0, true); + __ath9k_flush(sc->hw, ~0, true, false, false); if (ath_chanctx_send_ps_frame(sc, true)) - __ath9k_flush(sc->hw, BIT(IEEE80211_AC_VO), false); + __ath9k_flush(sc->hw, BIT(IEEE80211_AC_VO), + false, false, false); send_ps = true; spin_lock_bh(&sc->chan_lock); @@ -1177,6 +1331,8 @@ void 
ath9k_init_channel_context(struct ath_softc *sc) (unsigned long)sc); setup_timer(&sc->sched.timer, ath_chanctx_timer, (unsigned long)sc); + + init_completion(&sc->go_beacon); } void ath9k_deinit_channel_context(struct ath_softc *sc) diff --git a/drivers/net/wireless/ath/ath9k/spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c similarity index 73% rename from drivers/net/wireless/ath/ath9k/spectral.c rename to drivers/net/wireless/ath/ath9k/common-spectral.c index 8f68426ca653d93dc58ea990a213c7091a13553a..ec93ddf0863a169622ecaa80b643597853f4a7ca 100644 --- a/drivers/net/wireless/ath/ath9k/spectral.c +++ b/drivers/net/wireless/ath/ath9k/common-spectral.c @@ -24,23 +24,24 @@ static s8 fix_rssi_inv_only(u8 rssi_val) return (s8) rssi_val; } -static void ath_debug_send_fft_sample(struct ath_softc *sc, +static void ath_debug_send_fft_sample(struct ath_spec_scan_priv *spec_priv, struct fft_sample_tlv *fft_sample_tlv) { int length; - if (!sc->rfs_chan_spec_scan) + if (!spec_priv->rfs_chan_spec_scan) return; length = __be16_to_cpu(fft_sample_tlv->length) + sizeof(*fft_sample_tlv); - relay_write(sc->rfs_chan_spec_scan, fft_sample_tlv, length); + relay_write(spec_priv->rfs_chan_spec_scan, fft_sample_tlv, length); } /* returns 1 if this was a spectral frame, even if not handled. */ -int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr, +int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_hdr *hdr, struct ath_rx_status *rs, u64 tsf) { - struct ath_hw *ah = sc->sc_ah; + struct ath_hw *ah = spec_priv->ah; + struct ath_common *common = ath9k_hw_common(spec_priv->ah); u8 num_bins, *bins, *vdata = (u8 *)hdr; struct fft_sample_ht20 fft_sample_20; struct fft_sample_ht20_40 fft_sample_40; @@ -67,7 +68,7 @@ int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr, if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK)) return 0; - chan_type = cfg80211_get_chandef_type(&sc->hw->conf.chandef); + chan_type = cfg80211_get_chandef_type(&common->hw->conf.chandef); if ((chan_type == NL80211_CHAN_HT40MINUS) || (chan_type == NL80211_CHAN_HT40PLUS)) { fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN; @@ -199,10 +200,11 @@ int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr, tlv = (struct fft_sample_tlv *)&fft_sample_20; } - ath_debug_send_fft_sample(sc, tlv); + ath_debug_send_fft_sample(spec_priv, tlv); return 1; } +EXPORT_SYMBOL(ath_cmn_process_fft); /*********************/ /* spectral_scan_ctl */ @@ -211,11 +213,11 @@ int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr, static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { - struct ath_softc *sc = file->private_data; + struct ath_spec_scan_priv *spec_priv = file->private_data; char *mode = ""; unsigned int len; - switch (sc->spectral_mode) { + switch (spec_priv->spectral_mode) { case SPECTRAL_DISABLED: mode = "disable"; break; @@ -233,12 +235,84 @@ static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf, return simple_read_from_buffer(user_buf, count, ppos, mode, len); } +void ath9k_cmn_spectral_scan_trigger(struct ath_common *common, + struct ath_spec_scan_priv *spec_priv) +{ + struct ath_hw *ah = spec_priv->ah; + u32 rxfilter; + + if (config_enabled(CONFIG_ATH9K_TX99)) + return; + + if (!ath9k_hw_ops(ah)->spectral_scan_trigger) { + ath_err(common, "spectrum analyzer not implemented on this hardware\n"); + return; + } + + ath_ps_ops(common)->wakeup(common); + rxfilter = ath9k_hw_getrxfilter(ah); + 
ath9k_hw_setrxfilter(ah, rxfilter | + ATH9K_RX_FILTER_PHYRADAR | + ATH9K_RX_FILTER_PHYERR); + + /* TODO: usually this should not be necessary, but for some reason + * (or in some mode?) the trigger must be called after the + * configuration, otherwise the register will have its values reset + * (on my ar9220 to value 0x01002310) + */ + ath9k_cmn_spectral_scan_config(common, spec_priv, spec_priv->spectral_mode); + ath9k_hw_ops(ah)->spectral_scan_trigger(ah); + ath_ps_ops(common)->restore(common); +} +EXPORT_SYMBOL(ath9k_cmn_spectral_scan_trigger); + +int ath9k_cmn_spectral_scan_config(struct ath_common *common, + struct ath_spec_scan_priv *spec_priv, + enum spectral_mode spectral_mode) +{ + struct ath_hw *ah = spec_priv->ah; + + if (!ath9k_hw_ops(ah)->spectral_scan_trigger) { + ath_err(common, "spectrum analyzer not implemented on this hardware\n"); + return -1; + } + + switch (spectral_mode) { + case SPECTRAL_DISABLED: + spec_priv->spec_config.enabled = 0; + break; + case SPECTRAL_BACKGROUND: + /* send endless samples. + * TODO: is this really useful for "background"? + */ + spec_priv->spec_config.endless = 1; + spec_priv->spec_config.enabled = 1; + break; + case SPECTRAL_CHANSCAN: + case SPECTRAL_MANUAL: + spec_priv->spec_config.endless = 0; + spec_priv->spec_config.enabled = 1; + break; + default: + return -1; + } + + ath_ps_ops(common)->wakeup(common); + ath9k_hw_ops(ah)->spectral_scan_config(ah, &spec_priv->spec_config); + ath_ps_ops(common)->restore(common); + + spec_priv->spectral_mode = spectral_mode; + + return 0; +} +EXPORT_SYMBOL(ath9k_cmn_spectral_scan_config); + static ssize_t write_file_spec_scan_ctl(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { - struct ath_softc *sc = file->private_data; - struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_spec_scan_priv *spec_priv = file->private_data; + struct ath_common *common = ath9k_hw_common(spec_priv->ah); char buf[32]; ssize_t len; @@ -252,18 +326,18 @@ static ssize_t write_file_spec_scan_ctl(struct file *file, buf[len] = '\0'; if (strncmp("trigger", buf, 7) == 0) { - ath9k_spectral_scan_trigger(sc->hw); + ath9k_cmn_spectral_scan_trigger(common, spec_priv); } else if (strncmp("background", buf, 10) == 0) { - ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND); + ath9k_cmn_spectral_scan_config(common, spec_priv, SPECTRAL_BACKGROUND); ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n"); } else if (strncmp("chanscan", buf, 8) == 0) { - ath9k_spectral_scan_config(sc->hw, SPECTRAL_CHANSCAN); + ath9k_cmn_spectral_scan_config(common, spec_priv, SPECTRAL_CHANSCAN); ath_dbg(common, CONFIG, "spectral scan: channel scan mode enabled\n"); } else if (strncmp("manual", buf, 6) == 0) { - ath9k_spectral_scan_config(sc->hw, SPECTRAL_MANUAL); + ath9k_cmn_spectral_scan_config(common, spec_priv, SPECTRAL_MANUAL); ath_dbg(common, CONFIG, "spectral scan: manual mode enabled\n"); } else if (strncmp("disable", buf, 7) == 0) { - ath9k_spectral_scan_config(sc->hw, SPECTRAL_DISABLED); + ath9k_cmn_spectral_scan_config(common, spec_priv, SPECTRAL_DISABLED); ath_dbg(common, CONFIG, "spectral scan: disabled\n"); } else { return -EINVAL; } @@ -288,11 +362,11 @@ static ssize_t read_file_spectral_short_repeat(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { - struct ath_softc *sc = file->private_data; + struct ath_spec_scan_priv *spec_priv = file->private_data; char buf[32]; unsigned int len; - len = sprintf(buf, "%d\n", sc->spec_config.short_repeat); + len = sprintf(buf,
"%d\n", spec_priv->spec_config.short_repeat); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -300,7 +374,7 @@ static ssize_t write_file_spectral_short_repeat(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { - struct ath_softc *sc = file->private_data; + struct ath_spec_scan_priv *spec_priv = file->private_data; unsigned long val; char buf[32]; ssize_t len; @@ -316,7 +390,7 @@ static ssize_t write_file_spectral_short_repeat(struct file *file, if (val > 1) return -EINVAL; - sc->spec_config.short_repeat = val; + spec_priv->spec_config.short_repeat = val; return count; } @@ -336,11 +410,11 @@ static ssize_t read_file_spectral_count(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { - struct ath_softc *sc = file->private_data; + struct ath_spec_scan_priv *spec_priv = file->private_data; char buf[32]; unsigned int len; - len = sprintf(buf, "%d\n", sc->spec_config.count); + len = sprintf(buf, "%d\n", spec_priv->spec_config.count); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -348,7 +422,7 @@ static ssize_t write_file_spectral_count(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { - struct ath_softc *sc = file->private_data; + struct ath_spec_scan_priv *spec_priv = file->private_data; unsigned long val; char buf[32]; ssize_t len; @@ -364,7 +438,7 @@ static ssize_t write_file_spectral_count(struct file *file, if (val > 255) return -EINVAL; - sc->spec_config.count = val; + spec_priv->spec_config.count = val; return count; } @@ -384,11 +458,11 @@ static ssize_t read_file_spectral_period(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { - struct ath_softc *sc = file->private_data; + struct ath_spec_scan_priv *spec_priv = file->private_data; char buf[32]; unsigned int len; - len = sprintf(buf, "%d\n", sc->spec_config.period); + len = sprintf(buf, "%d\n", spec_priv->spec_config.period); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -396,7 +470,7 @@ static ssize_t write_file_spectral_period(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { - struct ath_softc *sc = file->private_data; + struct ath_spec_scan_priv *spec_priv = file->private_data; unsigned long val; char buf[32]; ssize_t len; @@ -412,7 +486,7 @@ static ssize_t write_file_spectral_period(struct file *file, if (val > 255) return -EINVAL; - sc->spec_config.period = val; + spec_priv->spec_config.period = val; return count; } @@ -432,11 +506,11 @@ static ssize_t read_file_spectral_fft_period(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { - struct ath_softc *sc = file->private_data; + struct ath_spec_scan_priv *spec_priv = file->private_data; char buf[32]; unsigned int len; - len = sprintf(buf, "%d\n", sc->spec_config.fft_period); + len = sprintf(buf, "%d\n", spec_priv->spec_config.fft_period); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -444,7 +518,7 @@ static ssize_t write_file_spectral_fft_period(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { - struct ath_softc *sc = file->private_data; + struct ath_spec_scan_priv *spec_priv = file->private_data; unsigned long val; char buf[32]; ssize_t len; @@ -460,7 +534,7 @@ static ssize_t write_file_spectral_fft_period(struct file *file, if (val > 15) return -EINVAL; - sc->spec_config.fft_period = val; + spec_priv->spec_config.fft_period = val; return count; } @@ -506,38 +580,41 @@ static struct rchan_callbacks rfs_spec_scan_cb = 
{ /* Debug Init/Deinit */ /*********************/ -void ath9k_spectral_deinit_debug(struct ath_softc *sc) +void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv) { - if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) { - relay_close(sc->rfs_chan_spec_scan); - sc->rfs_chan_spec_scan = NULL; + if (config_enabled(CONFIG_ATH9K_DEBUGFS) && spec_priv->rfs_chan_spec_scan) { + relay_close(spec_priv->rfs_chan_spec_scan); + spec_priv->rfs_chan_spec_scan = NULL; } } +EXPORT_SYMBOL(ath9k_cmn_spectral_deinit_debug); -void ath9k_spectral_init_debug(struct ath_softc *sc) +void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv, + struct dentry *debugfs_phy) { - sc->rfs_chan_spec_scan = relay_open("spectral_scan", - sc->debug.debugfs_phy, + spec_priv->rfs_chan_spec_scan = relay_open("spectral_scan", + debugfs_phy, 1024, 256, &rfs_spec_scan_cb, NULL); debugfs_create_file("spectral_scan_ctl", S_IRUSR | S_IWUSR, - sc->debug.debugfs_phy, sc, + debugfs_phy, spec_priv, &fops_spec_scan_ctl); debugfs_create_file("spectral_short_repeat", S_IRUSR | S_IWUSR, - sc->debug.debugfs_phy, sc, + debugfs_phy, spec_priv, &fops_spectral_short_repeat); debugfs_create_file("spectral_count", S_IRUSR | S_IWUSR, - sc->debug.debugfs_phy, sc, + debugfs_phy, spec_priv, &fops_spectral_count); debugfs_create_file("spectral_period", S_IRUSR | S_IWUSR, - sc->debug.debugfs_phy, sc, + debugfs_phy, spec_priv, &fops_spectral_period); debugfs_create_file("spectral_fft_period", S_IRUSR | S_IWUSR, - sc->debug.debugfs_phy, sc, + debugfs_phy, spec_priv, &fops_spectral_fft_period); } +EXPORT_SYMBOL(ath9k_cmn_spectral_init_debug); diff --git a/drivers/net/wireless/ath/ath9k/spectral.h b/drivers/net/wireless/ath/ath9k/common-spectral.h similarity index 85% rename from drivers/net/wireless/ath/ath9k/spectral.h rename to drivers/net/wireless/ath/ath9k/common-spectral.h index 7b410c6858b08741a4bf5bcea49b458ebe5c4e79..82d9dd29652cd2912239ff51246bdc7f24e6b8a8 100644 --- a/drivers/net/wireless/ath/ath9k/spectral.h +++ b/drivers/net/wireless/ath/ath9k/common-spectral.h @@ -92,6 +92,13 @@ struct ath_ht20_40_fft_packet { struct ath_radar_info radar_info; } __packed; +struct ath_spec_scan_priv { + struct ath_hw *ah; + /* relay(fs) channel for spectral scan */ + struct rchan *rfs_chan_spec_scan; + enum spectral_mode spectral_mode; + struct ath_spec_scan spec_config; +}; #define SPECTRAL_HT20_40_TOTAL_DATA_LEN (sizeof(struct ath_ht20_40_fft_packet)) @@ -123,23 +130,15 @@ static inline u8 spectral_bitmap_weight(u8 *bins) return bins[0] & 0x3f; } -void ath9k_spectral_init_debug(struct ath_softc *sc); -void ath9k_spectral_deinit_debug(struct ath_softc *sc); +void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv, struct dentry *debugfs_phy); +void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv); -void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw); -int ath9k_spectral_scan_config(struct ieee80211_hw *hw, +void ath9k_cmn_spectral_scan_trigger(struct ath_common *common, + struct ath_spec_scan_priv *spec_priv); +int ath9k_cmn_spectral_scan_config(struct ath_common *common, + struct ath_spec_scan_priv *spec_priv, enum spectral_mode spectral_mode); - -#ifdef CONFIG_ATH9K_DEBUGFS -int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr, +int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_hdr *hdr, struct ath_rx_status *rs, u64 tsf); -#else -static inline int ath_process_fft(struct ath_softc *sc, - struct ieee80211_hdr *hdr, - struct 
ath_rx_status *rs, u64 tsf) -{ - return 0; -} -#endif /* CONFIG_ATH9K_DEBUGFS */ #endif /* SPECTRAL_H */ diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c index 33b0c7aef2ea39f527962f483a8014a052ac10ce..e8c699446470cb9e33dc5f407ebfc81b5197af1b 100644 --- a/drivers/net/wireless/ath/ath9k/common.c +++ b/drivers/net/wireless/ath/ath9k/common.c @@ -159,7 +159,7 @@ void ath9k_cmn_rx_skb_postprocess(struct ath_common *common, if (test_bit(keyix, common->keymap)) rxs->flag |= RX_FLAG_DECRYPTED; } - if (ah->sw_mgmt_crypto && + if (ah->sw_mgmt_crypto_rx && (rxs->flag & RX_FLAG_DECRYPTED) && ieee80211_is_mgmt(fc)) /* Use software decrypt for management frames. */ diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h index ffc454b18637588f37964857c5a1b4997cc2ee5c..2b79a568e8032c1fbc33a0fb550a5da6140f2865 100644 --- a/drivers/net/wireless/ath/ath9k/common.h +++ b/drivers/net/wireless/ath/ath9k/common.h @@ -24,6 +24,7 @@ #include "common-init.h" #include "common-beacon.h" #include "common-debug.h" +#include "common-spectral.h" /* Common header for Atheros 802.11n base driver cores */ diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 5c45e787814ed6a9b65f9f3e523626f282201ddb..696e3d5309c6bdfc5f890c6dd2189ceee28f51c1 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -828,13 +828,14 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf, i = 0; ath_for_each_chanctx(sc, ctx) { - if (!ctx->assigned || list_empty(&ctx->vifs)) + if (list_empty(&ctx->vifs)) continue; ath9k_calculate_iter_data(sc, ctx, &iter_data); len += scnprintf(buf + len, sizeof(buf) - len, - "VIF-COUNTS: CTX %i AP: %i STA: %i MESH: %i WDS: %i", - i++, iter_data.naps, iter_data.nstations, + "VIFS: CTX %i(%i) AP: %i STA: %i MESH: %i WDS: %i", + i++, (int)(ctx->assigned), iter_data.naps, + iter_data.nstations, iter_data.nmeshes, iter_data.nwds); len += scnprintf(buf + len, sizeof(buf) - len, " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n", @@ -852,36 +853,31 @@ static ssize_t read_file_reset(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; + static const char * const reset_cause[__RESET_TYPE_MAX] = { + [RESET_TYPE_BB_HANG] = "Baseband Hang", + [RESET_TYPE_BB_WATCHDOG] = "Baseband Watchdog", + [RESET_TYPE_FATAL_INT] = "Fatal HW Error", + [RESET_TYPE_TX_ERROR] = "TX HW error", + [RESET_TYPE_TX_GTT] = "Transmit timeout", + [RESET_TYPE_TX_HANG] = "TX Path Hang", + [RESET_TYPE_PLL_HANG] = "PLL RX Hang", + [RESET_TYPE_MAC_HANG] = "MAC Hang", + [RESET_TYPE_BEACON_STUCK] = "Stuck Beacon", + [RESET_TYPE_MCI] = "MCI Reset", + [RESET_TYPE_CALIBRATION] = "Calibration error", + }; char buf[512]; unsigned int len = 0; + int i; - len += scnprintf(buf + len, sizeof(buf) - len, - "%17s: %2d\n", "Baseband Hang", - sc->debug.stats.reset[RESET_TYPE_BB_HANG]); - len += scnprintf(buf + len, sizeof(buf) - len, - "%17s: %2d\n", "Baseband Watchdog", - sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG]); - len += scnprintf(buf + len, sizeof(buf) - len, - "%17s: %2d\n", "Fatal HW Error", - sc->debug.stats.reset[RESET_TYPE_FATAL_INT]); - len += scnprintf(buf + len, sizeof(buf) - len, - "%17s: %2d\n", "TX HW error", - sc->debug.stats.reset[RESET_TYPE_TX_ERROR]); - len += scnprintf(buf + len, sizeof(buf) - len, - "%17s: %2d\n", "TX Path Hang", - sc->debug.stats.reset[RESET_TYPE_TX_HANG]); - len += scnprintf(buf + len, 
sizeof(buf) - len, - "%17s: %2d\n", "PLL RX Hang", - sc->debug.stats.reset[RESET_TYPE_PLL_HANG]); - len += scnprintf(buf + len, sizeof(buf) - len, - "%17s: %2d\n", "MAC Hang", - sc->debug.stats.reset[RESET_TYPE_MAC_HANG]); - len += scnprintf(buf + len, sizeof(buf) - len, - "%17s: %2d\n", "Stuck Beacon", - sc->debug.stats.reset[RESET_TYPE_BEACON_STUCK]); - len += scnprintf(buf + len, sizeof(buf) - len, - "%17s: %2d\n", "MCI Reset", - sc->debug.stats.reset[RESET_TYPE_MCI]); + for (i = 0; i < ARRAY_SIZE(reset_cause); i++) { + if (!reset_cause[i]) + continue; + + len += scnprintf(buf + len, sizeof(buf) - len, + "%17s: %2d\n", reset_cause[i], + sc->debug.stats.reset[i]); + } if (len > sizeof(buf)) len = sizeof(buf); @@ -1315,7 +1311,7 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw, void ath9k_deinit_debug(struct ath_softc *sc) { - ath9k_spectral_deinit_debug(sc); + ath9k_cmn_spectral_deinit_debug(&sc->spec_priv); } int ath9k_init_debug(struct ath_hw *ah) @@ -1335,7 +1331,7 @@ int ath9k_init_debug(struct ath_hw *ah) ath9k_dfs_init_debug(sc); ath9k_tx99_init_debug(sc); - ath9k_spectral_init_debug(sc); + ath9k_cmn_spectral_init_debug(&sc->spec_priv, sc->debug.debugfs_phy); debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_dma); diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h index 53ae15bd0c9d4ac8b40a7a9de3689c607d56c3cd..bd75b1f716db527208afc525a0fd186d527e6e29 100644 --- a/drivers/net/wireless/ath/ath9k/debug.h +++ b/drivers/net/wireless/ath/ath9k/debug.h @@ -49,6 +49,7 @@ enum ath_reset_type { RESET_TYPE_MAC_HANG, RESET_TYPE_BEACON_STUCK, RESET_TYPE_MCI, + RESET_TYPE_CALIBRATION, __RESET_TYPE_MAX }; diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c index 3218ca9947463a844ed38f22956faedaca688cbb..122b846b8ec0e796bed2e15f8a6dc4341b9c00bc 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -262,7 +262,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) { struct ar5416_eeprom_def *eep = &ah->eeprom.def; struct ath_common *common = ath9k_hw_common(ah); - u16 *eepdata, temp, magic, magic2; + u16 *eepdata, temp, magic; u32 sum = 0, el; bool need_swap = false; int i, addr, size; @@ -272,27 +272,16 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) return false; } - if (!ath9k_hw_use_flash(ah)) { - ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic); - - if (magic != AR5416_EEPROM_MAGIC) { - magic2 = swab16(magic); - - if (magic2 == AR5416_EEPROM_MAGIC) { - size = sizeof(struct ar5416_eeprom_def); - need_swap = true; - eepdata = (u16 *) (&ah->eeprom); + if (swab16(magic) == AR5416_EEPROM_MAGIC && + !(ah->ah_flags & AH_NO_EEP_SWAP)) { + size = sizeof(struct ar5416_eeprom_def); + need_swap = true; + eepdata = (u16 *) (&ah->eeprom); - for (addr = 0; addr < size / sizeof(u16); addr++) { - temp = swab16(*eepdata); - *eepdata = temp; - eepdata++; - } - } else { - ath_err(common, - "Invalid EEPROM Magic. 
Endianness mismatch.\n"); - return -EINVAL; - } + for (addr = 0; addr < size / sizeof(u16); addr++) { + temp = swab16(*eepdata); + *eepdata = temp; + eepdata++; } } diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c index b1956bf6e01e12480fad2b0d8e580abca118399c..2fef7a480fec9a8c1013104dc61de38b1e1ad94c 100644 --- a/drivers/net/wireless/ath/ath9k/gpio.c +++ b/drivers/net/wireless/ath/ath9k/gpio.c @@ -25,7 +25,12 @@ static void ath_led_brightness(struct led_classdev *led_cdev, enum led_brightness brightness) { struct ath_softc *sc = container_of(led_cdev, struct ath_softc, led_cdev); - ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, (brightness == LED_OFF)); + u32 val = (brightness == LED_OFF); + + if (sc->sc_ah->config.led_active_high) + val = !val; + + ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, val); } void ath_deinit_leds(struct ath_softc *sc) @@ -82,7 +87,7 @@ void ath_fill_led_pin(struct ath_softc *sc) ath9k_hw_cfg_output(ah, ah->led_pin, AR_GPIO_OUTPUT_MUX_AS_OUTPUT); /* LED off, active low */ - ath9k_hw_set_gpio(ah, ah->led_pin, 1); + ath9k_hw_set_gpio(ah, ah->led_pin, (ah->config.led_active_high) ? 0 : 1); } #endif diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index 09a5d72f3ff5b4e9cf27b6c0305193e738c1aba5..9dde265d3f846a683534831b43778829f6ff0b4d 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h @@ -481,6 +481,7 @@ struct ath9k_htc_priv { unsigned long op_flags; struct ath9k_hw_cal_data caldata; + struct ath_spec_scan_priv spec_priv; spinlock_t beacon_lock; struct ath_beacon_config cur_beacon_conf; @@ -625,8 +626,12 @@ int ath9k_htc_resume(struct htc_target *htc_handle); #endif #ifdef CONFIG_ATH9K_HTC_DEBUGFS int ath9k_htc_init_debug(struct ath_hw *ah); +void ath9k_htc_deinit_debug(struct ath9k_htc_priv *priv); #else static inline int ath9k_htc_init_debug(struct ath_hw *ah) { return 0; }; +static inline void ath9k_htc_deinit_debug(struct ath9k_htc_priv *priv) +{ +} #endif /* CONFIG_ATH9K_HTC_DEBUGFS */ #endif /* HTC_H */ diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c index 8b529e4b8ac4f37019426102b5d8d4b428882701..8cef1edcc6217b0d71f5a58aa6c906f2416ebef3 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c @@ -490,6 +490,10 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw, WARN_ON(i != ATH9K_HTC_SSTATS_LEN); } +void ath9k_htc_deinit_debug(struct ath9k_htc_priv *priv) +{ + ath9k_cmn_spectral_deinit_debug(&priv->spec_priv); +} int ath9k_htc_init_debug(struct ath_hw *ah) { @@ -501,6 +505,8 @@ int ath9k_htc_init_debug(struct ath_hw *ah) if (!priv->debug.debugfs_phy) return -ENOMEM; + ath9k_cmn_spectral_init_debug(&priv->spec_priv, priv->debug.debugfs_phy); + debugfs_create_file("tgt_int_stats", S_IRUSR, priv->debug.debugfs_phy, priv, &fops_tgt_int_stats); debugfs_create_file("tgt_tx_stats", S_IRUSR, priv->debug.debugfs_phy, diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 4014c4be6e79d86c72f68a2d57ace42fd4963907..e8fa9448da2410dc7cc833100853bc247d2ee378 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -53,6 +53,21 @@ static const struct ieee80211_tpt_blink ath9k_htc_tpt_blink[] = { }; #endif +static void ath9k_htc_op_ps_wakeup(struct ath_common *common) +{ + ath9k_htc_ps_wakeup((struct ath9k_htc_priv *) 
common->priv); +} + +static void ath9k_htc_op_ps_restore(struct ath_common *common) +{ + ath9k_htc_ps_restore((struct ath9k_htc_priv *) common->priv); +} + +static struct ath_ps_ops ath9k_htc_ps_ops = { + .wakeup = ath9k_htc_op_ps_wakeup, + .restore = ath9k_htc_op_ps_restore, +}; + static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv) { int time_left; @@ -87,6 +102,7 @@ static void ath9k_deinit_device(struct ath9k_htc_priv *priv) wiphy_rfkill_stop_polling(hw->wiphy); ath9k_deinit_leds(priv); + ath9k_htc_deinit_debug(priv); ieee80211_unregister_hw(hw); ath9k_rx_cleanup(priv); ath9k_tx_cleanup(priv); @@ -449,6 +465,14 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv) common->last_rssi = ATH_RSSI_DUMMY_MARKER; priv->ah->opmode = NL80211_IFTYPE_STATION; + + priv->spec_priv.ah = priv->ah; + priv->spec_priv.spec_config.enabled = 0; + priv->spec_priv.spec_config.short_repeat = false; + priv->spec_priv.spec_config.count = 8; + priv->spec_priv.spec_config.endless = false; + priv->spec_priv.spec_config.period = 0x12; + priv->spec_priv.spec_config.fft_period = 0x02; } static int ath9k_init_priv(struct ath9k_htc_priv *priv, @@ -478,6 +502,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv, common = ath9k_hw_common(ah); common->ops = &ah->reg_ops; + common->ps_ops = &ath9k_htc_ps_ops; common->bus_ops = &ath9k_usb_bus_ops; common->ah = ah; common->hw = priv->hw; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 994fff1ff5198db245b3a3e2250105bfb6e6a8b9..92d5a6c5a2253b6fbc54e2ad4cb79d15b7b0e45b 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -314,6 +314,10 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv, mod_timer(&priv->tx.cleanup_timer, jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL)); + /* perform spectral scan if requested. 
*/ + if (test_bit(ATH_OP_SCANNING, &common->op_flags) && + priv->spec_priv.spectral_mode == SPECTRAL_CHANSCAN) + ath9k_cmn_spectral_scan_trigger(common, &priv->spec_priv); err: ath9k_htc_ps_restore(priv); return ret; @@ -1443,7 +1447,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw, key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; if (key->cipher == WLAN_CIPHER_SUITE_TKIP) key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; - if (priv->ah->sw_mgmt_crypto && + if (priv->ah->sw_mgmt_crypto_tx && key->cipher == WLAN_CIPHER_SUITE_CCMP) key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; ret = 0; @@ -1687,7 +1691,9 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw, return ret; } -static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw) +static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *mac_addr) { struct ath9k_htc_priv *priv = hw->priv; struct ath_common *common = ath9k_hw_common(priv->ah); @@ -1701,7 +1707,8 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw) mutex_unlock(&priv->mutex); } -static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw) +static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct ath9k_htc_priv *priv = hw->priv; struct ath_common *common = ath9k_hw_common(priv->ah); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index f0484b1b617e9a043e91ad46cc2e042848caa648..a0f58e2aa553825d65a7f1ecc4cc1e06338ea444 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -946,7 +946,7 @@ static inline void convert_htc_flag(struct ath_rx_status *rx_stats, static void rx_status_htc_to_ath(struct ath_rx_status *rx_stats, struct ath_htc_rx_status *rxstatus) { - rx_stats->rs_datalen = rxstatus->rs_datalen; + rx_stats->rs_datalen = be16_to_cpu(rxstatus->rs_datalen); rx_stats->rs_status = rxstatus->rs_status; rx_stats->rs_phyerr = rxstatus->rs_phyerr; rx_stats->rs_rssi = rxstatus->rs_rssi; @@ -1012,6 +1012,20 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, * separately to avoid doing two lookups for a rate for each frame. */ hdr = (struct ieee80211_hdr *)skb->data; + + /* + * Process PHY errors and return so that the packet + * can be dropped. + */ + if (rx_stats.rs_status & ATH9K_RXERR_PHY) { + /* TODO: Not using DFS processing now. 
*/ + if (ath_cmn_process_fft(&priv->spec_priv, hdr, + &rx_stats, rx_status->mactime)) { + /* TODO: Code to collect spectral scan statistics */ + } + goto rx_next; + } + if (!ath9k_cmn_rx_accept(common, hdr, rx_status, &rx_stats, &decrypt_error, priv->rxfilter)) goto rx_next; diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h index 8e85efeaeffcf67d08dcc9e94d793f5bb0559a9f..88769b64b20b29d9f9cb71c6ffbca27cc3d314d4 100644 --- a/drivers/net/wireless/ath/ath9k/hw-ops.h +++ b/drivers/net/wireless/ath/ath9k/hw-ops.h @@ -41,10 +41,9 @@ static inline void ath9k_hw_set_desc_link(struct ath_hw *ah, void *ds, ath9k_hw_ops(ah)->set_desc_link(ds, link); } -static inline bool ath9k_hw_calibrate(struct ath_hw *ah, - struct ath9k_channel *chan, - u8 rxchainmask, - bool longcal) +static inline int ath9k_hw_calibrate(struct ath_hw *ah, + struct ath9k_channel *chan, + u8 rxchainmask, bool longcal) { return ath9k_hw_ops(ah)->calibrate(ah, chan, rxchainmask, longcal); } diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 2ad605760e2136584a56c25680cb14324fa3d30c..6d4b273469b1914ab019c7cdeb4ec00f1ddd329a 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include "hw.h" @@ -446,8 +447,16 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah) common->macaddr[2 * i] = eeval >> 8; common->macaddr[2 * i + 1] = eeval & 0xff; } - if (sum == 0 || sum == 0xffff * 3) - return -EADDRNOTAVAIL; + if (!is_valid_ether_addr(common->macaddr)) { + ath_err(common, + "eeprom contains invalid mac address: %pM\n", + common->macaddr); + + random_ether_addr(common->macaddr); + ath_err(common, + "random mac address will be used: %pM\n", + common->macaddr); + } return 0; } @@ -1576,16 +1585,22 @@ static void ath9k_hw_init_mfp(struct ath_hw *ah) * frames when constructing CCMP AAD. 
*/ REG_RMW_FIELD(ah, AR_AES_MUTE_MASK1, AR_AES_MUTE_MASK1_FC_MGMT, 0xc7ff); - ah->sw_mgmt_crypto = false; + if (AR_SREV_9271(ah) || AR_DEVID_7010(ah)) + ah->sw_mgmt_crypto_tx = true; + else + ah->sw_mgmt_crypto_tx = false; + ah->sw_mgmt_crypto_rx = false; } else if (AR_SREV_9160_10_OR_LATER(ah)) { /* Disable hardware crypto for management frames */ REG_CLR_BIT(ah, AR_PCU_MISC_MODE2, AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE); REG_SET_BIT(ah, AR_PCU_MISC_MODE2, AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT); - ah->sw_mgmt_crypto = true; + ah->sw_mgmt_crypto_tx = true; + ah->sw_mgmt_crypto_rx = true; } else { - ah->sw_mgmt_crypto = true; + ah->sw_mgmt_crypto_tx = true; + ah->sw_mgmt_crypto_rx = true; } } @@ -1932,6 +1947,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, REGWRITE_BUFFER_FLUSH(ah); + ath9k_hw_gen_timer_start_tsf2(ah); + ath9k_hw_init_desc(ah); if (ath9k_hw_btcoex_is_enabled(ah)) @@ -1940,8 +1957,10 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, if (ath9k_hw_mci_is_enabled(ah)) ar9003_mci_check_bt(ah); - ath9k_hw_loadnf(ah, chan); - ath9k_hw_start_nfcal(ah, true); + if (AR_SREV_9300_20_OR_LATER(ah)) { + ath9k_hw_loadnf(ah, chan); + ath9k_hw_start_nfcal(ah, true); + } if (AR_SREV_9300_20_OR_LATER(ah)) ar9003_hw_bb_watchdog_config(ah); @@ -2309,7 +2328,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) struct ath9k_hw_capabilities *pCap = &ah->caps; struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); struct ath_common *common = ath9k_hw_common(ah); - unsigned int chip_chainmask; u16 eeval; u8 ant_div_ctl1, tx_chainmask, rx_chainmask; @@ -2329,31 +2347,40 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) } eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE); - if ((eeval & (AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A)) == 0) { - ath_err(common, - "no band has been marked as supported in EEPROM\n"); - return -EINVAL; + + if (eeval & AR5416_OPFLAGS_11A) { + if (ah->disable_5ghz) + ath_warn(common, "disabling 5GHz band\n"); + else + pCap->hw_caps |= ATH9K_HW_CAP_5GHZ; } - if (eeval & AR5416_OPFLAGS_11A) - pCap->hw_caps |= ATH9K_HW_CAP_5GHZ; + if (eeval & AR5416_OPFLAGS_11G) { + if (ah->disable_2ghz) + ath_warn(common, "disabling 2GHz band\n"); + else + pCap->hw_caps |= ATH9K_HW_CAP_2GHZ; + } - if (eeval & AR5416_OPFLAGS_11G) - pCap->hw_caps |= ATH9K_HW_CAP_2GHZ; + if ((pCap->hw_caps & (ATH9K_HW_CAP_2GHZ | ATH9K_HW_CAP_5GHZ)) == 0) { + ath_err(common, "both bands are disabled\n"); + return -EINVAL; + } if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah) || AR_SREV_9565(ah)) - chip_chainmask = 1; - else if (AR_SREV_9462(ah)) - chip_chainmask = 3; + pCap->chip_chainmask = 1; else if (!AR_SREV_9280_20_OR_LATER(ah)) - chip_chainmask = 7; - else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah)) - chip_chainmask = 3; + pCap->chip_chainmask = 7; + else if (!AR_SREV_9300_20_OR_LATER(ah) || + AR_SREV_9340(ah) || + AR_SREV_9462(ah) || + AR_SREV_9531(ah)) + pCap->chip_chainmask = 3; else - chip_chainmask = 7; + pCap->chip_chainmask = 7; pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK); /* @@ -2371,8 +2398,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) /* Use rx_chainmask from EEPROM. 
*/ pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK); - pCap->tx_chainmask = fixup_chainmask(chip_chainmask, pCap->tx_chainmask); - pCap->rx_chainmask = fixup_chainmask(chip_chainmask, pCap->rx_chainmask); + pCap->tx_chainmask = fixup_chainmask(pCap->chip_chainmask, pCap->tx_chainmask); + pCap->rx_chainmask = fixup_chainmask(pCap->chip_chainmask, pCap->rx_chainmask); ah->txchainmask = pCap->tx_chainmask; ah->rxchainmask = pCap->rx_chainmask; @@ -2886,6 +2913,16 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah) } EXPORT_SYMBOL(ath9k_hw_gettsf32); +void ath9k_hw_gen_timer_start_tsf2(struct ath_hw *ah) +{ + struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; + + if (timer_table->tsf2_enabled) { + REG_SET_BIT(ah, AR_DIRECT_CONNECT, AR_DC_AP_STA_EN); + REG_SET_BIT(ah, AR_RESET_TSF, AR_RESET_TSF2_ONCE); + } +} + struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah, void (*trigger)(void *), void (*overflow)(void *), @@ -2896,7 +2933,11 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah, struct ath_gen_timer *timer; if ((timer_index < AR_FIRST_NDP_TIMER) || - (timer_index >= ATH_MAX_GEN_TIMER)) + (timer_index >= ATH_MAX_GEN_TIMER)) + return NULL; + + if ((timer_index > AR_FIRST_NDP_TIMER) && + !AR_SREV_9300_20_OR_LATER(ah)) return NULL; timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL); @@ -2910,6 +2951,11 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah, timer->overflow = overflow; timer->arg = arg; + if ((timer_index > AR_FIRST_NDP_TIMER) && !timer_table->tsf2_enabled) { + timer_table->tsf2_enabled = true; + ath9k_hw_gen_timer_start_tsf2(ah); + } + return timer; } EXPORT_SYMBOL(ath_gen_timer_alloc); diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 975074fc11bcdda44e65e4eaabe05b75b51c90c9..1cbd335515134e0d4c5ccbfd7fa8756c01a9801b 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -217,8 +217,8 @@ #define AH_WOW_BEACON_MISS BIT(3) enum ath_hw_txq_subtype { - ATH_TXQ_AC_BE = 0, - ATH_TXQ_AC_BK = 1, + ATH_TXQ_AC_BK = 0, + ATH_TXQ_AC_BE = 1, ATH_TXQ_AC_VI = 2, ATH_TXQ_AC_VO = 3, }; @@ -244,13 +244,20 @@ enum ath9k_hw_caps { ATH9K_HW_CAP_2GHZ = BIT(11), ATH9K_HW_CAP_5GHZ = BIT(12), ATH9K_HW_CAP_APM = BIT(13), +#ifdef CONFIG_ATH9K_PCOEM ATH9K_HW_CAP_RTT = BIT(14), ATH9K_HW_CAP_MCI = BIT(15), - ATH9K_HW_CAP_DFS = BIT(16), - ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(17), - ATH9K_HW_CAP_PAPRD = BIT(18), - ATH9K_HW_CAP_FCC_BAND_SWITCH = BIT(19), - ATH9K_HW_CAP_BT_ANT_DIV = BIT(20), + ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(16), + ATH9K_HW_CAP_BT_ANT_DIV = BIT(17), +#else + ATH9K_HW_CAP_RTT = 0, + ATH9K_HW_CAP_MCI = 0, + ATH9K_HW_WOW_DEVICE_CAPABLE = 0, + ATH9K_HW_CAP_BT_ANT_DIV = 0, +#endif + ATH9K_HW_CAP_DFS = BIT(18), + ATH9K_HW_CAP_PAPRD = BIT(19), + ATH9K_HW_CAP_FCC_BAND_SWITCH = BIT(20), }; /* @@ -269,6 +276,7 @@ struct ath9k_hw_capabilities { u16 rts_aggr_limit; u8 tx_chainmask; u8 rx_chainmask; + u8 chip_chainmask; u8 max_txchains; u8 max_rxchains; u8 num_gpio_pins; @@ -322,6 +330,7 @@ struct ath9k_ops_config { bool alt_mingainidx; bool no_pll_pwrsave; bool tx_gain_buffalo; + bool led_active_high; }; enum ath9k_int { @@ -517,6 +526,7 @@ struct ath_gen_timer { struct ath_gen_timer_table { struct ath_gen_timer *timers[ATH_MAX_GEN_TIMER]; u16 timer_mask; + bool tsf2_enabled; }; struct ath_hw_antcomb_conf { @@ -681,10 +691,8 @@ struct ath_hw_ops { bool power_off); void (*rx_enable)(struct ath_hw *ah); void (*set_desc_link)(void *ds, u32 link); - bool (*calibrate)(struct 
ath_hw *ah, - struct ath9k_channel *chan, - u8 rxchainmask, - bool longcal); + int (*calibrate)(struct ath_hw *ah, struct ath9k_channel *chan, + u8 rxchainmask, bool longcal); bool (*get_isr)(struct ath_hw *ah, enum ath9k_int *masked, u32 *sync_cause_p); void (*set_txdesc)(struct ath_hw *ah, void *ds, @@ -726,6 +734,7 @@ enum ath_cal_list { #define AH_USE_EEPROM 0x1 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */ #define AH_FASTCC 0x4 +#define AH_NO_EEP_SWAP 0x8 /* Do not swap EEPROM data */ struct ath_hw { struct ath_ops reg_ops; @@ -747,7 +756,8 @@ struct ath_hw { } eeprom; const struct eeprom_ops *eep_ops; - bool sw_mgmt_crypto; + bool sw_mgmt_crypto_tx; + bool sw_mgmt_crypto_rx; bool is_pciexpress; bool aspm_enabled; bool is_monitoring; @@ -924,10 +934,16 @@ struct ath_hw { bool is_clk_25mhz; int (*get_mac_revision)(void); int (*external_reset)(void); + bool disable_2ghz; + bool disable_5ghz; const struct firmware *eeprom_blob; struct ath_dynack dynack; + + bool tpc_enabled; + u8 tx_power[Ar5416RateSize]; + u8 tx_power_stbc[Ar5416RateSize]; }; struct ath_bus_ops { @@ -1027,6 +1043,7 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah, struct ath_gen_timer *timer, u32 timer_next, u32 timer_period); +void ath9k_hw_gen_timer_start_tsf2(struct ath_hw *ah); void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer); void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer); @@ -1067,6 +1084,8 @@ int ar9003_paprd_init_table(struct ath_hw *ah); bool ar9003_paprd_is_done(struct ath_hw *ah); bool ar9003_is_paprd_enabled(struct ath_hw *ah); void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx); +void ar9003_hw_init_rate_txpower(struct ath_hw *ah, u8 *rate_array, + struct ath9k_channel *chan); /* Hardware family op attach helpers */ int ar5008_hw_attach_phy_ops(struct ath_hw *ah); diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 3bd030494986a087bfd9744873649442fa67b572..d1c39346b2643c1f0b51c4c4ba071616e57863b9 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -88,6 +88,21 @@ static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = { static void ath9k_deinit_softc(struct ath_softc *sc); +static void ath9k_op_ps_wakeup(struct ath_common *common) +{ + ath9k_ps_wakeup((struct ath_softc *) common->priv); +} + +static void ath9k_op_ps_restore(struct ath_common *common) +{ + ath9k_ps_restore((struct ath_softc *) common->priv); +} + +static struct ath_ps_ops ath9k_ps_ops = { + .wakeup = ath9k_op_ps_wakeup, + .restore = ath9k_op_ps_restore, +}; + /* * Read and write, they both share the same lock. We do this to serialize * reads and writes on Atheros 802.11n PCI devices only. 
This is required @@ -172,17 +187,20 @@ static void ath9k_reg_notifier(struct wiphy *wiphy, ath_reg_notifier_apply(wiphy, request, reg); /* Set tx power */ - if (ah->curchan) { - sc->cur_chan->txpower = 2 * ah->curchan->chan->max_power; - ath9k_ps_wakeup(sc); - ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false); - sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit; - /* synchronize DFS detector if regulatory domain changed */ - if (sc->dfs_detector != NULL) - sc->dfs_detector->set_dfs_domain(sc->dfs_detector, - request->dfs_region); - ath9k_ps_restore(sc); - } + if (!ah->curchan) + return; + + sc->cur_chan->txpower = 2 * ah->curchan->chan->max_power; + ath9k_ps_wakeup(sc); + ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false); + ath9k_cmn_update_txpow(ah, sc->cur_chan->cur_txpower, + sc->cur_chan->txpower, + &sc->cur_chan->cur_txpower); + /* synchronize DFS detector if regulatory domain changed */ + if (sc->dfs_detector != NULL) + sc->dfs_detector->set_dfs_domain(sc->dfs_detector, + request->dfs_region); + ath9k_ps_restore(sc); } /* @@ -348,12 +366,13 @@ static void ath9k_init_misc(struct ath_softc *sc) if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT; - sc->spec_config.enabled = 0; - sc->spec_config.short_repeat = true; - sc->spec_config.count = 8; - sc->spec_config.endless = false; - sc->spec_config.period = 0xFF; - sc->spec_config.fft_period = 0xF; + sc->spec_priv.ah = sc->sc_ah; + sc->spec_priv.spec_config.enabled = 0; + sc->spec_priv.spec_config.short_repeat = true; + sc->spec_priv.spec_config.count = 8; + sc->spec_priv.spec_config.endless = false; + sc->spec_priv.spec_config.period = 0xFF; + sc->spec_priv.spec_config.fft_period = 0xF; } static void ath9k_init_pcoem_platform(struct ath_softc *sc) @@ -362,6 +381,9 @@ static void ath9k_init_pcoem_platform(struct ath_softc *sc) struct ath9k_hw_capabilities *pCap = &ah->caps; struct ath_common *common = ath9k_hw_common(ah); + if (!IS_ENABLED(CONFIG_ATH9K_PCOEM)) + return; + if (common->bus_ops->ath_bus_type != ATH_PCI) return; @@ -419,6 +441,9 @@ static void ath9k_init_pcoem_platform(struct ath_softc *sc) ah->config.no_pll_pwrsave = true; ath_info(common, "Disable PLL PowerSave\n"); } + + if (sc->driver_data & ATH9K_PCI_LED_ACT_HI) + ah->config.led_active_high = true; } static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob, @@ -507,10 +532,14 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, ah->reg_ops.read = ath9k_ioread32; ah->reg_ops.write = ath9k_iowrite32; ah->reg_ops.rmw = ath9k_reg_rmw; - sc->sc_ah = ah; pCap = &ah->caps; common = ath9k_hw_common(ah); + + /* Will be cleared in ath9k_start() */ + set_bit(ATH_OP_INVALID, &common->op_flags); + + sc->sc_ah = ah; sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET); sc->tx99_power = MAX_RATE_POWER + 1; init_waitqueue_head(&sc->tx_wait); @@ -528,10 +557,15 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, ah->is_clk_25mhz = pdata->is_clk_25mhz; ah->get_mac_revision = pdata->get_mac_revision; ah->external_reset = pdata->external_reset; + ah->disable_2ghz = pdata->disable_2ghz; + ah->disable_5ghz = pdata->disable_5ghz; + if (!pdata->endian_check) + ah->ah_flags |= AH_NO_EEP_SWAP; } common->ops = &ah->reg_ops; common->bus_ops = bus_ops; + common->ps_ops = &ath9k_ps_ops; common->ah = ah; common->hw = sc->hw; common->priv = sc; @@ -866,9 +900,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, common = ath9k_hw_common(ah); ath9k_set_hw_capab(sc, 
hw); - /* Will be cleared in ath9k_start() */ - set_bit(ATH_OP_INVALID, &common->op_flags); - /* Initialize regulatory */ error = ath_regd_init(&common->regulatory, sc->hw->wiphy, ath9k_reg_notifier); diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c index 2343f56e64987730bcb3ad1cb5542c41dcd94310..b829263e3d0a7f2a849bb96bfeed4ab948a0c1bc 100644 --- a/drivers/net/wireless/ath/ath9k/link.c +++ b/drivers/net/wireless/ath/ath9k/link.c @@ -371,9 +371,15 @@ void ath_ani_calibrate(unsigned long data) /* Perform calibration if necessary */ if (longcal || shortcal) { - common->ani.caldone = - ath9k_hw_calibrate(ah, ah->curchan, - ah->rxchainmask, longcal); + int ret = ath9k_hw_calibrate(ah, ah->curchan, ah->rxchainmask, + longcal); + if (ret < 0) { + common->ani.caldone = 0; + ath9k_queue_reset(sc, RESET_TYPE_CALIBRATION); + return; + } + + common->ani.caldone = ret; } ath_dbg(common, ANI, diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 275205ab5f15ea15a00f341942efbe6f3bdc6adc..3e58bfa0c1fd1f711f56def45305f80cc8bdfa33 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -311,14 +311,7 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type, q = ATH9K_NUM_TX_QUEUES - 3; break; case ATH9K_TX_QUEUE_DATA: - for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++) - if (ah->txq[q].tqi_type == - ATH9K_TX_QUEUE_INACTIVE) - break; - if (q == ATH9K_NUM_TX_QUEUES) { - ath_err(common, "No available TX queue\n"); - return -1; - } + q = qinfo->tqi_subtype; break; default: ath_err(common, "Invalid TX queue type: %u\n", type); diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h index aa69ceaad0be325e05f1c98e6f22c14b8f4b84d5..e55fa11894b6cc5d90846eeed6bbf2b157e21187 100644 --- a/drivers/net/wireless/ath/ath9k/mac.h +++ b/drivers/net/wireless/ath/ath9k/mac.h @@ -704,7 +704,7 @@ struct ath_tx_info { enum ath9k_pkt_type type; enum ath9k_key_type keytype; u8 keyix; - u8 txpower; + u8 txpower[4]; }; struct ath_hw; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 4f18a6be0c7d706092998c15bb290b3f6ddcdbc1..9a72640237cb7678500468343e6f09101c169e81 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -54,7 +54,8 @@ u8 ath9k_parse_mpdudensity(u8 mpdudensity) } } -static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq) +static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq, + bool sw_pending) { bool pending = false; @@ -65,6 +66,9 @@ static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq) goto out; } + if (!sw_pending) + goto out; + if (txq->mac80211_qnum >= 0) { struct list_head *list; @@ -229,8 +233,9 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start) ath9k_calculate_summary_state(sc, sc->cur_chan); ath_startrecv(sc); - ath9k_cmn_update_txpow(ah, sc->curtxpow, - sc->cur_chan->txpower, &sc->curtxpow); + ath9k_cmn_update_txpow(ah, sc->cur_chan->cur_txpower, + sc->cur_chan->txpower, + &sc->cur_chan->cur_txpower); clear_bit(ATH_OP_HW_RESET, &common->op_flags); if (!sc->cur_chan->offchannel && start) { @@ -270,7 +275,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start) return true; } -int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) +static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) { struct ath_hw *ah = 
sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); @@ -281,6 +286,7 @@ int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) __ath_cancel_work(sc); tasklet_disable(&sc->intr_tq); + tasklet_disable(&sc->bcon_tasklet); spin_lock_bh(&sc->sc_pcu_lock); if (!sc->cur_chan->offchannel) { @@ -326,6 +332,7 @@ int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) out: spin_unlock_bh(&sc->sc_pcu_lock); + tasklet_enable(&sc->bcon_tasklet); tasklet_enable(&sc->intr_tq); return r; @@ -505,16 +512,13 @@ irqreturn_t ath_isr(int irq, void *dev) if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags)) return IRQ_NONE; - /* shared irq, not for us */ + if (!AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags)) + return IRQ_NONE; + /* shared irq, not for us */ if (!ath9k_hw_intrpend(ah)) return IRQ_NONE; - if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) { - ath9k_hw_kill_interrupts(ah); - return IRQ_HANDLED; - } - /* * Figure out the reason(s) for the interrupt. Note * that the hal returns a pseudo-ISR that may include @@ -525,6 +529,9 @@ irqreturn_t ath_isr(int irq, void *dev) ath9k_debug_sync_cause(sc, sync_cause); status &= ah->imask; /* discard unasked-for bits */ + if (AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags)) + return IRQ_HANDLED; + /* * If there are no status bits set, then this interrupt was not * for me (should have been caught above). @@ -539,11 +546,10 @@ irqreturn_t ath_isr(int irq, void *dev) sched = true; /* - * If a FATAL or RXORN interrupt is received, we have to reset the - * chip immediately. + * If a FATAL interrupt is received, we have to reset the chip + * immediately. */ - if ((status & ATH9K_INT_FATAL) || ((status & ATH9K_INT_RXORN) && - !(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))) + if (status & ATH9K_INT_FATAL) goto chip_reset; if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) && @@ -598,23 +604,37 @@ chip_reset: #undef SCHED_INTR } -int ath_reset(struct ath_softc *sc) +/* + * This function is called when a HW reset cannot be deferred + * and has to be immediate. + */ +int ath_reset(struct ath_softc *sc, struct ath9k_channel *hchan) { + struct ath_common *common = ath9k_hw_common(sc->sc_ah); int r; + ath9k_hw_kill_interrupts(sc->sc_ah); + set_bit(ATH_OP_HW_RESET, &common->op_flags); + ath9k_ps_wakeup(sc); - r = ath_reset_internal(sc, NULL); + r = ath_reset_internal(sc, hchan); ath9k_ps_restore(sc); return r; } +/* + * When a HW reset can be deferred, it is added to the + * hw_reset_work workqueue, but we set ATH_OP_HW_RESET before + * queueing. + */ void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); #ifdef CONFIG_ATH9K_DEBUGFS RESET_STAT_INC(sc, type); #endif + ath9k_hw_kill_interrupts(sc->sc_ah); set_bit(ATH_OP_HW_RESET, &common->op_flags); ieee80211_queue_work(sc->hw, &sc->hw_reset_work); } @@ -623,7 +643,9 @@ void ath_reset_work(struct work_struct *work) { struct ath_softc *sc = container_of(work, struct ath_softc, hw_reset_work); - ath_reset(sc); + ath9k_ps_wakeup(sc); + ath_reset_internal(sc, NULL); + ath9k_ps_restore(sc); } /**********************/ @@ -707,7 +729,8 @@ static int ath9k_start(struct ieee80211_hw *hw) if (ah->led_pin >= 0) { ath9k_hw_cfg_output(ah, ah->led_pin, AR_GPIO_OUTPUT_MUX_AS_OUTPUT); - ath9k_hw_set_gpio(ah, ah->led_pin, 0); + ath9k_hw_set_gpio(ah, ah->led_pin, + (ah->config.led_active_high) ? 
1 : 0); } /* @@ -849,7 +872,8 @@ static void ath9k_stop(struct ieee80211_hw *hw) spin_lock_bh(&sc->sc_pcu_lock); if (ah->led_pin >= 0) { - ath9k_hw_set_gpio(ah, ah->led_pin, 1); + ath9k_hw_set_gpio(ah, ah->led_pin, + (ah->config.led_active_high) ? 0 : 1); ath9k_hw_cfg_gpio_input(ah, ah->led_pin); } @@ -865,6 +889,9 @@ static void ath9k_stop(struct ieee80211_hw *hw) &sc->cur_chan->chandef); ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); + + set_bit(ATH_OP_INVALID, &common->op_flags); + ath9k_hw_phy_disable(ah); ath9k_hw_configpcipowersave(ah, true); @@ -873,7 +900,6 @@ static void ath9k_stop(struct ieee80211_hw *hw) ath9k_ps_restore(sc); - set_bit(ATH_OP_INVALID, &common->op_flags); sc->ps_idle = prev_idle; mutex_unlock(&sc->mutex); @@ -1036,7 +1062,7 @@ static void ath9k_set_offchannel_state(struct ath_softc *sc) eth_zero_addr(common->curbssid); eth_broadcast_addr(common->bssidmask); - ether_addr_copy(common->macaddr, vif->addr); + memcpy(common->macaddr, vif->addr, ETH_ALEN); common->curaid = 0; ah->opmode = vif->type; ah->imask &= ~ATH9K_INT_SWBA; @@ -1077,7 +1103,7 @@ void ath9k_calculate_summary_state(struct ath_softc *sc, ath9k_calculate_iter_data(sc, ctx, &iter_data); if (iter_data.has_hw_macaddr) - ether_addr_copy(common->macaddr, iter_data.hw_macaddr); + memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN); memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); ath_hw_setbssidmask(common); @@ -1167,7 +1193,8 @@ static void ath9k_assign_hw_queues(struct ieee80211_hw *hw, for (i = 0; i < IEEE80211_NUM_ACS; i++) vif->hw_queue[i] = i; - if (vif->type == NL80211_IFTYPE_AP) + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_MESH_POINT) vif->cab_queue = hw->queues - 2; else vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; @@ -1323,78 +1350,6 @@ static void ath9k_disable_ps(struct ath_softc *sc) ath_dbg(common, PS, "PowerSave disabled\n"); } -void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw) -{ - struct ath_softc *sc = hw->priv; - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); - u32 rxfilter; - - if (config_enabled(CONFIG_ATH9K_TX99)) - return; - - if (!ath9k_hw_ops(ah)->spectral_scan_trigger) { - ath_err(common, "spectrum analyzer not implemented on this hardware\n"); - return; - } - - ath9k_ps_wakeup(sc); - rxfilter = ath9k_hw_getrxfilter(ah); - ath9k_hw_setrxfilter(ah, rxfilter | - ATH9K_RX_FILTER_PHYRADAR | - ATH9K_RX_FILTER_PHYERR); - - /* TODO: usually this should not be neccesary, but for some reason - * (or in some mode?) the trigger must be called after the - * configuration, otherwise the register will have its values reset - * (on my ar9220 to value 0x01002310) - */ - ath9k_spectral_scan_config(hw, sc->spectral_mode); - ath9k_hw_ops(ah)->spectral_scan_trigger(ah); - ath9k_ps_restore(sc); -} - -int ath9k_spectral_scan_config(struct ieee80211_hw *hw, - enum spectral_mode spectral_mode) -{ - struct ath_softc *sc = hw->priv; - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); - - if (!ath9k_hw_ops(ah)->spectral_scan_trigger) { - ath_err(common, "spectrum analyzer not implemented on this hardware\n"); - return -1; - } - - switch (spectral_mode) { - case SPECTRAL_DISABLED: - sc->spec_config.enabled = 0; - break; - case SPECTRAL_BACKGROUND: - /* send endless samples. - * TODO: is this really useful for "background"? 
- */ - sc->spec_config.endless = 1; - sc->spec_config.enabled = 1; - break; - case SPECTRAL_CHANSCAN: - case SPECTRAL_MANUAL: - sc->spec_config.endless = 0; - sc->spec_config.enabled = 1; - break; - default: - return -1; - } - - ath9k_ps_wakeup(sc); - ath9k_hw_ops(ah)->spectral_scan_config(ah, &sc->spec_config); - ath9k_ps_restore(sc); - - sc->spectral_mode = spectral_mode; - - return 0; -} - static int ath9k_config(struct ieee80211_hw *hw, u32 changed) { struct ath_softc *sc = hw->priv; @@ -1455,8 +1410,9 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) if (changed & IEEE80211_CONF_CHANGE_POWER) { ath_dbg(common, CONFIG, "Set power: %d\n", conf->power_level); sc->cur_chan->txpower = 2 * conf->power_level; - ath9k_cmn_update_txpow(ah, sc->curtxpow, - sc->cur_chan->txpower, &sc->curtxpow); + ath9k_cmn_update_txpow(ah, sc->cur_chan->cur_txpower, + sc->cur_chan->txpower, + &sc->cur_chan->cur_txpower); } mutex_unlock(&sc->mutex); @@ -1553,6 +1509,40 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw, return 0; } +static int ath9k_sta_state(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state) +{ + struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + int ret = 0; + + if (old_state == IEEE80211_STA_AUTH && + new_state == IEEE80211_STA_ASSOC) { + ret = ath9k_sta_add(hw, vif, sta); + ath_dbg(common, CONFIG, + "Add station: %pM\n", sta->addr); + } else if (old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTH) { + ret = ath9k_sta_remove(hw, vif, sta); + ath_dbg(common, CONFIG, + "Remove station: %pM\n", sta->addr); + } + + if (ath9k_is_chanctx_enabled()) { + if (vif->type == NL80211_IFTYPE_STATION) { + if (old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTHORIZED) + ath_chanctx_event(sc, vif, + ATH_CHANCTX_EVENT_AUTHORIZED); + } + } + + return ret; +} + static void ath9k_sta_set_tx_filter(struct ath_hw *ah, struct ath_node *an, bool set) @@ -1677,7 +1667,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw, key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; if (key->cipher == WLAN_CIPHER_SUITE_TKIP) key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; - if (sc->sc_ah->sw_mgmt_crypto && + if (sc->sc_ah->sw_mgmt_crypto_tx && key->cipher == WLAN_CIPHER_SUITE_CCMP) key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; ret = 0; @@ -1737,17 +1727,11 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, ath_dbg(common, CONFIG, "BSSID %pM Changed ASSOC %d\n", bss_conf->bssid, bss_conf->assoc); - ether_addr_copy(avp->bssid, bss_conf->bssid); + memcpy(avp->bssid, bss_conf->bssid, ETH_ALEN); avp->aid = bss_conf->aid; avp->assoc = bss_conf->assoc; ath9k_calculate_summary_state(sc, avp->chanctx); - - if (ath9k_is_chanctx_enabled()) { - if (bss_conf->assoc) - ath_chanctx_event(sc, vif, - ATH_CHANCTX_EVENT_ASSOC); - } } if (changed & BSS_CHANGED_IBSS) { @@ -1843,6 +1827,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, u16 tid, u16 *ssn, u8 buf_size) { struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); bool flush = false; int ret = 0; @@ -1854,6 +1839,12 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, case IEEE80211_AMPDU_RX_STOP: break; case IEEE80211_AMPDU_TX_START: + if (ath9k_is_chanctx_enabled()) { + if (test_bit(ATH_OP_SCANNING, &common->op_flags)) { + ret = -EBUSY; + break; + } + } ath9k_ps_wakeup(sc); ret = ath_tx_aggr_start(sc, sta, tid, ssn); if (!ret) @@ 
-1967,7 +1958,8 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, mutex_unlock(&sc->mutex); } -static bool ath9k_has_tx_pending(struct ath_softc *sc) +static bool ath9k_has_tx_pending(struct ath_softc *sc, + bool sw_pending) { int i, npend = 0; @@ -1975,7 +1967,8 @@ static bool ath9k_has_tx_pending(struct ath_softc *sc) if (!ATH_TXQ_SETUP(sc, i)) continue; - npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i]); + npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i], + sw_pending); if (npend) break; } @@ -1987,18 +1980,38 @@ static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) { struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + if (ath9k_is_chanctx_enabled()) { + if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags)) + goto flush; + /* + * If MCC is active, extend the flush timeout + * and wait for the HW/SW queues to become + * empty. This needs to be done outside the + * sc->mutex lock to allow the channel scheduler + * to switch channel contexts. + * + * The vif queues have been stopped in mac80211, + * so there won't be any incoming frames. + */ + __ath9k_flush(hw, queues, drop, true, true); + return; + } +flush: mutex_lock(&sc->mutex); - __ath9k_flush(hw, queues, drop); + __ath9k_flush(hw, queues, drop, true, false); mutex_unlock(&sc->mutex); } -void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) +void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop, + bool sw_pending, bool timeout_override) { struct ath_softc *sc = hw->priv; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); - int timeout = HZ / 5; /* 200 ms */ + int timeout; bool drain_txq; cancel_delayed_work_sync(&sc->tx_complete_work); @@ -2013,7 +2026,17 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) return; } - if (wait_event_timeout(sc->tx_wait, !ath9k_has_tx_pending(sc), + spin_lock_bh(&sc->chan_lock); + if (timeout_override) + timeout = HZ / 5; + else + timeout = sc->cur_chan->flush_timeout; + spin_unlock_bh(&sc->chan_lock); + + ath_dbg(common, CHAN_CTX, + "Flush timeout: %d\n", jiffies_to_msecs(timeout)); + + if (wait_event_timeout(sc->tx_wait, !ath9k_has_tx_pending(sc, sw_pending), timeout) > 0) drop = false; @@ -2024,7 +2047,7 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) spin_unlock_bh(&sc->sc_pcu_lock); if (!drain_txq) - ath_reset(sc); + ath_reset(sc, NULL); ath9k_ps_restore(sc); } @@ -2036,7 +2059,7 @@ static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw) { struct ath_softc *sc = hw->priv; - return ath9k_has_tx_pending(sc); + return ath9k_has_tx_pending(sc, true); } static int ath9k_tx_last_beacon(struct ieee80211_hw *hw) @@ -2167,14 +2190,17 @@ static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) return 0; } -static void ath9k_sw_scan_start(struct ieee80211_hw *hw) +static void ath9k_sw_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *mac_addr) { struct ath_softc *sc = hw->priv; struct ath_common *common = ath9k_hw_common(sc->sc_ah); set_bit(ATH_OP_SCANNING, &common->op_flags); } -static void ath9k_sw_scan_complete(struct ieee80211_hw *hw) +static void ath9k_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct ath_softc *sc = hw->priv; struct ath_common *common = ath9k_hw_common(sc->sc_ah); @@ -2183,6 +2209,28 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw) #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT +static void 
ath9k_cancel_pending_offchannel(struct ath_softc *sc) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + if (sc->offchannel.roc_vif) { + ath_dbg(common, CHAN_CTX, + "%s: Aborting RoC\n", __func__); + + del_timer_sync(&sc->offchannel.timer); + if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START) + ath_roc_complete(sc, true); + } + + if (test_bit(ATH_OP_SCANNING, &common->op_flags)) { + ath_dbg(common, CHAN_CTX, + "%s: Aborting HW scan\n", __func__); + + del_timer_sync(&sc->offchannel.timer); + ath_scan_complete(sc, true); + } +} + static int ath9k_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_scan_request *hw_req) { @@ -2313,7 +2361,6 @@ static int ath9k_add_chanctx(struct ieee80211_hw *hw, conf->def.chan->center_freq); ath_chanctx_set_channel(sc, ctx, &conf->def); - ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_ASSIGN); mutex_unlock(&sc->mutex); return 0; @@ -2370,6 +2417,8 @@ static int ath9k_assign_vif_chanctx(struct ieee80211_hw *hw, struct ath_chanctx *ctx = ath_chanctx_get(conf); int i; + ath9k_cancel_pending_offchannel(sc); + mutex_lock(&sc->mutex); ath_dbg(common, CHAN_CTX, @@ -2399,6 +2448,8 @@ static void ath9k_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ath_chanctx *ctx = ath_chanctx_get(conf); int ac; + ath9k_cancel_pending_offchannel(sc); + mutex_lock(&sc->mutex); ath_dbg(common, CHAN_CTX, @@ -2422,7 +2473,11 @@ static void ath9k_mgd_prepare_tx(struct ieee80211_hw *hw, struct ath_softc *sc = hw->priv; struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_vif *avp = (struct ath_vif *) vif->drv_priv; + struct ath_beacon_config *cur_conf; + struct ath_chanctx *go_ctx; + unsigned long timeout; bool changed = false; + u32 beacon_int; if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags)) return; @@ -2433,19 +2488,57 @@ static void ath9k_mgd_prepare_tx(struct ieee80211_hw *hw, mutex_lock(&sc->mutex); spin_lock_bh(&sc->chan_lock); - if (sc->next_chan || (sc->cur_chan != avp->chanctx)) { - sc->next_chan = avp->chanctx; + if (sc->next_chan || (sc->cur_chan != avp->chanctx)) changed = true; + spin_unlock_bh(&sc->chan_lock); + + if (!changed) + goto out; + + ath9k_cancel_pending_offchannel(sc); + + go_ctx = ath_is_go_chanctx_present(sc); + + if (go_ctx) { + /* + * Wait till the GO interface gets a chance + * to send out an NoA. 
+ */ + spin_lock_bh(&sc->chan_lock); + sc->sched.mgd_prepare_tx = true; + cur_conf = &go_ctx->beacon; + beacon_int = TU_TO_USEC(cur_conf->beacon_interval); + spin_unlock_bh(&sc->chan_lock); + + timeout = usecs_to_jiffies(beacon_int * 2); + init_completion(&sc->go_beacon); + + mutex_unlock(&sc->mutex); + + if (wait_for_completion_timeout(&sc->go_beacon, + timeout) == 0) { + ath_dbg(common, CHAN_CTX, + "Failed to send new NoA\n"); + + spin_lock_bh(&sc->chan_lock); + sc->sched.mgd_prepare_tx = false; + spin_unlock_bh(&sc->chan_lock); + } + + mutex_lock(&sc->mutex); } + ath_dbg(common, CHAN_CTX, - "%s: Set chanctx state to FORCE_ACTIVE, changed: %d\n", - __func__, changed); + "%s: Set chanctx state to FORCE_ACTIVE for vif: %pM\n", + __func__, vif->addr); + + spin_lock_bh(&sc->chan_lock); + sc->next_chan = avp->chanctx; sc->sched.state = ATH_CHANCTX_STATE_FORCE_ACTIVE; spin_unlock_bh(&sc->chan_lock); - if (changed) - ath_chanctx_set_next(sc, true); - + ath_chanctx_set_next(sc, true); +out: mutex_unlock(&sc->mutex); } @@ -2468,6 +2561,24 @@ void ath9k_fill_chanctx_ops(void) #endif +static int ath9k_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + int *dbm) +{ + struct ath_softc *sc = hw->priv; + struct ath_vif *avp = (void *)vif->drv_priv; + + mutex_lock(&sc->mutex); + if (avp->chanctx) + *dbm = avp->chanctx->cur_txpower; + else + *dbm = sc->cur_chan->cur_txpower; + mutex_unlock(&sc->mutex); + + *dbm /= 2; + + return 0; +} + struct ieee80211_ops ath9k_ops = { .tx = ath9k_tx, .start = ath9k_start, @@ -2477,8 +2588,7 @@ struct ieee80211_ops ath9k_ops = { .remove_interface = ath9k_remove_interface, .config = ath9k_config, .configure_filter = ath9k_configure_filter, - .sta_add = ath9k_sta_add, - .sta_remove = ath9k_sta_remove, + .sta_state = ath9k_sta_state, .sta_notify = ath9k_sta_notify, .conf_tx = ath9k_conf_tx, .bss_info_changed = ath9k_bss_info_changed, @@ -2515,4 +2625,5 @@ struct ieee80211_ops ath9k_ops = { #endif .sw_scan_start = ath9k_sw_scan_start, .sw_scan_complete = ath9k_sw_scan_complete, + .get_txpower = ath9k_get_txpower, }; diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index c018dea0b2e82da6177da57ad4ea111e23d34eec..f009b5b57e5ee72b8a3743ae98b53f5e8b59c45b 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -30,6 +30,7 @@ static const struct pci_device_id ath_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */ { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */ +#ifdef CONFIG_ATH9K_PCOEM { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 0x002A, PCI_VENDOR_ID_AZWAVE, @@ -82,6 +83,7 @@ static const struct pci_device_id ath_pci_id_table[] = { PCI_VENDOR_ID_AZWAVE, 0x2C37), .driver_data = ATH9K_PCI_BT_ANT_DIV }, +#endif { PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */ { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */ @@ -102,6 +104,7 @@ static const struct pci_device_id ath_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */ +#ifdef CONFIG_ATH9K_PCOEM /* PCI-E CUS198 */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 0x0032, @@ -294,10 +297,12 @@ static const struct pci_device_id ath_pci_id_table[] = { PCI_VENDOR_ID_ASUSTEK, 0x850D), .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, +#endif { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */ { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ +#ifdef CONFIG_ATH9K_PCOEM /* PCI-E CUS217 */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 0x0034, @@ -652,11 +657,14 @@ static const struct pci_device_id ath_pci_id_table[] = { 0x0036, 
PCI_VENDOR_ID_DELL, 0x020E), - .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV }, + .driver_data = ATH9K_PCI_AR9565_2ANT | + ATH9K_PCI_BT_ANT_DIV | + ATH9K_PCI_LED_ACT_HI}, /* PCI-E AR9565 (WB335) */ { PCI_VDEVICE(ATHEROS, 0x0036), .driver_data = ATH9K_PCI_BT_ANT_DIV }, +#endif { 0 } }; diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 6914e21816e44e02652103698f71623e51aa05c4..7395afbc5124b816ce13ff149e5da011f2fc8778 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -870,7 +870,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, */ if (rx_stats->rs_status & ATH9K_RXERR_PHY) { ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime); - if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime)) + if (ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats, rx_status->mactime)) RX_STAT_INC(rx_spectral); return -EINVAL; diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index 2a938f4feac5fd91e33fd3c61e5364de1809b591..fb11a9172f38a917878f16c06ec84b6dbeddfcfe 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h @@ -892,10 +892,21 @@ (AR_SREV_9330((_ah)) && \ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9330_12)) +#ifdef CONFIG_ATH9K_PCOEM +#define AR_SREV_9462(_ah) \ + (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462)) #define AR_SREV_9485(_ah) \ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485)) +#define AR_SREV_9565(_ah) \ + (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565)) +#else +#define AR_SREV_9462(_ah) 0 +#define AR_SREV_9485(_ah) 0 +#define AR_SREV_9565(_ah) 0 +#endif + #define AR_SREV_9485_11_OR_LATER(_ah) \ - (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485) && \ + (AR_SREV_9485(_ah) && \ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9485_11)) #define AR_SREV_9485_OR_LATER(_ah) \ (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9485)) @@ -915,34 +926,30 @@ (AR_SREV_9285_12_OR_LATER(_ah) && \ ((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1)) -#define AR_SREV_9462(_ah) \ - (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462)) #define AR_SREV_9462_20(_ah) \ - (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \ + (AR_SREV_9462(_ah) && \ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_20)) #define AR_SREV_9462_21(_ah) \ - (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \ + (AR_SREV_9462(_ah) && \ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_21)) #define AR_SREV_9462_20_OR_LATER(_ah) \ - (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \ + (AR_SREV_9462(_ah) && \ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20)) #define AR_SREV_9462_21_OR_LATER(_ah) \ - (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \ + (AR_SREV_9462(_ah) && \ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_21)) -#define AR_SREV_9565(_ah) \ - (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565)) #define AR_SREV_9565_10(_ah) \ - (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \ + (AR_SREV_9565(_ah) && \ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_10)) #define AR_SREV_9565_101(_ah) \ - (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \ + (AR_SREV_9565(_ah) && \ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_101)) #define AR_SREV_9565_11(_ah) \ - (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \ + (AR_SREV_9565(_ah) && \ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_11)) #define 
AR_SREV_9565_11_OR_LATER(_ah) \ - (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \ + (AR_SREV_9565(_ah) && \ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9565_11)) #define AR_SREV_9550(_ah) \ @@ -1598,6 +1605,7 @@ enum { #define AR_RESET_TSF 0x8020 #define AR_RESET_TSF_ONCE 0x01000000 +#define AR_RESET_TSF2_ONCE 0x02000000 #define AR_MAX_CFP_DUR 0x8038 #define AR_CFP_VAL 0x0000FFFF @@ -1716,6 +1724,8 @@ enum { #define AR_TPC_CTS_S 8 #define AR_TPC_CHIRP 0x003f0000 #define AR_TPC_CHIRP_S 16 +#define AR_TPC_RPT 0x3f000000 +#define AR_TPC_RPT_S 24 #define AR_QUIET1 0x80fc #define AR_QUIET1_NEXT_QUIET_S 0 @@ -1959,6 +1969,8 @@ enum { #define AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET 0x80000000 #define AR_MAC_PCU_GEN_TIMER_TSF_SEL 0x83d8 +#define AR_DIRECT_CONNECT 0x83a0 +#define AR_DC_AP_STA_EN 0x00000001 #define AR_AES_MUTE_MASK0 0x805c #define AR_AES_MUTE_MASK0_FC 0x0000FFFF diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c index 40ab65e6882f10cf0c4fa4707fb0b90065ea135a..ac4781f37e786150181139ffe562fca55da3a1fd 100644 --- a/drivers/net/wireless/ath/ath9k/tx99.c +++ b/drivers/net/wireless/ath/ath9k/tx99.c @@ -99,7 +99,7 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc) static void ath9k_tx99_deinit(struct ath_softc *sc) { - ath_reset(sc); + ath_reset(sc, NULL); ath9k_ps_wakeup(sc); ath9k_tx99_stop(sc); @@ -127,7 +127,7 @@ static int ath9k_tx99_init(struct ath_softc *sc) memset(&txctl, 0, sizeof(txctl)); txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO]; - ath_reset(sc); + ath_reset(sc, NULL); ath9k_ps_wakeup(sc); diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index d6e54a3c88f671f08ae3174ddabf6721fb39e051..e9bd02c2e8442ccd080c6ce0f977de08a6271518 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1096,6 +1096,37 @@ void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop) } } +static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf, + u8 rateidx) +{ + u8 max_power; + struct ath_hw *ah = sc->sc_ah; + + if (sc->tx99_state) + return MAX_RATE_POWER; + + if (!AR_SREV_9300_20_OR_LATER(ah)) { + /* ar9002 is not supported for the moment */ + return MAX_RATE_POWER; + } + + if (!bf->bf_state.bfs_paprd) { + struct sk_buff *skb = bf->bf_mpdu; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ath_frame_info *fi = get_frame_info(skb); + + if (rateidx < 8 && (info->flags & IEEE80211_TX_CTL_STBC)) + max_power = min(ah->tx_power_stbc[rateidx], + fi->tx_power); + else + max_power = min(ah->tx_power[rateidx], fi->tx_power); + } else { + max_power = ah->paprd_training_power; + } + + return max_power; +} + static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, struct ath_tx_info *info, int len, bool rts) { @@ -1166,6 +1197,8 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, is_40, is_sgi, is_sp); if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC)) info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC; + + info->txpower[i] = ath_get_rate_txpower(sc, bf, rix); continue; } @@ -1193,6 +1226,8 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah, phy, rate->bitrate * 100, len, rix, is_sp); + + info->txpower[i] = ath_get_rate_txpower(sc, bf, rix); } /* For AR5416 - RTS cannot be followed by a frame larger than 8K */ @@ -1239,7 +1274,6 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct
ath_buf *bf, memset(&info, 0, sizeof(info)); info.is_first = true; info.is_last = true; - info.txpower = MAX_RATE_POWER; info.qcu = txq->axq_qnum; while (bf) { @@ -2063,6 +2097,7 @@ static void setup_frame_info(struct ieee80211_hw *hw, fi->keyix = ATH9K_TXKEYIX_INVALID; fi->keytype = keytype; fi->framelen = framelen; + fi->tx_power = MAX_RATE_POWER; if (!rate) return; diff --git a/drivers/net/wireless/ath/carl9170/phy.c b/drivers/net/wireless/ath/carl9170/phy.c index b80b2138ce3c745d93d9bc973ac83cbedc984aab..dca6df13fd5b9be5de049f56b595d94fa94aeda2 100644 --- a/drivers/net/wireless/ath/carl9170/phy.c +++ b/drivers/net/wireless/ath/carl9170/phy.c @@ -994,7 +994,7 @@ static int carl9170_init_rf_bank4_pwr(struct ar9170 *ar, bool band5ghz, refsel0 = 0; refsel1 = 1; } - chansel = byte_rev_table[chansel]; + chansel = bitrev8(chansel); } else { if (freq == 2484) { chansel = 10 + (freq - 2274) / 5; @@ -1002,7 +1002,7 @@ static int carl9170_init_rf_bank4_pwr(struct ar9170 *ar, bool band5ghz, } else chansel = 16 + (freq - 2272) / 5; chansel *= 4; - chansel = byte_rev_table[chansel]; + chansel = bitrev8(chansel); } d1 = chansel; diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c index 650be79c7ac97070f0fccd62cea916ce56d8c097..cfd0554cf140eb7390756401795794e1e3550914 100644 --- a/drivers/net/wireless/ath/dfs_pattern_detector.c +++ b/drivers/net/wireless/ath/dfs_pattern_detector.c @@ -86,7 +86,7 @@ static const struct radar_detector_specs fcc_radar_ref_types[] = { FCC_PATTERN(1, 0, 5, 150, 230, 1, 23), FCC_PATTERN(2, 6, 10, 200, 500, 1, 16), FCC_PATTERN(3, 11, 20, 200, 500, 1, 12), - FCC_PATTERN(4, 50, 100, 1000, 2000, 20, 1), + FCC_PATTERN(4, 50, 100, 1000, 2000, 1, 20), FCC_PATTERN(5, 0, 1, 333, 333, 1, 9), }; @@ -105,7 +105,7 @@ static const struct radar_detector_specs jp_radar_ref_types[] = { JP_PATTERN(4, 0, 5, 150, 230, 1, 23), JP_PATTERN(5, 6, 10, 200, 500, 1, 16), JP_PATTERN(6, 11, 20, 200, 500, 1, 12), - JP_PATTERN(7, 50, 100, 1000, 2000, 20, 1), + JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20), JP_PATTERN(5, 0, 1, 333, 333, 1, 9), }; diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index b71d2b33532de392fb68094309804bdd585d5c18..267c35d1f699fadc5f045a01ee8c6834712caea4 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -494,7 +494,9 @@ out: return ret; } -static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw) +static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *mac_addr) { struct wcn36xx *wcn = hw->priv; @@ -502,7 +504,8 @@ static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw) wcn36xx_smd_start_scan(wcn); } -static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw) +static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct wcn36xx *wcn = hw->priv; diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index d9f4b30dd343b01e1d58ac8c9deadbb246a82abd..38332a6dfb3a58d8ed246a8dac627ce18681212b 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -792,12 +792,13 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy, } static int wil_cfg80211_del_station(struct wiphy *wiphy, - struct net_device *dev, const u8 *mac) + struct net_device *dev, + struct station_del_parameters *params) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); 
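	/* A note on the cfg80211 change consumed here: del_station now receives
	 * a parameter block instead of a bare MAC. A minimal sketch of that
	 * structure, with field names assumed from the 3.19-era cfg80211
	 * header (mac == NULL means "remove all stations"):
	 *
	 *	struct station_del_parameters {
	 *		const u8 *mac;		// NULL - remove all stations
	 *		u8 subtype;		// Deauth vs Disassoc frame subtype
	 *		u16 reason_code;	// IEEE 802.11 reason code
	 *	};
	 *
	 * which is why params->mac and params->reason_code are forwarded
	 * verbatim into wil6210_disconnect() below.
	 */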
mutex_lock(&wil->mutex); - wil6210_disconnect(wil, mac); + wil6210_disconnect(wil, params->mac, params->reason_code, false); mutex_unlock(&wil->mutex); return 0; diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c index 8d99021d27a8806cef563879280c8bdf17c80c10..3249562d93b4f978eec71eb6886bdcf3d7c8666b 100644 --- a/drivers/net/wireless/ath/wil6210/debug.c +++ b/drivers/net/wireless/ath/wil6210/debug.c @@ -32,6 +32,23 @@ void wil_err(struct wil6210_priv *wil, const char *fmt, ...) va_end(args); } +void wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...) +{ + if (net_ratelimit()) { + struct net_device *ndev = wil_to_ndev(wil); + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + netdev_err(ndev, "%pV", &vaf); + trace_wil6210_log_err(&vaf); + va_end(args); + } +} + void wil_info(struct wil6210_priv *wil, const char *fmt, ...) { struct net_device *ndev = wil_to_ndev(wil); diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 54a6ddc6301bcabe3c48784e850033c6a170410e..4e6e14501c2f5e654be41bbadae02ffcfc1c166f 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -573,8 +573,10 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf, if (!frame) return -ENOMEM; - if (copy_from_user(frame, buf, len)) + if (copy_from_user(frame, buf, len)) { + kfree(frame); return -EIO; + } params.buf = frame; params.len = len; @@ -614,8 +616,10 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf, return -ENOMEM; rc = simple_write_to_buffer(wmi, len, ppos, buf, len); - if (rc < 0) + if (rc < 0) { + kfree(wmi); return rc; + } cmd = &wmi[1]; cmdid = le16_to_cpu(wmi->id); diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c index 8c6f3b041f7773991c57b056229583f6e326d6c8..93c5cc16c515c8df5bfc3e2be9dea61e3249ee5a 100644 --- a/drivers/net/wireless/ath/wil6210/fw.c +++ b/drivers/net/wireless/ath/wil6210/fw.c @@ -15,7 +15,6 @@ */ #include #include -#include #include #include "wil6210.h" #include "fw.h" diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c index 44cb71f5ea5b76a0ccb3c9f9db6029aa7b1b6ec8..d4acf93a9a02b9fbfd9f5d063726bdd5e84d47c5 100644 --- a/drivers/net/wireless/ath/wil6210/fw_inc.c +++ b/drivers/net/wireless/ath/wil6210/fw_inc.c @@ -446,7 +446,7 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size) if (size >= sizeof(*hdr)) { wil_err_fw(wil, "Stop at offset %ld" " record type %d [%zd bytes]\n", - (const void *)hdr - data, + (long)((const void *)hdr - data), le16_to_cpu(hdr->type), hdr_sz); } return -EINVAL; @@ -471,7 +471,7 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name) size_t sz; const void *d; - rc = request_firmware(&fw, name, wil_to_pcie_dev(wil)); + rc = request_firmware(&fw, name, wil_to_dev(wil)); if (rc) { wil_err_fw(wil, "Failed to load firmware %s\n", name); return rc; diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index 90f416f239bd1478132c7abc693e1b060ac3c63b..4bcbd6297b3e848e6f9530fc7dd49ed2b0de3111 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -36,7 +36,8 @@ */ #define WIL6210_IRQ_DISABLE (0xFFFFFFFFUL) -#define WIL6210_IMC_RX BIT_DMA_EP_RX_ICR_RX_DONE +#define 
WIL6210_IMC_RX (BIT_DMA_EP_RX_ICR_RX_DONE | \ + BIT_DMA_EP_RX_ICR_RX_HTRSH) #define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \ BIT_DMA_EP_TX_ICR_TX_DONE_N(0)) #define WIL6210_IMC_MISC (ISR_MISC_FW_READY | \ @@ -171,6 +172,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) u32 isr = wil_ioread32_and_clear(wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) + offsetof(struct RGF_ICR, ICR)); + bool need_unmask = true; trace_wil6210_irq_rx(isr); wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr); @@ -182,12 +184,24 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) wil6210_mask_irq_rx(wil); - if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) { + /* RX_DONE and RX_HTRSH interrupts are the same if interrupt + * moderation is not used. Interrupt moderation may cause RX + * buffer overflow while RX_DONE is delayed. The required + * action is always the same - drain the accumulated + * packets from the RX ring. + */ + if (isr & (BIT_DMA_EP_RX_ICR_RX_DONE | BIT_DMA_EP_RX_ICR_RX_HTRSH)) { wil_dbg_irq(wil, "RX done\n"); - isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE; + + if (isr & BIT_DMA_EP_RX_ICR_RX_HTRSH) + wil_err_ratelimited(wil, "Received \"Rx buffer is at risk " + "of overflow\" interrupt\n"); + + isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE | BIT_DMA_EP_RX_ICR_RX_HTRSH); if (test_bit(wil_status_reset_done, &wil->status)) { if (test_bit(wil_status_napi_en, &wil->status)) { wil_dbg_txrx(wil, "NAPI(Rx) schedule\n"); + need_unmask = false; napi_schedule(&wil->napi_rx); } else { wil_err(wil, "Got Rx interrupt while " @@ -204,6 +218,10 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) /* Rx IRQ will be enabled when NAPI processing finished */ atomic_inc(&wil->isr_count_rx); + + if (unlikely(need_unmask)) + wil6210_unmask_irq_rx(wil); + return IRQ_HANDLED; } @@ -213,6 +231,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie) u32 isr = wil_ioread32_and_clear(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) + offsetof(struct RGF_ICR, ICR)); + bool need_unmask = true; trace_wil6210_irq_tx(isr); wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr); @@ -231,6 +250,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie) isr &= ~(BIT(25) - 1UL); if (test_bit(wil_status_reset_done, &wil->status)) { wil_dbg_txrx(wil, "NAPI(Tx) schedule\n"); + need_unmask = false; napi_schedule(&wil->napi_tx); } else { wil_err(wil, "Got Tx interrupt while in reset\n"); @@ -243,6 +263,10 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie) /* Tx IRQ will be enabled when NAPI processing finished */ atomic_inc(&wil->isr_count_tx); + + if (unlikely(need_unmask)) + wil6210_unmask_irq_tx(wil); + return IRQ_HANDLED; } diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 6500caf8d609ccd90fae94ce704635df1b50cb16..8ff3fe34fe05ef964444f3299e1a3763355ff01c 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -38,6 +38,65 @@ static unsigned int itr_trsh = WIL6210_ITR_TRSH_DEFAULT; module_param(itr_trsh, uint, S_IRUGO); MODULE_PARM_DESC(itr_trsh, " Interrupt moderation threshold, usecs."); +/* We allow allocation of more than 1 page buffers to support large packets. + * This is suboptimal performance-wise when the MTU is above the page size. + */ +unsigned int mtu_max = TXRX_BUF_LEN_DEFAULT - ETH_HLEN; +static int mtu_max_set(const char *val, const struct kernel_param *kp) +{ + int ret; + + /* sets mtu_max directly. No need to restore it in case of
an illegal value, since the error returned here makes insmod fail + */ + ret = param_set_uint(val, kp); + if (ret) + return ret; + + if (mtu_max < 68 || mtu_max > IEEE80211_MAX_DATA_LEN_DMG) + ret = -EINVAL; + + return ret; +} + +static struct kernel_param_ops mtu_max_ops = { + .set = mtu_max_set, + .get = param_get_uint, +}; + +module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, S_IRUGO); +MODULE_PARM_DESC(mtu_max, " Max MTU value."); + +static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT; +static uint tx_ring_order = WIL_TX_RING_SIZE_ORDER_DEFAULT; + +static int ring_order_set(const char *val, const struct kernel_param *kp) +{ + int ret; + uint x; + + ret = kstrtouint(val, 0, &x); + if (ret) + return ret; + + if ((x < WIL_RING_SIZE_ORDER_MIN) || (x > WIL_RING_SIZE_ORDER_MAX)) + return -EINVAL; + + *((uint *)kp->arg) = x; + + return 0; +} + +static struct kernel_param_ops ring_order_ops = { + .set = ring_order_set, + .get = param_get_uint, +}; + +module_param_cb(rx_ring_order, &ring_order_ops, &rx_ring_order, S_IRUGO); +MODULE_PARM_DESC(rx_ring_order, " Rx ring order; size = 1 << order"); +module_param_cb(tx_ring_order, &ring_order_ops, &tx_ring_order, S_IRUGO); +MODULE_PARM_DESC(tx_ring_order, " Tx ring order; size = 1 << order"); + #define RST_DELAY (20) /* msec, for loop in @wil_target_reset */ #define RST_COUNT (1 + 1000/RST_DELAY) /* round up to be above 1 sec total */ @@ -74,7 +133,8 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src, __raw_writel(*s++, d++); } -static void wil_disconnect_cid(struct wil6210_priv *wil, int cid) +static void wil_disconnect_cid(struct wil6210_priv *wil, int cid, + u16 reason_code, bool from_event) { uint i; struct net_device *ndev = wil_to_ndev(wil); @@ -86,7 +146,9 @@ static void wil_disconnect_cid(struct wil6210_priv *wil, int cid) sta->data_port_open = false; if (sta->status != wil_sta_unused) { - wmi_disconnect_sta(wil, sta->addr, WLAN_REASON_DEAUTH_LEAVING); + if (!from_event) + wmi_disconnect_sta(wil, sta->addr, reason_code); + switch (wdev->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: @@ -118,7 +180,8 @@ static void wil_disconnect_cid(struct wil6210_priv *wil, int cid) memset(&sta->stats, 0, sizeof(sta->stats)); } -static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid) +static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, + u16 reason_code, bool from_event) { int cid = -ENOENT; struct net_device *ndev = wil_to_ndev(wil); @@ -133,10 +196,10 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid) } if (cid >= 0) /* disconnect 1 peer */ - wil_disconnect_cid(wil, cid); + wil_disconnect_cid(wil, cid, reason_code, from_event); else /* disconnect all */ for (cid = 0; cid < WIL6210_MAX_CID; cid++) - wil_disconnect_cid(wil, cid); + wil_disconnect_cid(wil, cid, reason_code, from_event); /* link state */ switch (wdev->iftype) { @@ -145,8 +208,7 @@ wil_link_off(wil); if (test_bit(wil_status_fwconnected, &wil->status)) { clear_bit(wil_status_fwconnected, &wil->status); - cfg80211_disconnected(ndev, - WLAN_STATUS_UNSPECIFIED_FAILURE, + cfg80211_disconnected(ndev, reason_code, NULL, 0, GFP_KERNEL); } else if (test_bit(wil_status_fwconnecting, &wil->status)) { cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0, @@ -166,7 +228,7 @@ static void wil_disconnect_worker(struct work_struct *work) struct wil6210_priv, disconnect_worker); mutex_lock(&wil->mutex); -
_wil6210_disconnect(wil, NULL); + _wil6210_disconnect(wil, NULL, WLAN_REASON_UNSPECIFIED, false); mutex_unlock(&wil->mutex); } @@ -188,6 +250,7 @@ static void wil_scan_timer_fn(ulong x) clear_bit(wil_status_fwready, &wil->status); wil_err(wil, "Scan timeout detected, start fw error recovery\n"); + wil->recovery_state = fw_recovery_pending; schedule_work(&wil->fw_error_worker); } @@ -223,6 +286,11 @@ static void wil_fw_error_worker(struct work_struct *work) wil_dbg_misc(wil, "fw error worker\n"); + if (!netif_running(wil_to_ndev(wil))) { + wil_info(wil, "No recovery - interface is down\n"); + return; + } + /* increment @recovery_count if less than WIL6210_FW_RECOVERY_TO * passed since last recovery attempt */ @@ -257,9 +325,12 @@ static void wil_fw_error_worker(struct work_struct *work) break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: + wil_info(wil, "No recovery for AP-like interface\n"); /* recovery in these modes is done by upper layers */ break; default: + wil_err(wil, "No recovery - unknown interface type %d\n", + wdev->iftype); break; } mutex_unlock(&wil->mutex); @@ -291,7 +362,7 @@ static void wil_connect_worker(struct work_struct *work) wil_dbg_wmi(wil, "Configure for connection CID %d\n", cid); - rc = wil_vring_init_tx(wil, ringid, WIL6210_TX_RING_SIZE, cid, 0); + rc = wil_vring_init_tx(wil, ringid, 1 << tx_ring_order, cid, 0); wil->pending_connect_cid = -1; if (rc == 0) { wil->sta[cid].status = wil_sta_connected; @@ -346,12 +417,23 @@ int wil_priv_init(struct wil6210_priv *wil) return 0; } -void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid) +/** + * wil6210_disconnect - disconnect one connection + * @wil: driver context + * @bssid: peer to disconnect, NULL to disconnect all + * @reason_code: Reason code for the Disassociation frame + * @from_event: whether invoked from the FW event handler + * + * Disconnect and release associated resources. If not invoked from the + * FW event handler, issue WMI command(s) to trigger MAC disconnect.
+ */ +void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, + u16 reason_code, bool from_event) { wil_dbg_misc(wil, "%s()\n", __func__); del_timer_sync(&wil->connect_timer); - _wil6210_disconnect(wil, bssid); + _wil6210_disconnect(wil, bssid, reason_code, from_event); } void wil_priv_deinit(struct wil6210_priv *wil) @@ -363,7 +445,7 @@ void wil_priv_deinit(struct wil6210_priv *wil) cancel_work_sync(&wil->disconnect_worker); cancel_work_sync(&wil->fw_error_worker); mutex_lock(&wil->mutex); - wil6210_disconnect(wil, NULL); + wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false); mutex_unlock(&wil->mutex); wmi_event_flush(wil); destroy_workqueue(wil->wmi_wq_conn); @@ -395,7 +477,7 @@ static inline void wil_release_cpu(struct wil6210_priv *wil) static int wil_target_reset(struct wil6210_priv *wil) { int delay = 0; - u32 hw_state; + u32 x; u32 rev_id; bool is_sparrow = (wil->board->board == WIL_BOARD_SPARROW); @@ -410,9 +492,25 @@ static int wil_target_reset(struct wil6210_priv *wil) S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST); wil_halt_cpu(wil); - C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL); /* 40 MHz */ + + /* Clear Fw Download notification */ + C(RGF_USER_USAGE_6, BIT(0)); if (is_sparrow) { + S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN); + /* XTAL stabilization should take about 3ms */ + usleep_range(5000, 7000); + x = R(RGF_CAF_PLL_LOCK_STATUS); + if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) { + wil_err(wil, "Xtal stabilization timeout\n" + "RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x); + return -ETIME; + } + /* switch 10k to XTAL*/ + C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF); + /* 40 MHz */ + C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL); + W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f); W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf); } @@ -453,13 +551,13 @@ static int wil_target_reset(struct wil6210_priv *wil) /* wait until device ready. typical time is 200..250 msec */ do { msleep(RST_DELAY); - hw_state = R(RGF_USER_HW_MACHINE_STATE); + x = R(RGF_USER_HW_MACHINE_STATE); if (delay++ > RST_COUNT) { wil_err(wil, "Reset not completed, hw_state 0x%08x\n", - hw_state); + x); return -ETIME; } - } while (hw_state != HW_MACHINE_BOOT_DONE); + } while (x != HW_MACHINE_BOOT_DONE); /* TODO: Erez check rev_id != 1 */ if (!is_sparrow && (rev_id != 1)) @@ -535,7 +633,7 @@ int wil_reset(struct wil6210_priv *wil) WARN_ON(test_bit(wil_status_napi_en, &wil->status)); cancel_work_sync(&wil->disconnect_worker); - wil6210_disconnect(wil, NULL); + wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false); wil->status = 0; /* prevent NAPI from being scheduled */ @@ -640,7 +738,7 @@ int __wil_up(struct wil6210_priv *wil) return rc; /* Rx VRING. 
After MAC and beacon */ - rc = wil_rx_init(wil); + rc = wil_rx_init(wil, 1 << rx_ring_order); if (rc) return rc; diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 239965106c05d1f878f52cd8b552047d2743d998..e81703ca7701cbc1e04413d51c3af25a2d4d7986 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -41,7 +41,7 @@ static int wil_change_mtu(struct net_device *ndev, int new_mtu) { struct wil6210_priv *wil = ndev_to_wil(ndev); - if (new_mtu < 68 || new_mtu > (TX_BUF_LEN - ETH_HLEN)) { + if (new_mtu < 68 || new_mtu > mtu_max) { wil_err(wil, "invalid MTU %d\n", new_mtu); return -EINVAL; } diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 2936ef0c18cbc700003760e5a93a9f504b9d8840..e3f8bdce5abc46ab6ee39f2107cfc2a3b0590026 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -206,12 +206,10 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring, u32 i, int headroom) { struct device *dev = wil_to_dev(wil); - unsigned int sz = RX_BUF_LEN; + unsigned int sz = mtu_max + ETH_HLEN; struct vring_rx_desc dd, *d = &dd; volatile struct vring_rx_desc *_d = &vring->va[i].rx; dma_addr_t pa; - - /* TODO align */ struct sk_buff *skb = dev_alloc_skb(sz + headroom); if (unlikely(!skb)) @@ -385,7 +383,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, struct vring_rx_desc *d; struct sk_buff *skb; dma_addr_t pa; - unsigned int sz = RX_BUF_LEN; + unsigned int sz = mtu_max + ETH_HLEN; u16 dmalen; u8 ftype; u8 ds_bits; @@ -596,7 +594,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota) wil_rx_refill(wil, v->size); } -int wil_rx_init(struct wil6210_priv *wil) +int wil_rx_init(struct wil6210_priv *wil, u16 size) { struct vring *vring = &wil->vring_rx; int rc; @@ -608,7 +606,7 @@ int wil_rx_init(struct wil6210_priv *wil) return -EINVAL; } - vring->size = WIL6210_RX_RING_SIZE; + vring->size = size; rc = wil_vring_alloc(wil, vring); if (rc) return rc; @@ -646,7 +644,8 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, .action = cpu_to_le32(WMI_VRING_CMD_ADD), .vring_cfg = { .tx_sw_ring = { - .max_mpdu_size = cpu_to_le16(TX_BUF_LEN), + .max_mpdu_size = + cpu_to_le16(mtu_max + ETH_HLEN), .ring_size = cpu_to_le16(size), }, .ringid = id, @@ -927,8 +926,9 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, wil_dbg_txrx(wil, "%s()\n", __func__); if (avail < 1 + nr_frags) { - wil_err(wil, "Tx ring full. No space for %d fragments\n", - 1 + nr_frags); + wil_err_ratelimited(wil, + "Tx ring full. No space for %d fragments\n", + 1 + nr_frags); return -ENOMEM; } _d = &vring->va[i].tx; diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index de046716d2b7e2de3c96e2f8aaff266796c78967..630aeb5fa7f4a50f37b90d99137ce591bc4c2b9e 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -21,8 +21,8 @@ #define BUF_HW_OWNED (0) /* size of max. Tx/Rx buffers, as supported by FW */ -#define RX_BUF_LEN (2242) -#define TX_BUF_LEN (2242) +#define TXRX_BUF_LEN_DEFAULT (2242) + /* how many bytes to reserve for rtap header?
*/ #define WIL6210_RTAP_SIZE (128) diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index ce6488e42091cf8a935bfe87dd84baad34224656..c6ec5b99ac7d5849995ae186da4deeb2df96140e 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -24,6 +24,7 @@ #include "wil_platform.h" extern bool no_fw_recovery; +extern unsigned int mtu_max; #define WIL_NAME "wil6210" #define WIL_FW_NAME "wil6210.fw" @@ -48,8 +49,11 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1) #define WIL6210_MEM_SIZE (2*1024*1024UL) -#define WIL6210_RX_RING_SIZE (128) -#define WIL6210_TX_RING_SIZE (512) +#define WIL_RX_RING_SIZE_ORDER_DEFAULT (9) +#define WIL_TX_RING_SIZE_ORDER_DEFAULT (9) +/* limit ring size in range [32..32k] */ +#define WIL_RING_SIZE_ORDER_MIN (5) +#define WIL_RING_SIZE_ORDER_MAX (15) #define WIL6210_MAX_TX_RINGS (24) /* HW limit */ #define WIL6210_MAX_CID (8) /* HW limit */ #define WIL6210_NAPI_BUDGET (16) /* arbitrary */ @@ -117,12 +121,15 @@ struct RGF_ICR { #define BIT_USER_USER_ICR_SW_INT_2 BIT(18) #define RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0 (0x880c18) #define RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1 (0x880c2c) +#define RGF_USER_SPARROW_M_4 (0x880c50) /* Sparrow */ + #define BIT_SPARROW_M_4_SEL_SLEEP_OR_REF BIT(2) #define RGF_DMA_EP_TX_ICR (0x881bb4) /* struct RGF_ICR */ #define BIT_DMA_EP_TX_ICR_TX_DONE BIT(0) #define BIT_DMA_EP_TX_ICR_TX_DONE_N(n) BIT(n+1) /* n = [0..23] */ #define RGF_DMA_EP_RX_ICR (0x881bd0) /* struct RGF_ICR */ #define BIT_DMA_EP_RX_ICR_RX_DONE BIT(0) + #define BIT_DMA_EP_RX_ICR_RX_HTRSH BIT(1) #define RGF_DMA_EP_MISC_ICR (0x881bec) /* struct RGF_ICR */ #define BIT_DMA_EP_MISC_ICR_RX_HTRSH BIT(0) #define BIT_DMA_EP_MISC_ICR_TX_NO_ACT BIT(1) @@ -152,6 +159,10 @@ struct RGF_ICR { #define RGF_MAC_MTRL_COUNTER_0 (0x886aa8) #define RGF_CAF_ICR (0x88946c) /* struct RGF_ICR */ +#define RGF_CAF_OSC_CONTROL (0x88afa4) + #define BIT_CAF_OSC_XTAL_EN BIT(0) +#define RGF_CAF_PLL_LOCK_STATUS (0x88afec) + #define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0) /* popular locations */ #define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD) @@ -461,10 +472,14 @@ struct wil6210_priv { #define wdev_to_wil(w) (struct wil6210_priv *)(wdev_priv(w)) #define wil_to_ndev(i) (wil_to_wdev(i)->netdev) #define ndev_to_wil(n) (wdev_to_wil(n->ieee80211_ptr)) -#define wil_to_pcie_dev(i) (&i->pdev->dev) +__printf(2, 3) void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...); +__printf(2, 3) void wil_err(struct wil6210_priv *wil, const char *fmt, ...); +__printf(2, 3) +void wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...); +__printf(2, 3) void wil_info(struct wil6210_priv *wil, const char *fmt, ...); #define wil_dbg(wil, fmt, arg...) 
do { \ netdev_dbg(wil_to_ndev(wil), fmt, ##arg); \ @@ -575,9 +590,10 @@ void wil_wdev_free(struct wil6210_priv *wil); int wmi_set_mac_address(struct wil6210_priv *wil, void *addr); int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan); int wmi_pcp_stop(struct wil6210_priv *wil); -void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid); +void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, + u16 reason_code, bool from_event); -int wil_rx_init(struct wil6210_priv *wil); +int wil_rx_init(struct wil6210_priv *wil, u16 size); void wil_rx_fini(struct wil6210_priv *wil); /* TX API */ diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 4311df982c6074cf54e2fe43a8971f4aa355725e..63476c86cd0e1b159be33fc85753583a1ca4cd34 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -478,15 +478,15 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id, void *d, int len) { struct wmi_disconnect_event *evt = d; + u16 reason_code = le16_to_cpu(evt->protocol_reason_status); - wil_dbg_wmi(wil, "Disconnect %pM reason %d proto %d wmi\n", - evt->bssid, - evt->protocol_reason_status, evt->disconnect_reason); + wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n", + evt->bssid, reason_code, evt->disconnect_reason); wil->sinfo_gen++; mutex_lock(&wil->mutex); - wil6210_disconnect(wil, evt->bssid); + wil6210_disconnect(wil, evt->bssid, reason_code, true); mutex_unlock(&wil->mutex); } @@ -1025,7 +1025,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring) struct wmi_cfg_rx_chain_cmd cmd = { .action = WMI_RX_CHAIN_ADD, .rx_sw_ring = { - .max_mpdu_size = cpu_to_le16(RX_BUF_LEN), + .max_mpdu_size = cpu_to_le16(mtu_max + ETH_HLEN), .ring_mem_base = cpu_to_le64(vring->pa), .ring_size = cpu_to_le16(vring->size), }, diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 5d4173ee55bcc4aaa32cbab54390fe3c3c242502..47731cb0d8155d0d9c506f494beca5fa96bcf6d0 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -5110,7 +5110,9 @@ static void b43_op_sta_notify(struct ieee80211_hw *hw, B43_WARN_ON(!vif || wl->vif != vif); } -static void b43_op_sw_scan_start_notifier(struct ieee80211_hw *hw) +static void b43_op_sw_scan_start_notifier(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *mac_addr) { struct b43_wl *wl = hw_to_b43_wl(hw); struct b43_wldev *dev; @@ -5124,7 +5126,8 @@ static void b43_op_sw_scan_start_notifier(struct ieee80211_hw *hw) mutex_unlock(&wl->mutex); } -static void b43_op_sw_scan_complete_notifier(struct ieee80211_hw *hw) +static void b43_op_sw_scan_complete_notifier(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct b43_wl *wl = hw_to_b43_wl(hw); struct b43_wldev *dev; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile index 90a977fe9a648d0f34eda15bbcba305eac6d35c0..dc4c75083085bbe3afe09d53ed68ffe207c93102 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile +++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile @@ -23,15 +23,15 @@ ccflags-y += -D__CHECK_ENDIAN__ obj-$(CONFIG_BRCMFMAC) += brcmfmac.o brcmfmac-objs += \ - wl_cfg80211.o \ + cfg80211.o \ chip.o \ fwil.o \ fweh.o \ fwsignal.o \ p2p.o \ proto.o \ - dhd_common.o \ - dhd_linux.o \ + common.o \ + core.o \ firmware.o \ feature.o \ btcoex.o \ @@ -43,14 +43,14 @@ brcmfmac-$(CONFIG_BRCMFMAC_PROTO_MSGBUF) += \ flowring.o \ msgbuf.o 
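The wcn36xx and b43 hunks above adapt to the same mac80211 interface change: the software-scan hooks gained the vif and the MAC address used while scanning. A minimal sketch of the updated callback pair, with prototypes as shown in those hunks (bodies are illustrative only; a driver that does not use the new arguments simply ignores them):

static void demo_sw_scan_start(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       const u8 *mac_addr)
{
	/* mac_addr is the address used while scanning (may be randomized);
	 * drivers indifferent to it, like b43 above, just ignore it.
	 */
}

static void demo_sw_scan_complete(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif)
{
}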
brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \ - dhd_sdio.o \ + sdio.o \ bcmsdh.o brcmfmac-$(CONFIG_BRCMFMAC_USB) += \ usb.o brcmfmac-$(CONFIG_BRCMFMAC_PCIE) += \ pcie.o brcmfmac-$(CONFIG_BRCMDBG) += \ - dhd_dbg.o + debug.o brcmfmac-$(CONFIG_BRCM_TRACING) += \ tracepoint.o brcmfmac-$(CONFIG_OF) += \ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c index a159ff3427de9fa7c3e42c508bc6a0c8930e5cdb..8e0e91c4a0b1670526683ced476e7b366451752e 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c @@ -25,10 +25,10 @@ #include #include -#include "dhd.h" -#include "dhd_bus.h" +#include "core.h" +#include "bus.h" #include "fwsignal.h" -#include "dhd_dbg.h" +#include "debug.h" #include "tracepoint.h" #include "proto.h" #include "bcdc.h" diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c index 8dbd5dbb78fd14c8b697c04379e88bdf7442784f..f754ffcd030864ffd93f9bff4f86e80e249ea7c4 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c @@ -41,9 +41,9 @@ #include #include #include "chip.h" -#include "dhd_bus.h" -#include "dhd_dbg.h" -#include "sdio_host.h" +#include "bus.h" +#include "debug.h" +#include "sdio.h" #include "of.h" #define SDIOH_API_ACCESS_RETRY_LIMIT 2 @@ -1064,6 +1064,16 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, if (!sdiodev->pdata) brcmf_of_probe(sdiodev); +#ifdef CONFIG_PM_SLEEP + /* wowl can be supported when KEEP_POWER is true and (WAKE_SDIO_IRQ + * is true or when platform data OOB irq is true). + */ + if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) && + ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) || + (sdiodev->pdata->oob_irq_supported))) + bus_if->wowl_supported = true; +#endif + atomic_set(&sdiodev->suspend, false); init_waitqueue_head(&sdiodev->request_word_wait); init_waitqueue_head(&sdiodev->request_buffer_wait); @@ -1116,34 +1126,39 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func) brcmf_dbg(SDIO, "Exit\n"); } +void brcmf_sdio_wowl_config(struct device *dev, bool enabled) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + + brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled); + sdiodev->wowl_enabled = enabled; +} + #ifdef CONFIG_PM_SLEEP static int brcmf_ops_sdio_suspend(struct device *dev) { - mmc_pm_flag_t sdio_flags; struct brcmf_bus *bus_if = dev_get_drvdata(dev); struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; - int ret = 0; + mmc_pm_flag_t sdio_flags; brcmf_dbg(SDIO, "Enter\n"); - sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]); - if (!(sdio_flags & MMC_PM_KEEP_POWER)) { - brcmf_err("Host can't keep power while suspended\n"); - return -EINVAL; - } - atomic_set(&sdiodev->suspend, true); - ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER); - if (ret) { - brcmf_err("Failed to set pm_flags\n"); - atomic_set(&sdiodev->suspend, false); - return ret; + if (sdiodev->wowl_enabled) { + sdio_flags = MMC_PM_KEEP_POWER; + if (sdiodev->pdata->oob_irq_supported) + enable_irq_wake(sdiodev->pdata->oob_irq_nr); + else + sdio_flags = MMC_PM_WAKE_SDIO_IRQ; + if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags)) + brcmf_err("Failed to set pm_flags %x\n", sdio_flags); } brcmf_sdio_wd_timer(sdiodev->bus, 0); - return ret; + return 0; } static int brcmf_ops_sdio_resume(struct device *dev) @@ -1152,6 
+1167,8 @@ static int brcmf_ops_sdio_resume(struct device *dev) struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; brcmf_dbg(SDIO, "Enter\n"); + if (sdiodev->pdata->oob_irq_supported) + disable_irq_wake(sdiodev->pdata->oob_irq_nr); brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS); atomic_set(&sdiodev->suspend, false); return 0; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c index a29ac4977b3a12e009f2947c09bd40d3b604ccde..0445163991b72cf34e78b353825b6505e3775dd1 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c @@ -20,13 +20,13 @@ #include #include #include -#include -#include +#include "core.h" +#include "debug.h" #include "fwil.h" #include "fwil_types.h" #include "btcoex.h" #include "p2p.h" -#include "wl_cfg80211.h" +#include "cfg80211.h" /* T1 start SCO/eSCO priority suppression */ #define BRCMF_BTCOEX_OPPR_WIN_TIME 2000 diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/bus.h similarity index 98% rename from drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h rename to drivers/net/wireless/brcm80211/brcmfmac/bus.h index 80e73a1262be81b6d143f4a8967e960def536fa7..ef344e47218a3ab153f86fba35f510c6dc6cd809 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/bus.h @@ -14,10 +14,10 @@ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#ifndef _BRCMF_BUS_H_ -#define _BRCMF_BUS_H_ +#ifndef BRCMFMAC_BUS_H +#define BRCMFMAC_BUS_H -#include "dhd_dbg.h" +#include "debug.h" /* IDs of the 6 default common rings of msgbuf protocol */ #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT 0 @@ -227,8 +227,7 @@ void brcmf_txflowblock(struct device *dev, bool state); void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success); int brcmf_bus_start(struct device *dev); -s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, - u32 len); +s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len); void brcmf_bus_add_txhdrlen(struct device *dev, uint len); #ifdef CONFIG_BRCMFMAC_SDIO @@ -241,4 +240,4 @@ void brcmf_usb_exit(void); void brcmf_usb_register(void); #endif -#endif /* _BRCMF_BUS_H_ */ +#endif /* BRCMFMAC_BUS_H */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c similarity index 95% rename from drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c rename to drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c index 39b45c038a93ec2a538665bbe90498230493ec42..3aecc5f4871963353ef46206288d2fc6cbc9ec64 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c @@ -26,18 +26,18 @@ #include #include #include -#include "dhd.h" -#include "dhd_dbg.h" +#include "core.h" +#include "debug.h" #include "tracepoint.h" #include "fwil_types.h" #include "p2p.h" #include "btcoex.h" -#include "wl_cfg80211.h" +#include "cfg80211.h" #include "feature.h" #include "fwil.h" #include "proto.h" #include "vendor.h" -#include "dhd_bus.h" +#include "bus.h" #define BRCMF_SCAN_IE_LEN_MAX 2048 #define BRCMF_PNO_VERSION 2 @@ -520,6 +520,95 @@ brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev) ADDR_INDIRECT); } +static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp) +{ + struct brcmf_mbss_ssid_le mbss_ssid_le; + int bsscfgidx; + int err; + + memset(&mbss_ssid_le, 0, sizeof(mbss_ssid_le)); + 
bsscfgidx = brcmf_get_next_free_bsscfgidx(ifp->drvr); + if (bsscfgidx < 0) + return bsscfgidx; + + mbss_ssid_le.bsscfgidx = cpu_to_le32(bsscfgidx); + mbss_ssid_le.SSID_len = cpu_to_le32(5); + sprintf(mbss_ssid_le.SSID, "ssid%d" , bsscfgidx); + + err = brcmf_fil_bsscfg_data_set(ifp, "bsscfg:ssid", &mbss_ssid_le, + sizeof(mbss_ssid_le)); + if (err < 0) + brcmf_err("setting ssid failed %d\n", err); + + return err; +} + +/** + * brcmf_ap_add_vif() - create a new AP virtual interface for multiple BSS + * + * @wiphy: wiphy device of new interface. + * @name: name of the new interface. + * @flags: not used. + * @params: contains mac address for AP device. + */ +static +struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name, + u32 *flags, struct vif_params *params) +{ + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); + struct brcmf_cfg80211_vif *vif; + int err; + + if (brcmf_cfg80211_vif_event_armed(cfg)) + return ERR_PTR(-EBUSY); + + brcmf_dbg(INFO, "Adding vif \"%s\"\n", name); + + vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_AP, false); + if (IS_ERR(vif)) + return (struct wireless_dev *)vif; + + brcmf_cfg80211_arm_vif_event(cfg, vif); + + err = brcmf_cfg80211_request_ap_if(ifp); + if (err) { + brcmf_cfg80211_arm_vif_event(cfg, NULL); + goto fail; + } + + /* wait for firmware event */ + err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_ADD, + msecs_to_jiffies(1500)); + brcmf_cfg80211_arm_vif_event(cfg, NULL); + if (!err) { + brcmf_err("timeout occurred\n"); + err = -EIO; + goto fail; + } + + /* interface created in firmware */ + ifp = vif->ifp; + if (!ifp) { + brcmf_err("no if pointer provided\n"); + err = -ENOENT; + goto fail; + } + + strncpy(ifp->ndev->name, name, sizeof(ifp->ndev->name) - 1); + err = brcmf_net_attach(ifp, true); + if (err) { + brcmf_err("Registering netdevice failed\n"); + goto fail; + } + + return &ifp->vif->wdev; + +fail: + brcmf_free_vif(vif); + return ERR_PTR(err); +} + static bool brcmf_is_apmode(struct brcmf_cfg80211_vif *vif) { enum nl80211_iftype iftype; @@ -545,12 +634,16 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy, switch (type) { case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_STATION: - case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_MESH_POINT: return ERR_PTR(-EOPNOTSUPP); + case NL80211_IFTYPE_AP: + wdev = brcmf_ap_add_vif(wiphy, name, flags, params); + if (!IS_ERR(wdev)) + brcmf_cfg80211_update_proto_addr_mode(wdev); + return wdev; case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_P2P_DEVICE: @@ -1815,6 +1908,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev, return -EIO; clear_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state); + clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state); cfg80211_disconnected(ndev, reason_code, NULL, 0, GFP_KERNEL); memcpy(&scbval.ea, &profile->bssid, ETH_ALEN); @@ -2785,6 +2879,44 @@ static __always_inline void brcmf_delay(u32 ms) } } +static s32 brcmf_config_wowl_pattern(struct brcmf_if *ifp, u8 cmd[4], + u8 *pattern, u32 patternsize, u8 *mask, + u32 packet_offset) +{ + struct brcmf_fil_wowl_pattern_le *filter; + u32 masksize; + u32 patternoffset; + u8 *buf; + u32 bufsize; + s32 ret; + + masksize = (patternsize + 7) / 8; + patternoffset = sizeof(*filter) - sizeof(filter->cmd) + masksize; + + bufsize = sizeof(*filter) + patternsize + masksize; + buf = kzalloc(bufsize, 
GFP_KERNEL); + if (!buf) + return -ENOMEM; + filter = (struct brcmf_fil_wowl_pattern_le *)buf; + + memcpy(filter->cmd, cmd, 4); + filter->masksize = cpu_to_le32(masksize); + filter->offset = cpu_to_le32(packet_offset); + filter->patternoffset = cpu_to_le32(patternoffset); + filter->patternsize = cpu_to_le32(patternsize); + filter->type = cpu_to_le32(BRCMF_WOWL_PATTERN_TYPE_BITMAP); + + if ((mask) && (masksize)) + memcpy(buf + sizeof(*filter), mask, masksize); + if ((pattern) && (patternsize)) + memcpy(buf + sizeof(*filter) + masksize, pattern, patternsize); + + ret = brcmf_fil_iovar_data_set(ifp, "wowl_pattern", buf, bufsize); + + kfree(buf); + return ret; +} + static s32 brcmf_cfg80211_resume(struct wiphy *wiphy) { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); @@ -2794,10 +2926,11 @@ static s32 brcmf_cfg80211_resume(struct wiphy *wiphy) brcmf_dbg(TRACE, "Enter\n"); if (cfg->wowl_enabled) { + brcmf_configure_arp_offload(ifp, true); brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, cfg->pre_wowl_pmmode); - brcmf_fil_iovar_data_set(ifp, "wowl_pattern", "clr", 4); brcmf_fil_iovar_int_set(ifp, "wowl_clear", 0); + brcmf_config_wowl_pattern(ifp, "clr", NULL, 0, NULL, 0); cfg->wowl_enabled = false; } return 0; @@ -2808,21 +2941,29 @@ static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg, struct cfg80211_wowlan *wowl) { u32 wowl_config; + u32 i; brcmf_dbg(TRACE, "Suspend, wowl config.\n"); + brcmf_configure_arp_offload(ifp, false); brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_PM, &cfg->pre_wowl_pmmode); brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, PM_MAX); wowl_config = 0; if (wowl->disconnect) - wowl_config |= WL_WOWL_DIS | WL_WOWL_BCN | WL_WOWL_RETR; - /* Note: if "wowl" target and not "wowlpf" then wowl_bcn_loss - * should be configured. This paramater is not supported by - * wowlpf. 
- */ + wowl_config = BRCMF_WOWL_DIS | BRCMF_WOWL_BCN | BRCMF_WOWL_RETR; if (wowl->magic_pkt) - wowl_config |= WL_WOWL_MAGIC; + wowl_config |= BRCMF_WOWL_MAGIC; + if ((wowl->patterns) && (wowl->n_patterns)) { + wowl_config |= BRCMF_WOWL_NET; + for (i = 0; i < wowl->n_patterns; i++) { + brcmf_config_wowl_pattern(ifp, "add", + (u8 *)wowl->patterns[i].pattern, + wowl->patterns[i].pattern_len, + (u8 *)wowl->patterns[i].mask, + wowl->patterns[i].pkt_offset); + } + } brcmf_fil_iovar_int_set(ifp, "wowl", wowl_config); brcmf_fil_iovar_int_set(ifp, "wowl_activate", 1); brcmf_bus_wowl_config(cfg->pub->bus_if, true); @@ -2885,7 +3026,7 @@ brcmf_update_pmklist(struct net_device *ndev, struct brcmf_cfg80211_pmk_list *pmk_list, s32 err) { int i, j; - int pmkid_len; + u32 pmkid_len; pmkid_len = le32_to_cpu(pmk_list->pmkids.npmkid); @@ -2913,8 +3054,7 @@ brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev, struct brcmf_if *ifp = netdev_priv(ndev); struct pmkid_list *pmkids = &cfg->pmk_list->pmkids; s32 err = 0; - int i; - int pmkid_len; + u32 pmkid_len, i; brcmf_dbg(TRACE, "Enter\n"); if (!check_vif_up(ifp->vif)) @@ -2953,7 +3093,7 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev, struct brcmf_if *ifp = netdev_priv(ndev); struct pmkid_list pmkid; s32 err = 0; - int i, pmkid_len; + u32 pmkid_len, i; brcmf_dbg(TRACE, "Enter\n"); if (!check_vif_up(ifp->vif)) @@ -3314,11 +3454,10 @@ static bool brcmf_valid_wpa_oui(u8 *oui, bool is_rsn_ie) } static s32 -brcmf_configure_wpaie(struct net_device *ndev, +brcmf_configure_wpaie(struct brcmf_if *ifp, const struct brcmf_vs_tlv *wpa_ie, bool is_rsn_ie) { - struct brcmf_if *ifp = netdev_priv(ndev); u32 auth = 0; /* d11 open authentication */ u16 count; s32 err = 0; @@ -3793,6 +3932,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, enum nl80211_iftype dev_role; struct brcmf_fil_bss_enable_le bss_enable; u16 chanspec; + bool mbss; brcmf_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n", settings->chandef.chan->hw_value, @@ -3803,6 +3943,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, settings->inactivity_timeout); dev_role = ifp->vif->wdev.iftype; + mbss = ifp->vif->mbss; memset(&ssid_le, 0, sizeof(ssid_le)); if (settings->ssid == NULL || settings->ssid_len == 0) { @@ -3822,8 +3963,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, ssid_le.SSID_len = cpu_to_le32((u32)settings->ssid_len); } - brcmf_set_mpc(ifp, 0); - brcmf_configure_arp_offload(ifp, false); + if (!mbss) { + brcmf_set_mpc(ifp, 0); + brcmf_configure_arp_offload(ifp, false); + } /* find the RSN_IE */ rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail, @@ -3837,13 +3980,16 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, brcmf_dbg(TRACE, "WPA(2) IE is found\n"); if (wpa_ie != NULL) { /* WPA IE */ - err = brcmf_configure_wpaie(ndev, wpa_ie, false); + err = brcmf_configure_wpaie(ifp, wpa_ie, false); if (err < 0) goto exit; } else { + struct brcmf_vs_tlv *tmp_ie; + + tmp_ie = (struct brcmf_vs_tlv *)rsn_ie; + /* RSN IE */ - err = brcmf_configure_wpaie(ndev, - (struct brcmf_vs_tlv *)rsn_ie, true); + err = brcmf_configure_wpaie(ifp, tmp_ie, true); if (err < 0) goto exit; } @@ -3854,45 +4000,53 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon); - chanspec = chandef_to_chanspec(&cfg->d11inf, &settings->chandef); - err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec); 
- if (err < 0) { - brcmf_err("Set Channel failed: chspec=%d, %d\n", chanspec, err); - goto exit; - } - - if (settings->beacon_interval) { - err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD, - settings->beacon_interval); + if (!mbss) { + chanspec = chandef_to_chanspec(&cfg->d11inf, + &settings->chandef); + err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec); if (err < 0) { - brcmf_err("Beacon Interval Set Error, %d\n", err); + brcmf_err("Set Channel failed: chspec=%d, %d\n", + chanspec, err); goto exit; } - } - if (settings->dtim_period) { - err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_DTIMPRD, - settings->dtim_period); - if (err < 0) { - brcmf_err("DTIM Interval Set Error, %d\n", err); - goto exit; + + if (settings->beacon_interval) { + err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD, + settings->beacon_interval); + if (err < 0) { + brcmf_err("Beacon Interval Set Error, %d\n", + err); + goto exit; + } + } + if (settings->dtim_period) { + err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_DTIMPRD, + settings->dtim_period); + if (err < 0) { + brcmf_err("DTIM Interval Set Error, %d\n", err); + goto exit; + } } - } - if (dev_role == NL80211_IFTYPE_AP) { - err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1); + if (dev_role == NL80211_IFTYPE_AP) { + err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1); + if (err < 0) { + brcmf_err("BRCMF_C_DOWN error %d\n", err); + goto exit; + } + brcmf_fil_iovar_int_set(ifp, "apsta", 0); + } + + err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1); if (err < 0) { - brcmf_err("BRCMF_C_DOWN error %d\n", err); + brcmf_err("SET INFRA error %d\n", err); goto exit; } - brcmf_fil_iovar_int_set(ifp, "apsta", 0); - } - - err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1); - if (err < 0) { - brcmf_err("SET INFRA error %d\n", err); - goto exit; } if (dev_role == NL80211_IFTYPE_AP) { + if ((brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) && (!mbss)) + brcmf_fil_iovar_int_set(ifp, "mbss", 1); + err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1); if (err < 0) { brcmf_err("setting AP mode failed %d\n", err); @@ -3937,7 +4091,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); exit: - if (err) { + if ((err) && (!mbss)) { brcmf_set_mpc(ifp, 1); brcmf_configure_arp_offload(ifp, true); } @@ -3958,20 +4112,31 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) /* first to make sure they get processed by fw. 
*/ msleep(400); + if (ifp->vif->mbss) { + err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1); + return err; + } + memset(&join_params, 0, sizeof(join_params)); err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID, &join_params, sizeof(join_params)); if (err < 0) brcmf_err("SET SSID error (%d)\n", err); - err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0); + err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1); if (err < 0) - brcmf_err("BRCMF_C_UP error %d\n", err); + brcmf_err("BRCMF_C_DOWN error %d\n", err); err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0); if (err < 0) brcmf_err("setting AP mode failed %d\n", err); err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0); if (err < 0) brcmf_err("setting INFRA mode failed %d\n", err); + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) + brcmf_fil_iovar_int_set(ifp, "mbss", 0); + /* Bring device back up so it can be used again */ + err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1); + if (err < 0) + brcmf_err("BRCMF_C_UP error %d\n", err); } else { bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx); bss_enable.enable = cpu_to_le32(0); @@ -4004,24 +4169,24 @@ brcmf_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev, static int brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev, - const u8 *mac) + struct station_del_parameters *params) { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_scb_val_le scbval; struct brcmf_if *ifp = netdev_priv(ndev); s32 err; - if (!mac) + if (!params->mac) return -EFAULT; - brcmf_dbg(TRACE, "Enter %pM\n", mac); + brcmf_dbg(TRACE, "Enter %pM\n", params->mac); if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif) ifp = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; if (!check_vif_up(ifp->vif)) return -EIO; - memcpy(&scbval.ea, mac, ETH_ALEN); + memcpy(&scbval.ea, params->mac, ETH_ALEN); scbval.val = cpu_to_le32(WLAN_REASON_DEAUTH_LEAVING); err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON, &scbval, sizeof(scbval)); @@ -4323,7 +4488,9 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, enum nl80211_iftype type, bool pm_block) { + struct brcmf_cfg80211_vif *vif_walk; struct brcmf_cfg80211_vif *vif; + bool mbss; brcmf_dbg(TRACE, "allocating virtual interface (size=%zu)\n", sizeof(*vif)); @@ -4339,6 +4506,17 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, brcmf_init_prof(&vif->profile); + if (type == NL80211_IFTYPE_AP) { + mbss = false; + list_for_each_entry(vif_walk, &cfg->vif_list, list) { + if (vif_walk->wdev.iftype == NL80211_IFTYPE_AP) { + mbss = true; + break; + } + } + vif->mbss = mbss; + } + list_add_tail(&vif->list, &cfg->vif_list); return vif; } @@ -4581,6 +4759,7 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg, struct net_device *ndev, const struct brcmf_event_msg *e, void *data) { + struct brcmf_if *ifp = netdev_priv(ndev); static int generation; u32 event = e->event_code; u32 reason = e->reason; @@ -4591,6 +4770,8 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg, ndev != cfg_to_ndev(cfg)) { brcmf_dbg(CONN, "AP mode link down\n"); complete(&cfg->vif_disabled); + if (ifp->vif->mbss) + brcmf_remove_interface(ifp->drvr, ifp->bssidx); return 0; } @@ -5382,7 +5563,28 @@ static int brcmf_setup_wiphybands(struct wiphy *wiphy) return 0; } -static const struct ieee80211_iface_limit brcmf_iface_limits[] = { +static const struct ieee80211_iface_limit brcmf_iface_limits_mbss[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION) | + 
BIT(NL80211_IFTYPE_ADHOC) + }, + { + .max = 4, + .types = BIT(NL80211_IFTYPE_AP) + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO) + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE) + } +}; + +static const struct ieee80211_iface_limit brcmf_iface_limits_sbss[] = { { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) | @@ -5403,8 +5605,8 @@ static struct ieee80211_iface_combination brcmf_iface_combos[] = { { .max_interfaces = BRCMF_IFACE_MAX_CNT, .num_different_channels = 1, - .n_limits = ARRAY_SIZE(brcmf_iface_limits), - .limits = brcmf_iface_limits + .n_limits = ARRAY_SIZE(brcmf_iface_limits_sbss), + .limits = brcmf_iface_limits_sbss, } }; @@ -5446,10 +5648,13 @@ static void brcmf_wiphy_pno_params(struct wiphy *wiphy) wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; } - #ifdef CONFIG_PM static const struct wiphy_wowlan_support brcmf_wowlan_support = { .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT, + .n_patterns = BRCMF_WOWL_MAXPATTERNS, + .pattern_max_len = BRCMF_WOWL_MAXPATTERNSIZE, + .pattern_min_len = 1, + .max_pkt_offset = 1500, }; #endif @@ -5477,6 +5682,10 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) ifc_combo = brcmf_iface_combos[0]; if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN)) ifc_combo.num_different_channels = 2; + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) { + ifc_combo.n_limits = ARRAY_SIZE(brcmf_iface_limits_mbss); + ifc_combo.limits = brcmf_iface_limits_mbss; + } wiphy->iface_combinations = kmemdup(&ifc_combo, sizeof(ifc_combo), GFP_KERNEL); @@ -5613,7 +5822,8 @@ enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp) return wdev->iftype; } -bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg, unsigned long state) +bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg, + unsigned long state) { struct brcmf_cfg80211_vif *vif; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h similarity index 98% rename from drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h rename to drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h index 6abf94e41d3d0683b3f5e95da0fadb3b1f0f8c3b..9e98b8d5275787a5fa503e0e94911c57640ef8db 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h @@ -14,8 +14,8 @@ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#ifndef _wl_cfg80211_h_ -#define _wl_cfg80211_h_ +#ifndef BRCMFMAC_CFG80211_H +#define BRCMFMAC_CFG80211_H /* for brcmu_d11inf */ #include @@ -183,6 +183,7 @@ struct vif_saved_ie { * @pm_block: power-management blocked. * @list: linked list. * @mgmt_rx_reg: registered rx mgmt frame types. + * @mbss: Multiple BSS type, set if not first AP (not relevant for P2P).
*/ struct brcmf_cfg80211_vif { struct brcmf_if *ifp; @@ -194,6 +195,7 @@ struct brcmf_cfg80211_vif { struct vif_saved_ie saved_ie; struct list_head list; u16 mgmt_rx_reg; + bool mbss; }; /* association inform */ @@ -480,7 +482,8 @@ const struct brcmf_tlv * brcmf_parse_tlvs(const void *buf, int buflen, uint key); u16 channel_to_chanspec(struct brcmu_d11inf *d11inf, struct ieee80211_channel *ch); -bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg, unsigned long state); +bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg, + unsigned long state); void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg, struct brcmf_cfg80211_vif *vif); bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg); @@ -493,4 +496,4 @@ void brcmf_set_mpc(struct brcmf_if *ndev, int mpc); void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg); void brcmf_cfg80211_free_netdev(struct net_device *ndev); -#endif /* _wl_cfg80211_h_ */ +#endif /* BRCMFMAC_CFG80211_H */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c index 95efde868db82e6b36e0c954fe4d0ad2cd797721..ddae0b5e56eced02eed599d41decd62ab7b2e273 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c @@ -25,7 +25,7 @@ #include #include #include -#include "dhd_dbg.h" +#include "debug.h" #include "chip.h" /* SOC Interconnect types (aka chip types) */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/common.c b/drivers/net/wireless/brcm80211/brcmfmac/common.c new file mode 100644 index 0000000000000000000000000000000000000000..1861a13e8d03d6157455001ac5a37fb18de1dc63 --- /dev/null +++ b/drivers/net/wireless/brcm80211/brcmfmac/common.c @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include "core.h" +#include "bus.h" +#include "debug.h" +#include "fwil.h" +#include "fwil_types.h" +#include "tracepoint.h" + +#define BRCMF_DEFAULT_BCN_TIMEOUT 3 +#define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40 +#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40 + +/* boost value for RSSI_DELTA in preferred join selection */ +#define BRCMF_JOIN_PREF_RSSI_BOOST 8 + +int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) +{ + s8 eventmask[BRCMF_EVENTING_MASK_LEN]; + u8 buf[BRCMF_DCMD_SMLEN]; + struct brcmf_join_pref_params join_pref_params[2]; + char *ptr; + s32 err; + + /* retrieve mac address */ + err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr, + sizeof(ifp->mac_addr)); + if (err < 0) { + brcmf_err("Retrieving cur_etheraddr failed, %d\n", + err); + goto done; + } + memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac)); + + /* query for 'ver' to get version info from firmware */ + memset(buf, 0, sizeof(buf)); + strcpy(buf, "ver"); + err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf)); + if (err < 0) { + brcmf_err("Retrieving version information failed, %d\n", + err); + goto done; + } + ptr = (char *)buf; + strsep(&ptr, "\n"); + + /* Print fw version info */ + brcmf_err("Firmware version = %s\n", buf); + + /* locate firmware version number for ethtool */ + ptr = strrchr(buf, ' ') + 1; + strlcpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver)); + + /* set mpc */ + err = brcmf_fil_iovar_int_set(ifp, "mpc", 1); + if (err) { + brcmf_err("failed setting mpc\n"); + goto done; + } + + /* + * Setup timeout if Beacons are lost and roam is off to report + * link down + */ + err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout", + BRCMF_DEFAULT_BCN_TIMEOUT); + if (err) { + brcmf_err("bcn_timeout error (%d)\n", err); + goto done; + } + + /* Enable/Disable built-in roaming to allow ext supplicant to take + * care of roaming + */ + err = brcmf_fil_iovar_int_set(ifp, "roam_off", 1); + if (err) { + brcmf_err("roam_off error (%d)\n", err); + goto done; + } + + /* Setup join_pref to select target by RSSI (with boost on 5GHz) */ + join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA; + join_pref_params[0].len = 2; + join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST; + join_pref_params[0].band = WLC_BAND_5G; + join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI; + join_pref_params[1].len = 2; + join_pref_params[1].rssi_gain = 0; + join_pref_params[1].band = 0; + err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params, + sizeof(join_pref_params)); + if (err) + brcmf_err("Set join_pref error (%d)\n", err); + + /* Setup event_msgs, enable E_IF */ + err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask, + BRCMF_EVENTING_MASK_LEN); + if (err) { + brcmf_err("Get event_msgs error (%d)\n", err); + goto done; + } + setbit(eventmask, BRCMF_E_IF); + err = brcmf_fil_iovar_data_set(ifp, "event_msgs", eventmask, + BRCMF_EVENTING_MASK_LEN); + if (err) { + brcmf_err("Set event_msgs error (%d)\n", err); + goto done; + } + + /* Setup default scan channel time */ + err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME, + BRCMF_DEFAULT_SCAN_CHANNEL_TIME); + if (err) { + brcmf_err("BRCMF_C_SET_SCAN_CHANNEL_TIME error (%d)\n", + err); + goto done; + } + + /* Setup default scan unassoc time */ + err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME, + BRCMF_DEFAULT_SCAN_UNASSOC_TIME); + if (err) { + brcmf_err("BRCMF_C_SET_SCAN_UNASSOC_TIME error (%d)\n", + err); + goto done; + } + + /* do bus specific preinit here */ + err =
brcmf_bus_preinit(ifp->drvr->bus_if); +done: + return err; +} + +#if defined(CONFIG_BRCM_TRACING) || defined(CONFIG_BRCMDBG) +void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + if (brcmf_msg_level & level) + pr_debug("%s %pV", func, &vaf); + trace_brcmf_dbg(level, func, &vaf); + va_end(args); +} +#endif diff --git a/drivers/net/wireless/brcm80211/brcmfmac/commonring.c b/drivers/net/wireless/brcm80211/brcmfmac/commonring.c index c6d65b8e1e1565c6aaa9fed7b9af5902516d855d..77656c711bedbfc44a8a7ddf97b58cb3efabf24a 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/commonring.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/commonring.c @@ -19,7 +19,7 @@ #include #include -#include "dhd.h" +#include "core.h" #include "commonring.h" diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/core.c similarity index 96% rename from drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c rename to drivers/net/wireless/brcm80211/brcmfmac/core.c index fb1043908a23eec22c71845d83686b81c872da21..effe6d7831d986f7f30ae89370ad4f58c6cdeadf 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/core.c @@ -22,12 +22,12 @@ #include #include -#include "dhd.h" -#include "dhd_bus.h" -#include "dhd_dbg.h" +#include "core.h" +#include "bus.h" +#include "debug.h" #include "fwil_types.h" #include "p2p.h" -#include "wl_cfg80211.h" +#include "cfg80211.h" #include "fwil.h" #include "fwsignal.h" #include "feature.h" @@ -836,7 +836,7 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx, return ifp; } -void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx) +static void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx) { struct brcmf_if *ifp; @@ -869,6 +869,38 @@ void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx) } } +void brcmf_remove_interface(struct brcmf_pub *drvr, u32 bssidx) +{ + if (drvr->iflist[bssidx]) { + brcmf_fws_del_interface(drvr->iflist[bssidx]); + brcmf_del_if(drvr, bssidx); + } +} + +int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr) +{ + int ifidx; + int bsscfgidx; + bool available; + int highest; + + available = false; + bsscfgidx = 2; + highest = 2; + for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) { + if (drvr->iflist[ifidx]) { + if (drvr->iflist[ifidx]->bssidx == bsscfgidx) + bsscfgidx = highest + 1; + else if (drvr->iflist[ifidx]->bssidx > highest) + highest = drvr->iflist[ifidx]->bssidx; + } else { + available = true; + } + } + + return available ? 
bsscfgidx : -ENOMEM; +} + int brcmf_attach(struct device *dev) { struct brcmf_pub *drvr = NULL; @@ -1033,10 +1065,7 @@ void brcmf_detach(struct device *dev) /* make sure primary interface removed last */ for (i = BRCMF_MAX_IFS-1; i > -1; i--) - if (drvr->iflist[i]) { - brcmf_fws_del_interface(drvr->iflist[i]); - brcmf_del_if(drvr, i); - } + brcmf_remove_interface(drvr, i); brcmf_cfg80211_detach(drvr->config); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/core.h similarity index 96% rename from drivers/net/wireless/brcm80211/brcmfmac/dhd.h rename to drivers/net/wireless/brcm80211/brcmfmac/core.h index 5e4317dbc2b0a14b00c464c5e3e1f6e30689a046..23f74b139cc8ef13e845057e165ec329e7d639c4 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/core.h @@ -18,8 +18,8 @@ * Common types * */ -#ifndef _BRCMF_H_ -#define _BRCMF_H_ +#ifndef BRCMFMAC_CORE_H +#define BRCMFMAC_CORE_H #include "fweh.h" @@ -83,7 +83,6 @@ struct brcmf_pub { /* Internal brcmf items */ uint hdrlen; /* Total BRCMF header length (proto + bus) */ uint rxsz; /* Rx buffer size bus module should use */ - u8 wme_dp; /* wme discard priority */ /* Dongle media info */ char fwver[BRCMF_DRIVER_FIRMWARE_VERSION_LEN]; @@ -176,7 +175,8 @@ char *brcmf_ifname(struct brcmf_pub *drvr, int idx); int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked); struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx, char *name, u8 *mac_addr); -void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx); +void brcmf_remove_interface(struct brcmf_pub *drvr, u32 bssidx); +int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr); void brcmf_txflowblock_if(struct brcmf_if *ifp, enum brcmf_netif_stop_reason reason, bool state); void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx, @@ -186,4 +186,4 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb); /* Sets dongle media info (drv_version, mac address). */ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp); -#endif /* _BRCMF_H_ */ +#endif /* BRCMFMAC_CORE_H */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/debug.c similarity index 98% rename from drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c rename to drivers/net/wireless/brcm80211/brcmfmac/debug.c index be9f4f829192cc020294f08950b0fe995fca10dc..9b473d50b0058d68ba8cf60c02f4cbd55b60edb0 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/debug.c @@ -19,9 +19,9 @@ #include #include -#include "dhd.h" -#include "dhd_bus.h" -#include "dhd_dbg.h" +#include "core.h" +#include "bus.h" +#include "debug.h" static struct dentry *root_folder; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/debug.h similarity index 98% rename from drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h rename to drivers/net/wireless/brcm80211/brcmfmac/debug.h index dec40d316c829129cfd86b5043cfc3fbdedb03b4..eb0b8c47479d1f7acaadd1ca2f129e2fa07dbe2c 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/debug.h @@ -14,8 +14,8 @@ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#ifndef _BRCMF_DBG_H_ -#define _BRCMF_DBG_H_ +#ifndef BRCMFMAC_DEBUG_H +#define BRCMFMAC_DEBUG_H /* message levels */ #define BRCMF_TRACE_VAL 0x00000002 @@ -133,4 +133,4 @@ int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn, } #endif -#endif /* _BRCMF_DBG_H_ */ +#endif /* BRCMFMAC_DEBUG_H */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c deleted file mode 100644 index d991f8e3d9ece11e6882d4569e58311ec689c0a5..0000000000000000000000000000000000000000 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c +++ /dev/null @@ -1,400 +0,0 @@ -/* - * Copyright (c) 2010 Broadcom Corporation - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#include -#include -#include -#include -#include -#include "dhd.h" -#include "dhd_bus.h" -#include "dhd_dbg.h" -#include "fwil.h" -#include "fwil_types.h" -#include "tracepoint.h" - -#define PKTFILTER_BUF_SIZE 128 -#define BRCMF_DEFAULT_BCN_TIMEOUT 3 -#define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40 -#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40 -#define BRCMF_DEFAULT_PACKET_FILTER "100 0 0 0 0x01 0x00" - -/* boost value for RSSI_DELTA in preferred join selection */ -#define BRCMF_JOIN_PREF_RSSI_BOOST 8 - - -bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, - struct sk_buff *pkt, int prec) -{ - struct sk_buff *p; - int eprec = -1; /* precedence to evict from */ - bool discard_oldest; - struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_pub *drvr = bus_if->drvr; - - /* Fast case, precedence queue is not full and we are also not - * exceeding total queue length - */ - if (!pktq_pfull(q, prec) && !pktq_full(q)) { - brcmu_pktq_penq(q, prec, pkt); - return true; - } - - /* Determine precedence from which to evict packet, if any */ - if (pktq_pfull(q, prec)) - eprec = prec; - else if (pktq_full(q)) { - p = brcmu_pktq_peek_tail(q, &eprec); - if (eprec > prec) - return false; - } - - /* Evict if needed */ - if (eprec >= 0) { - /* Detect queueing to unconfigured precedence */ - discard_oldest = ac_bitmap_tst(drvr->wme_dp, eprec); - if (eprec == prec && !discard_oldest) - return false; /* refuse newer (incoming) packet */ - /* Evict packet according to discard policy */ - p = discard_oldest ? brcmu_pktq_pdeq(q, eprec) : - brcmu_pktq_pdeq_tail(q, eprec); - if (p == NULL) - brcmf_err("brcmu_pktq_penq() failed, oldest %d\n", - discard_oldest); - - brcmu_pkt_buf_free_skb(p); - } - - /* Enqueue */ - p = brcmu_pktq_penq(q, prec, pkt); - if (p == NULL) - brcmf_err("brcmu_pktq_penq() failed\n"); - - return p != NULL; -} - -/* Convert user's input in hex pattern to byte-size mask */ -static int brcmf_c_pattern_atoh(char *src, char *dst) -{ - int i; - if (strncmp(src, "0x", 2) != 0 && strncmp(src, "0X", 2) != 0) { - brcmf_err("Mask invalid format. 
Needs to start with 0x\n"); - return -EINVAL; - } - src = src + 2; /* Skip past 0x */ - if (strlen(src) % 2 != 0) { - brcmf_err("Mask invalid format. Length must be even.\n"); - return -EINVAL; - } - for (i = 0; *src != '\0'; i++) { - unsigned long res; - char num[3]; - strncpy(num, src, 2); - num[2] = '\0'; - if (kstrtoul(num, 16, &res)) - return -EINVAL; - dst[i] = (u8)res; - src += 2; - } - return i; -} - -static void -brcmf_c_pktfilter_offload_enable(struct brcmf_if *ifp, char *arg, int enable, - int master_mode) -{ - unsigned long res; - char *argv; - char *arg_save = NULL, *arg_org = NULL; - s32 err; - struct brcmf_pkt_filter_enable_le enable_parm; - - arg_save = kstrdup(arg, GFP_ATOMIC); - if (!arg_save) - goto fail; - - arg_org = arg_save; - - argv = strsep(&arg_save, " "); - - if (argv == NULL) { - brcmf_err("No args provided\n"); - goto fail; - } - - /* Parse packet filter id. */ - enable_parm.id = 0; - if (!kstrtoul(argv, 0, &res)) - enable_parm.id = cpu_to_le32((u32)res); - - /* Enable/disable the specified filter. */ - enable_parm.enable = cpu_to_le32(enable); - - err = brcmf_fil_iovar_data_set(ifp, "pkt_filter_enable", &enable_parm, - sizeof(enable_parm)); - if (err) - brcmf_err("Set pkt_filter_enable error (%d)\n", err); - - /* Control the master mode */ - err = brcmf_fil_iovar_int_set(ifp, "pkt_filter_mode", master_mode); - if (err) - brcmf_err("Set pkt_filter_mode error (%d)\n", err); - -fail: - kfree(arg_org); -} - -static void brcmf_c_pktfilter_offload_set(struct brcmf_if *ifp, char *arg) -{ - struct brcmf_pkt_filter_le *pkt_filter; - unsigned long res; - int buf_len; - s32 err; - u32 mask_size; - u32 pattern_size; - char *argv[8], *buf = NULL; - int i = 0; - char *arg_save = NULL, *arg_org = NULL; - - arg_save = kstrdup(arg, GFP_ATOMIC); - if (!arg_save) - goto fail; - - arg_org = arg_save; - - buf = kmalloc(PKTFILTER_BUF_SIZE, GFP_ATOMIC); - if (!buf) - goto fail; - - argv[i] = strsep(&arg_save, " "); - while (argv[i]) { - i++; - if (i >= 8) { - brcmf_err("Too many parameters\n"); - goto fail; - } - argv[i] = strsep(&arg_save, " "); - } - - if (i != 6) { - brcmf_err("Not enough args provided %d\n", i); - goto fail; - } - - pkt_filter = (struct brcmf_pkt_filter_le *)buf; - - /* Parse packet filter id. */ - pkt_filter->id = 0; - if (!kstrtoul(argv[0], 0, &res)) - pkt_filter->id = cpu_to_le32((u32)res); - - /* Parse filter polarity. */ - pkt_filter->negate_match = 0; - if (!kstrtoul(argv[1], 0, &res)) - pkt_filter->negate_match = cpu_to_le32((u32)res); - - /* Parse filter type. */ - pkt_filter->type = 0; - if (!kstrtoul(argv[2], 0, &res)) - pkt_filter->type = cpu_to_le32((u32)res); - - /* Parse pattern filter offset. */ - pkt_filter->u.pattern.offset = 0; - if (!kstrtoul(argv[3], 0, &res)) - pkt_filter->u.pattern.offset = cpu_to_le32((u32)res); - - /* Parse pattern filter mask. */ - mask_size = brcmf_c_pattern_atoh(argv[4], - (char *)pkt_filter->u.pattern.mask_and_pattern); - - /* Parse pattern filter pattern. 
*/ - pattern_size = brcmf_c_pattern_atoh(argv[5], - (char *)&pkt_filter->u.pattern.mask_and_pattern[mask_size]); - - if (mask_size != pattern_size) { - brcmf_err("Mask and pattern not the same size\n"); - goto fail; - } - - pkt_filter->u.pattern.size_bytes = cpu_to_le32(mask_size); - buf_len = offsetof(struct brcmf_pkt_filter_le, - u.pattern.mask_and_pattern); - buf_len += mask_size + pattern_size; - - err = brcmf_fil_iovar_data_set(ifp, "pkt_filter_add", pkt_filter, - buf_len); - if (err) - brcmf_err("Set pkt_filter_add error (%d)\n", err); - -fail: - kfree(arg_org); - - kfree(buf); -} - -int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) -{ - s8 eventmask[BRCMF_EVENTING_MASK_LEN]; - u8 buf[BRCMF_DCMD_SMLEN]; - struct brcmf_join_pref_params join_pref_params[2]; - char *ptr; - s32 err; - - /* retreive mac address */ - err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr, - sizeof(ifp->mac_addr)); - if (err < 0) { - brcmf_err("Retreiving cur_etheraddr failed, %d\n", - err); - goto done; - } - memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac)); - - /* query for 'ver' to get version info from firmware */ - memset(buf, 0, sizeof(buf)); - strcpy(buf, "ver"); - err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf)); - if (err < 0) { - brcmf_err("Retreiving version information failed, %d\n", - err); - goto done; - } - ptr = (char *)buf; - strsep(&ptr, "\n"); - - /* Print fw version info */ - brcmf_err("Firmware version = %s\n", buf); - - /* locate firmware version number for ethtool */ - ptr = strrchr(buf, ' ') + 1; - strlcpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver)); - - /* set mpc */ - err = brcmf_fil_iovar_int_set(ifp, "mpc", 1); - if (err) { - brcmf_err("failed setting mpc\n"); - goto done; - } - - /* - * Setup timeout if Beacons are lost and roam is off to report - * link down - */ - err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout", - BRCMF_DEFAULT_BCN_TIMEOUT); - if (err) { - brcmf_err("bcn_timeout error (%d)\n", err); - goto done; - } - - /* Enable/Disable build-in roaming to allowed ext supplicant to take - * of romaing - */ - err = brcmf_fil_iovar_int_set(ifp, "roam_off", 1); - if (err) { - brcmf_err("roam_off error (%d)\n", err); - goto done; - } - - /* Setup join_pref to select target by RSSI(with boost on 5GHz) */ - join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA; - join_pref_params[0].len = 2; - join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST; - join_pref_params[0].band = WLC_BAND_5G; - join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI; - join_pref_params[1].len = 2; - join_pref_params[1].rssi_gain = 0; - join_pref_params[1].band = 0; - err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params, - sizeof(join_pref_params)); - if (err) - brcmf_err("Set join_pref error (%d)\n", err); - - /* Setup event_msgs, enable E_IF */ - err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask, - BRCMF_EVENTING_MASK_LEN); - if (err) { - brcmf_err("Get event_msgs error (%d)\n", err); - goto done; - } - setbit(eventmask, BRCMF_E_IF); - err = brcmf_fil_iovar_data_set(ifp, "event_msgs", eventmask, - BRCMF_EVENTING_MASK_LEN); - if (err) { - brcmf_err("Set event_msgs error (%d)\n", err); - goto done; - } - - /* Setup default scan channel time */ - err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME, - BRCMF_DEFAULT_SCAN_CHANNEL_TIME); - if (err) { - brcmf_err("BRCMF_C_SET_SCAN_CHANNEL_TIME error (%d)\n", - err); - goto done; - } - - /* Setup default scan unassoc time */ - err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME, 
- BRCMF_DEFAULT_SCAN_UNASSOC_TIME); - if (err) { - brcmf_err("BRCMF_C_SET_SCAN_UNASSOC_TIME error (%d)\n", - err); - goto done; - } - - /* Setup packet filter */ - brcmf_c_pktfilter_offload_set(ifp, BRCMF_DEFAULT_PACKET_FILTER); - brcmf_c_pktfilter_offload_enable(ifp, BRCMF_DEFAULT_PACKET_FILTER, - 0, true); - - /* do bus specific preinit here */ - err = brcmf_bus_preinit(ifp->drvr->bus_if); -done: - return err; -} - -#ifdef CONFIG_BRCM_TRACING -void __brcmf_err(const char *func, const char *fmt, ...) -{ - struct va_format vaf = { - .fmt = fmt, - }; - va_list args; - - va_start(args, fmt); - vaf.va = &args; - pr_err("%s: %pV", func, &vaf); - trace_brcmf_err(func, &vaf); - va_end(args); -} -#endif -#if defined(CONFIG_BRCM_TRACING) || defined(CONFIG_BRCMDBG) -void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...) -{ - struct va_format vaf = { - .fmt = fmt, - }; - va_list args; - - va_start(args, fmt); - vaf.va = &args; - if (brcmf_msg_level & level) - pr_debug("%s %pV", func, &vaf); - trace_brcmf_dbg(level, func, &vaf); - va_end(args); -} -#endif diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/brcm80211/brcmfmac/feature.c index aed53acef456a4404e2f26c9273c2ea966ea5605..defb7a44e0bc1ff554195890dcccc31bfb0c9769 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.c @@ -17,17 +17,12 @@ #include #include -#include "dhd.h" -#include "dhd_bus.h" -#include "dhd_dbg.h" +#include "core.h" +#include "bus.h" +#include "debug.h" #include "fwil.h" #include "feature.h" -/* - * firmware error code received if iovar is unsupported. - */ -#define EBRCMF_FEAT_UNSUPPORTED 23 - /* * expand feature list to array of feature strings. */ @@ -102,6 +97,28 @@ static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp, } } +/** + * brcmf_feat_iovar_int_set() - determine feature through iovar set. + * + * @ifp: interface to query. + * @id: feature id. + * @name: iovar name. + */ +static void brcmf_feat_iovar_int_set(struct brcmf_if *ifp, + enum brcmf_feat_id id, char *name, u32 val) +{ + int err; + + err = brcmf_fil_iovar_int_set(ifp, name, val); + if (err == 0) { + brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]); + ifp->drvr->feat_flags |= BIT(id); + } else { + brcmf_dbg(TRACE, "%s feature check failed: %d\n", + brcmf_feat_names[id], err); + } +} + void brcmf_feat_attach(struct brcmf_pub *drvr) { struct brcmf_if *ifp = drvr->iflist[0]; @@ -109,6 +126,7 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan"); if (drvr->bus_if->wowl_supported) brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl"); + brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0); /* set chip related quirks */ switch (drvr->bus_if->chip) { diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/brcm80211/brcmfmac/feature.h index b9a796d0a44d9d1f3b8b01092d76937998dccd09..f5832e077bb7999344877f5fd3bf792cad2ce298 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/feature.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.h @@ -22,6 +22,7 @@ * MCHAN: multi-channel for concurrent P2P. 
*/ #define BRCMF_FEAT_LIST \ + BRCMF_FEAT_DEF(MBSS) \ BRCMF_FEAT_DEF(MCHAN) \ BRCMF_FEAT_DEF(WOWL) /* diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c index 8ea9f283d2b8065f4766978c2e43e05e6576fba6..1ff787d1a36be97848b412cdbc1246b5b3d8afe9 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c @@ -20,7 +20,7 @@ #include #include -#include "dhd_dbg.h" +#include "debug.h" #include "firmware.h" char brcmf_firmware_path[BRCMF_FW_PATH_LEN]; @@ -262,8 +262,7 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) fail: brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); - if (fwctx->code) - release_firmware(fwctx->code); + release_firmware(fwctx->code); device_release_driver(fwctx->dev); kfree(fwctx); } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c index 1faa929f5fff2c5fcfe667d7b585ca8621448d5a..44f3a84d1999c6b9e93f116d1b5f27a706546164 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c @@ -19,9 +19,9 @@ #include #include -#include "dhd.h" -#include "dhd_dbg.h" -#include "dhd_bus.h" +#include "core.h" +#include "debug.h" +#include "bus.h" #include "proto.h" #include "flowring.h" #include "msgbuf.h" diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c index 44fc85f68f7aceba2788da99126164b0a946a6a7..ec62492ffa69fd3fefd6481b2ec2e152167b0e36 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c @@ -18,8 +18,8 @@ #include "brcmu_wifi.h" #include "brcmu_utils.h" -#include "dhd.h" -#include "dhd_dbg.h" +#include "core.h" +#include "debug.h" #include "tracepoint.h" #include "fwsignal.h" #include "fweh.h" @@ -221,10 +221,8 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data); - if (ifp && ifevent->action == BRCMF_E_IF_DEL) { - brcmf_fws_del_interface(ifp); - brcmf_del_if(drvr, ifevent->bssidx); - } + if (ifp && ifevent->action == BRCMF_E_IF_DEL) + brcmf_remove_interface(drvr, ifevent->bssidx); } /** diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c index ded328f80cd1237454274ce53fd39433249b0e85..03f2c406a17bb034f9e7078f8c5c65344f316a9b 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c @@ -22,9 +22,9 @@ #include #include #include -#include "dhd.h" -#include "dhd_bus.h" -#include "dhd_dbg.h" +#include "core.h" +#include "bus.h" +#include "debug.h" #include "tracepoint.h" #include "fwil.h" #include "proto.h" @@ -32,6 +32,76 @@ #define MAX_HEX_DUMP_LEN 64 +#ifdef DEBUG +static const char * const brcmf_fil_errstr[] = { + "BCME_OK", + "BCME_ERROR", + "BCME_BADARG", + "BCME_BADOPTION", + "BCME_NOTUP", + "BCME_NOTDOWN", + "BCME_NOTAP", + "BCME_NOTSTA", + "BCME_BADKEYIDX", + "BCME_RADIOOFF", + "BCME_NOTBANDLOCKED", + "BCME_NOCLK", + "BCME_BADRATESET", + "BCME_BADBAND", + "BCME_BUFTOOSHORT", + "BCME_BUFTOOLONG", + "BCME_BUSY", + "BCME_NOTASSOCIATED", + "BCME_BADSSIDLEN", + "BCME_OUTOFRANGECHAN", + "BCME_BADCHAN", + "BCME_BADADDR", + "BCME_NORESOURCE", + "BCME_UNSUPPORTED", + "BCME_BADLEN", + "BCME_NOTREADY", + "BCME_EPERM", + "BCME_NOMEM", + "BCME_ASSOCIATED", + "BCME_RANGE", + "BCME_NOTFOUND", + 
"BCME_WME_NOT_ENABLED", + "BCME_TSPEC_NOTFOUND", + "BCME_ACM_NOTSUPPORTED", + "BCME_NOT_WME_ASSOCIATION", + "BCME_SDIO_ERROR", + "BCME_DONGLE_DOWN", + "BCME_VERSION", + "BCME_TXFAIL", + "BCME_RXFAIL", + "BCME_NODEVICE", + "BCME_NMODE_DISABLED", + "BCME_NONRESIDENT", + "BCME_SCANREJECT", + "BCME_USAGE_ERROR", + "BCME_IOCTL_ERROR", + "BCME_SERIAL_PORT_ERR", + "BCME_DISABLED", + "BCME_DECERR", + "BCME_ENCERR", + "BCME_MICERR", + "BCME_REPLAY", + "BCME_IE_NOTFOUND", +}; + +static const char *brcmf_fil_get_errstr(u32 err) +{ + if (err >= ARRAY_SIZE(brcmf_fil_errstr)) + return "(unknown)"; + + return brcmf_fil_errstr[err]; +} +#else +static const char *brcmf_fil_get_errstr(u32 err) +{ + return ""; +} +#endif /* DEBUG */ static s32 brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set) @@ -52,11 +122,11 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set) err = brcmf_proto_query_dcmd(drvr, ifp->ifidx, cmd, data, len); if (err >= 0) - err = 0; - else - brcmf_dbg(FIL, "Failed err=%d\n", err); + return 0; - return err; + brcmf_dbg(FIL, "Failed: %s (%d)\n", + brcmf_fil_get_errstr((u32)(-err)), err); + return -EBADE; } s32 @@ -66,7 +136,7 @@ brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len) mutex_lock(&ifp->drvr->proto_block); - brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len); + brcmf_dbg(FIL, "ifidx=%d, cmd=%d, len=%d\n", ifp->ifidx, cmd, len); brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data, min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n"); @@ -84,7 +154,7 @@ brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len) mutex_lock(&ifp->drvr->proto_block); err = brcmf_fil_cmd_data(ifp, cmd, data, len, false); - brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len); + brcmf_dbg(FIL, "ifidx=%d, cmd=%d, len=%d\n", ifp->ifidx, cmd, len); brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data, min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n"); @@ -101,7 +171,7 @@ brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data) __le32 data_le = cpu_to_le32(data); mutex_lock(&ifp->drvr->proto_block); - brcmf_dbg(FIL, "cmd=%d, value=%d\n", cmd, data); + brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, data); err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true); mutex_unlock(&ifp->drvr->proto_block); @@ -118,7 +188,7 @@ brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data) err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false); mutex_unlock(&ifp->drvr->proto_block); *data = le32_to_cpu(data_le); - brcmf_dbg(FIL, "cmd=%d, value=%d\n", cmd, *data); + brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data); return err; } @@ -154,7 +224,7 @@ brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data, mutex_lock(&drvr->proto_block); - brcmf_dbg(FIL, "name=%s, len=%d\n", name, len); + brcmf_dbg(FIL, "ifidx=%d, name=%s, len=%d\n", ifp->ifidx, name, len); brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data, min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n"); @@ -194,7 +264,7 @@ brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data, brcmf_err("Creating iovar failed\n"); } - brcmf_dbg(FIL, "name=%s, len=%d\n", name, len); + brcmf_dbg(FIL, "ifidx=%d, name=%s, len=%d\n", ifp->ifidx, name, len); brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data, min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n"); @@ -277,7 +347,8 @@ brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name, mutex_lock(&drvr->proto_block); - brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len); + brcmf_dbg(FIL, 
"ifidx=%d, bssidx=%d, name=%s, len=%d\n", ifp->ifidx, + ifp->bssidx, name, len); brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data, min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n"); @@ -316,7 +387,8 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name, err = -EPERM; brcmf_err("Creating bsscfg failed\n"); } - brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len); + brcmf_dbg(FIL, "ifidx=%d, bssidx=%d, name=%s, len=%d\n", ifp->ifidx, + ifp->bssidx, name, len); brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data, min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n"); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h index 5ff5cd0bb0325f0d23c2ad1359b113b8f69b2cc8..50891c02c4c162640acca7dc44ed56aec27b7385 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h @@ -55,59 +55,63 @@ /* WOWL bits */ /* Wakeup on Magic packet: */ -#define WL_WOWL_MAGIC (1 << 0) +#define BRCMF_WOWL_MAGIC (1 << 0) /* Wakeup on Netpattern */ -#define WL_WOWL_NET (1 << 1) +#define BRCMF_WOWL_NET (1 << 1) /* Wakeup on loss-of-link due to Disassoc/Deauth: */ -#define WL_WOWL_DIS (1 << 2) +#define BRCMF_WOWL_DIS (1 << 2) /* Wakeup on retrograde TSF: */ -#define WL_WOWL_RETR (1 << 3) +#define BRCMF_WOWL_RETR (1 << 3) /* Wakeup on loss of beacon: */ -#define WL_WOWL_BCN (1 << 4) +#define BRCMF_WOWL_BCN (1 << 4) /* Wakeup after test: */ -#define WL_WOWL_TST (1 << 5) +#define BRCMF_WOWL_TST (1 << 5) /* Wakeup after PTK refresh: */ -#define WL_WOWL_M1 (1 << 6) +#define BRCMF_WOWL_M1 (1 << 6) /* Wakeup after receipt of EAP-Identity Req: */ -#define WL_WOWL_EAPID (1 << 7) +#define BRCMF_WOWL_EAPID (1 << 7) /* Wakeind via PME(0) or GPIO(1): */ -#define WL_WOWL_PME_GPIO (1 << 8) +#define BRCMF_WOWL_PME_GPIO (1 << 8) /* need tkip phase 1 key to be updated by the driver: */ -#define WL_WOWL_NEEDTKIP1 (1 << 9) +#define BRCMF_WOWL_NEEDTKIP1 (1 << 9) /* enable wakeup if GTK fails: */ -#define WL_WOWL_GTK_FAILURE (1 << 10) +#define BRCMF_WOWL_GTK_FAILURE (1 << 10) /* support extended magic packets: */ -#define WL_WOWL_EXTMAGPAT (1 << 11) +#define BRCMF_WOWL_EXTMAGPAT (1 << 11) /* support ARP/NS/keepalive offloading: */ -#define WL_WOWL_ARPOFFLOAD (1 << 12) +#define BRCMF_WOWL_ARPOFFLOAD (1 << 12) /* read protocol version for EAPOL frames: */ -#define WL_WOWL_WPA2 (1 << 13) +#define BRCMF_WOWL_WPA2 (1 << 13) /* If the bit is set, use key rotaton: */ -#define WL_WOWL_KEYROT (1 << 14) +#define BRCMF_WOWL_KEYROT (1 << 14) /* If the bit is set, frm received was bcast frame: */ -#define WL_WOWL_BCAST (1 << 15) +#define BRCMF_WOWL_BCAST (1 << 15) /* If the bit is set, scan offload is enabled: */ -#define WL_WOWL_SCANOL (1 << 16) +#define BRCMF_WOWL_SCANOL (1 << 16) /* Wakeup on tcpkeep alive timeout: */ -#define WL_WOWL_TCPKEEP_TIME (1 << 17) +#define BRCMF_WOWL_TCPKEEP_TIME (1 << 17) /* Wakeup on mDNS Conflict Resolution: */ -#define WL_WOWL_MDNS_CONFLICT (1 << 18) +#define BRCMF_WOWL_MDNS_CONFLICT (1 << 18) /* Wakeup on mDNS Service Connect: */ -#define WL_WOWL_MDNS_SERVICE (1 << 19) +#define BRCMF_WOWL_MDNS_SERVICE (1 << 19) /* tcp keepalive got data: */ -#define WL_WOWL_TCPKEEP_DATA (1 << 20) +#define BRCMF_WOWL_TCPKEEP_DATA (1 << 20) /* Firmware died in wowl mode: */ -#define WL_WOWL_FW_HALT (1 << 21) +#define BRCMF_WOWL_FW_HALT (1 << 21) /* Enable detection of radio button changes: */ -#define WL_WOWL_ENAB_HWRADIO (1 << 22) +#define BRCMF_WOWL_ENAB_HWRADIO (1 << 22) /* Offloads detected MIC failure(s): */ 
-#define WL_WOWL_MIC_FAIL (1 << 23) +#define BRCMF_WOWL_MIC_FAIL (1 << 23) /* Wakeup in Unassociated state (Net/Magic Pattern): */ -#define WL_WOWL_UNASSOC (1 << 24) +#define BRCMF_WOWL_UNASSOC (1 << 24) /* Wakeup if received matched secured pattern: */ -#define WL_WOWL_SECURE (1 << 25) +#define BRCMF_WOWL_SECURE (1 << 25) /* Link Down indication in WoWL mode: */ -#define WL_WOWL_LINKDOWN (1 << 31) +#define BRCMF_WOWL_LINKDOWN (1 << 31) + +#define BRCMF_WOWL_MAXPATTERNS 8 +#define BRCMF_WOWL_MAXPATTERNSIZE 128 + /* join preference types for join_pref iovar */ enum brcmf_join_pref_types { @@ -124,6 +128,12 @@ enum brcmf_fil_p2p_if_types { BRCMF_FIL_P2P_IF_DEV, }; +enum brcmf_wowl_pattern_type { + BRCMF_WOWL_PATTERN_TYPE_BITMAP = 0, + BRCMF_WOWL_PATTERN_TYPE_ARP, + BRCMF_WOWL_PATTERN_TYPE_NA +}; + struct brcmf_fil_p2p_if_le { u8 addr[ETH_ALEN]; __le16 type; @@ -484,4 +494,35 @@ struct brcmf_rx_mgmt_data { __be32 rate; }; +/** + * struct brcmf_fil_wowl_pattern_le - wowl pattern configuration struct. + * + * @cmd: "add", "del" or "clr". + * @masksize: Size of the mask in #of bytes + * @offset: Pattern byte offset in packet + * @patternoffset: Offset of start of pattern. Starting from field masksize. + * @patternsize: Size of the pattern itself in #of bytes + * @id: id + * @reasonsize: Size of the wakeup reason code + * @type: Type of pattern (enum brcmf_wowl_pattern_type) + */ +struct brcmf_fil_wowl_pattern_le { + u8 cmd[4]; + __le32 masksize; + __le32 offset; + __le32 patternoffset; + __le32 patternsize; + __le32 id; + __le32 reasonsize; + __le32 type; + /* u8 mask[] - Mask follows the structure above */ + /* u8 pattern[] - Pattern follows the mask is at 'patternoffset' */ +}; + +struct brcmf_mbss_ssid_le { + __le32 bsscfgidx; + __le32 SSID_len; + unsigned char SSID[32]; +}; + #endif /* FWIL_TYPES_H_ */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c index 183f08d7fc8c8e1ece4a6965fd604313385580f4..f0dda0ecd23b4395219f31ef88475329e0ab69f2 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c @@ -26,15 +26,15 @@ #include #include -#include "dhd.h" -#include "dhd_dbg.h" -#include "dhd_bus.h" +#include "core.h" +#include "debug.h" +#include "bus.h" #include "fwil.h" #include "fwil_types.h" #include "fweh.h" #include "fwsignal.h" #include "p2p.h" -#include "wl_cfg80211.h" +#include "cfg80211.h" #include "proto.h" /** diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c index 11cc051f97cd42152ed6530257e1c1fcfbcf46e4..456944a6a2db8877ff2a73d20d2046b832e468f4 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c @@ -24,13 +24,13 @@ #include #include -#include "dhd.h" -#include "dhd_dbg.h" +#include "core.h" +#include "debug.h" #include "proto.h" #include "msgbuf.h" #include "commonring.h" #include "flowring.h" -#include "dhd_bus.h" +#include "bus.h" #include "tracepoint.h" @@ -518,8 +518,7 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx, memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ? len : msgbuf->ioctl_resp_ret_len); } - if (skb) - brcmu_pkt_buf_free_skb(skb); + brcmu_pkt_buf_free_skb(skb); return msgbuf->ioctl_resp_status; } @@ -1081,8 +1080,17 @@ brcmf_msgbuf_rx_skb(struct brcmf_msgbuf *msgbuf, struct sk_buff *skb, { struct brcmf_if *ifp; + /* The ifidx is the idx to map to matching netdev/ifp. 
When receiving
+     * events this is easy because it contains the bssidx which maps
+     * 1-on-1 to the netdev/ifp. But for data frames the firmware ifidx
+     * is received instead. bssidx 1 is used for p2p0 and no data can be
+     * received or transmitted on it. Therefore bssidx is ifidx + 1 if
+     * ifidx > 0.
+     */
+    if (ifidx)
+        (ifidx)++;
     ifp = msgbuf->drvr->iflist[ifidx];
     if (!ifp || !ifp->ndev) {
+        brcmf_err("Received pkt for invalid ifidx %d\n", ifidx);
         brcmu_pkt_buf_free_skb(skb);
         return;
     }
@@ -1355,6 +1363,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
     }
     INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
     count = BITS_TO_LONGS(if_msgbuf->nrof_flowrings);
+    count = count * sizeof(unsigned long);
     msgbuf->flow_map = kzalloc(count, GFP_KERNEL);
     if (!msgbuf->flow_map)
         goto fail;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/of.c b/drivers/net/wireless/brcm80211/brcmfmac/of.c
index 927bffd5be6487125ca5f3e269b5eb0d690fb2f7..c824570ddea3208b1f20230a6eecbc193dd68c50 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/of.c
@@ -21,8 +21,8 @@
 #include
 #include
-#include "dhd_dbg.h"
-#include "sdio_host.h"
+#include "debug.h"
+#include "sdio.h"
 void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
 {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index d54c58a32faa52bf9b6dc2cb36edd35c07993302..effb48ebd86450c41d7a3a4d46b85df4f049b946 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -21,12 +21,12 @@
 #include
 #include
 #include
-#include
-#include
+#include "core.h"
+#include "debug.h"
 #include "fwil.h"
 #include "fwil_types.h"
 #include "p2p.h"
-#include "wl_cfg80211.h"
+#include "cfg80211.h"
 /* parameters used for p2p escan */
 #define P2PAPI_SCAN_NPROBES 1
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
index 16fef3382019192d90462835625011075b33a2ec..905991fdb7b101a5dec4694d898a2b394cef06af 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
@@ -30,8 +30,8 @@
 #include
 #include
-#include "dhd_dbg.h"
-#include "dhd_bus.h"
+#include "debug.h"
+#include "bus.h"
 #include "commonring.h"
 #include "msgbuf.h"
 #include "pcie.h"
@@ -798,12 +798,14 @@ static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
     brcmf_dbg(PCIE, "Enter\n");
     /* is it a v1 or v2 implementation */
     devinfo->irq_requested = false;
+    pci_enable_msi(pdev);
     if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) {
         if (request_threaded_irq(pdev->irq,
                                  brcmf_pcie_quick_check_isr_v1,
                                  brcmf_pcie_isr_thread_v1,
                                  IRQF_SHARED, "brcmf_pcie_intr",
                                  devinfo)) {
+            pci_disable_msi(pdev);
             brcmf_err("Failed to request IRQ %d\n", pdev->irq);
             return -EIO;
         }
@@ -813,6 +815,7 @@
                                  brcmf_pcie_isr_thread_v2,
                                  IRQF_SHARED, "brcmf_pcie_intr",
                                  devinfo)) {
+            pci_disable_msi(pdev);
             brcmf_err("Failed to request IRQ %d\n", pdev->irq);
             return -EIO;
         }
@@ -839,6 +842,7 @@ static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
         return;
     devinfo->irq_requested = false;
     free_irq(pdev->irq, devinfo);
+    pci_disable_msi(pdev);
     msleep(50);
     count = 0;
@@ -1857,6 +1861,8 @@ static struct pci_device_id brcmf_pcie_devid_table[] = {
     BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
     BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
     BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
+    BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
+    BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
     { /* end: all zeroes */ }
 };
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/brcm80211/brcmfmac/proto.c
index 62b940723339f825a2ded722e19ec39c8e644dee..26b68c367f57ccc33f9afda71310d10fd61455e0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/proto.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/proto.c
@@ -20,9 +20,9 @@
 #include
 #include
-#include "dhd.h"
-#include "dhd_bus.h"
-#include "dhd_dbg.h"
+#include "core.h"
+#include "bus.h"
+#include "debug.h"
 #include "proto.h"
 #include "bcdc.h"
 #include "msgbuf.h"
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
similarity index 98%
rename from drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
rename to drivers/net/wireless/brcm80211/brcmfmac/sdio.c
index d20d4e6f391ae89706e422e2b817034ee030c0ae..0b0d51a61060cbc42f8f755ba9b2e543b08b9426 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
@@ -40,7 +40,7 @@
 #include
 #include
 #include
-#include "sdio_host.h"
+#include "sdio.h"
 #include "chip.h"
 #include "firmware.h"
@@ -96,8 +96,8 @@ struct rte_console {
 #endif /* DEBUG */
 #include
-#include "dhd_bus.h"
-#include "dhd_dbg.h"
+#include "bus.h"
+#include "debug.h"
 #include "tracepoint.h"
 #define TXQLEN 2048 /* bulk tx queue length */
@@ -2762,6 +2762,48 @@ static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
     return &bus->txq;
 }
+static bool brcmf_sdio_prec_enq(struct pktq *q, struct sk_buff *pkt, int prec)
+{
+    struct sk_buff *p;
+    int eprec = -1; /* precedence to evict from */
+
+    /* Fast case, precedence queue is not full and we are also not
+     * exceeding total queue length
+     */
+    if (!pktq_pfull(q, prec) && !pktq_full(q)) {
+        brcmu_pktq_penq(q, prec, pkt);
+        return true;
+    }
+
+    /* Determine precedence from which to evict packet, if any */
+    if (pktq_pfull(q, prec)) {
+        eprec = prec;
+    } else if (pktq_full(q)) {
+        p = brcmu_pktq_peek_tail(q, &eprec);
+        if (eprec > prec)
+            return false;
+    }
+
+    /* Evict if needed */
+    if (eprec >= 0) {
+        /* Detect queueing to unconfigured precedence */
+        if (eprec == prec)
+            return false; /* refuse newer (incoming) packet */
+        /* Evict packet according to discard policy */
+        p = brcmu_pktq_pdeq_tail(q, eprec);
+        if (p == NULL)
+            brcmf_err("brcmu_pktq_pdeq_tail() failed\n");
+        brcmu_pkt_buf_free_skb(p);
+    }
+
+    /* Enqueue */
+    p = brcmu_pktq_penq(q, prec, pkt);
+    if (p == NULL)
+        brcmf_err("brcmu_pktq_penq() failed\n");
+
+    return p != NULL;
+}
+
 static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
 {
     int ret = -EBADE;
@@ -2787,7 +2829,7 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
     spin_lock_bh(&bus->txq_lock);
     /* reset bus_flags in packet cb */
     *(u16 *)(pkt->cb) = 0;
-    if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
+    if (!brcmf_sdio_prec_enq(&bus->txq, pkt, prec)) {
         skb_pull(pkt, bus->tx_hdrlen);
         brcmf_err("out of bus->txq !!!\n");
         ret = -ENOSR;
@@ -2797,7 +2839,7 @@
     if (pktq_len(&bus->txq) >= TXHI) {
         bus->txoff = true;
-        brcmf_txflowblock(bus->sdiodev->dev, true);
+        brcmf_txflowblock(dev, true);
     }
     spin_unlock_bh(&bus->txq_lock);
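
The eviction policy in brcmf_sdio_prec_enq() above is easier to see in isolation. The following counter-based miniature (queue limits invented; the real code queues skbs through the brcmu pktq helpers) applies the same rules: a full precedence refuses its own newer packets, and a full queue evicts from the lowest non-empty precedence only when the incoming packet outranks it.

#include <stdbool.h>
#include <stdio.h>

#define NPREC    4
#define PREC_MAX 4  /* per-precedence limit (invented) */
#define Q_MAX    4  /* total queue limit (invented) */

static int qlen[NPREC], total;

static bool prec_enq(int prec)
{
    int eprec, p;

    /* fast case: neither the per-precedence nor the total limit is hit */
    if (qlen[prec] < PREC_MAX && total < Q_MAX) {
        qlen[prec]++;
        total++;
        return true;
    }
    if (qlen[prec] >= PREC_MAX) {
        eprec = prec;
    } else {
        /* whole queue full: the tail lives on the lowest non-empty prec */
        for (p = 0; p < NPREC; p++)
            if (qlen[p] > 0)
                break;
        eprec = p;
        if (eprec > prec)
            return false;  /* only higher-precedence traffic is queued */
    }
    if (eprec == prec)
        return false;      /* refuse the newer (incoming) packet */
    qlen[eprec]--;         /* evict one from the lower precedence... */
    qlen[prec]++;          /* ...and enqueue the new packet; total unchanged */
    return true;
}

int main(void)
{
    int i;

    for (i = 0; i < 6; i++)    /* flood precedence 0 until full */
        prec_enq(0);
    printf("prec 2 after flood: %s\n", prec_enq(2) ? "accepted" : "refused");
    printf("prec 0 after flood: %s\n", prec_enq(0) ? "accepted" : "refused");
    return 0;
}

The higher-precedence packet is accepted by evicting a precedence-0 tail, while another precedence-0 packet is refused, matching the driver's behavior.

@@ -3948,6 +3990,7 @@ static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
     .txctl = brcmf_sdio_bus_txctl,
     .rxctl = brcmf_sdio_bus_rxctl,
     .gettxq = brcmf_sdio_bus_gettxq,
+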
.wowl_config = brcmf_sdio_wowl_config }; static void brcmf_sdio_firmware_callback(struct device *dev, @@ -4074,7 +4117,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) /* platform specific configuration: * alignments must be at least 4 bytes for ADMA - */ + */ bus->head_align = ALIGNMENT; bus->sgentry_align = ALIGNMENT; if (sdiodev->pdata) { diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio.h similarity index 98% rename from drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h rename to drivers/net/wireless/brcm80211/brcmfmac/sdio.h index f2d06cae366af25d440cc5d7cd0b2572911c4fd5..8eb42620129cb77162fc03347ca4bae13bc4f733 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.h @@ -14,8 +14,8 @@ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#ifndef _BRCM_SDH_H_ -#define _BRCM_SDH_H_ +#ifndef BRCMFMAC_SDIO_H +#define BRCMFMAC_SDIO_H #include #include @@ -186,6 +186,7 @@ struct brcmf_sdio_dev { struct sg_table sgtable; char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN]; char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN]; + bool wowl_enabled; }; /* sdio core registers */ @@ -334,5 +335,6 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus); void brcmf_sdio_isr(struct brcmf_sdio *bus); void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick); +void brcmf_sdio_wowl_config(struct device *dev, bool enabled); -#endif /* _BRCM_SDH_H_ */ +#endif /* BRCMFMAC_SDIO_H */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c index b505db48c60df10115c09c01a8f5bf3e66f37ab8..a10f35c5eb3daef41697efb7fee2f4ef370410e5 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c @@ -19,4 +19,19 @@ #ifndef __CHECKER__ #define CREATE_TRACE_POINTS #include "tracepoint.h" + +void __brcmf_err(const char *func, const char *fmt, ...) 
+{
+    struct va_format vaf = {
+        .fmt = fmt,
+    };
+    va_list args;
+
+    va_start(args, fmt);
+    vaf.va = &args;
+    pr_err("%s: %pV", func, &vaf);
+    trace_brcmf_err(func, &vaf);
+    va_end(args);
+}
+
 #endif
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 875d1142c8b0f5f65111a8674c7017a7e895721d..4572defc280fa42e7100115540a32581912bf2e2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -23,13 +23,12 @@
 #include
 #include
 #include
-#include
-#include
-
+#include "bus.h"
+#include "debug.h"
 #include "firmware.h"
-#include "usb_rdl.h"
 #include "usb.h"
+
 #define IOCTL_RESP_TIMEOUT 2000
 #define BRCMF_USB_RESET_GETVER_SPINWAIT 100 /* in unit of ms */
@@ -49,6 +48,71 @@
 #define BRCMF_USB_43242_FW_NAME "brcm/brcmfmac43242a.bin"
 #define BRCMF_USB_43569_FW_NAME "brcm/brcmfmac43569.bin"
+#define TRX_MAGIC 0x30524448 /* "HDR0" */
+#define TRX_MAX_OFFSET 3 /* Max number of file offsets */
+#define TRX_UNCOMP_IMAGE 0x20 /* Trx holds uncompressed img */
+#define TRX_RDL_CHUNK 1500 /* size of each dl transfer */
+#define TRX_OFFSETS_DLFWLEN_IDX 0
+
+/* Control messages: bRequest values */
+#define DL_GETSTATE 0 /* returns the rdl_state_t struct */
+#define DL_CHECK_CRC 1 /* currently unused */
+#define DL_GO 2 /* execute downloaded image */
+#define DL_START 3 /* initialize dl state */
+#define DL_REBOOT 4 /* reboot the device in 2 seconds */
+#define DL_GETVER 5 /* returns the bootrom_id_t struct */
+#define DL_GO_PROTECTED 6 /* execute the downloaded code and set reset
+                           * event to occur in 2 seconds. It is the
+                           * responsibility of the downloaded code to
+                           * clear this event
+                           */
+#define DL_EXEC 7 /* jump to a supplied address */
+#define DL_RESETCFG 8 /* To support single enum on dongle
+                       * - Not used by bootloader
+                       */
+#define DL_DEFER_RESP_OK 9 /* Potentially defer the response to setup
+                            * if resp unavailable
+                            */
+
+/* states */
+#define DL_WAITING 0 /* waiting to rx first pkt */
+#define DL_READY 1 /* hdr was good, waiting for more of the
+                    * compressed image
+                    */
+#define DL_BAD_HDR 2 /* hdr was corrupted */
+#define DL_BAD_CRC 3 /* compressed image was corrupted */
+#define DL_RUNNABLE 4 /* download was successful, waiting for go cmd */
+#define DL_START_FAIL 5 /* failed to initialize correctly */
+#define DL_NVRAM_TOOBIG 6 /* host specified nvram data exceeds DL_NVRAM
+                           * value
+                           */
+#define DL_IMAGE_TOOBIG 7 /* firmware image too big */
+
+
+struct trx_header_le {
+    __le32 magic;        /* "HDR0" */
+    __le32 len;          /* Length of file including header */
+    __le32 crc32;        /* CRC from flag_version to end of file */
+    __le32 flag_version; /* 0:15 flags, 16:31 version */
+    __le32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of
+                                     * header
+                                     */
+};
+
+struct rdl_state_le {
+    __le32 state;
+    __le32 bytes;
+};
+
+struct bootrom_id_le {
+    __le32 chip;      /* Chip id */
+    __le32 chiprev;   /* Chip rev */
+    __le32 ramsize;   /* Size of RAM */
+    __le32 remapbase; /* Current remap base address */
+    __le32 boardtype; /* Type of board */
+    __le32 boardrev;  /* Board revision */
+};
+
 struct brcmf_usb_image {
     struct list_head list;
     s8 *fwname;
@@ -93,6 +157,8 @@ struct brcmf_usbdev_info {
     u8 ifnum;
     struct urb *bulk_urb; /* used for FW download */
+
+    bool wowl_enabled;
 };
 static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
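
To make the on-wire layout of the TRX header above concrete, here is a standalone sketch of decoding its little-endian fields the way an image check would before download. The header bytes are fabricated for illustration; only the magic and flag handling mirrors the driver.

#include <stdint.h>
#include <stdio.h>

#define TRX_MAGIC        0x30524448 /* "HDR0" */
#define TRX_UNCOMP_IMAGE 0x20

static uint32_t get_le32(const uint8_t *p)
{
    return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
           (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
    /* first 16 bytes of a fabricated TRX header: magic, len, crc32,
     * flag_version (flags in the low 16 bits, version in the high 16) */
    uint8_t hdr[16] = {
        'H', 'D', 'R', '0',     /* magic, "HDR0" as little-endian u32 */
        0x00, 0x10, 0x00, 0x00, /* len = 4096 */
        0x00, 0x00, 0x00, 0x00, /* crc32 (ignored here) */
        0x20, 0x00, 0x01, 0x00, /* flags = 0x20, version = 1 */
    };
    uint32_t magic = get_le32(hdr);
    uint32_t flag_version = get_le32(hdr + 12);

    if (magic != TRX_MAGIC || !(flag_version & TRX_UNCOMP_IMAGE)) {
        puts("not an uncompressed TRX image");
        return 1;
    }
    printf("TRX v%u, file length %u bytes\n",
           (unsigned)(flag_version >> 16), (unsigned)get_le32(hdr + 4));
    return 0;
}

@@ -600,6 +666,16 @@ static int brcmf_usb_up(struct device *dev)
     return 0;
 }
+static void brcmf_cancel_all_urbs(struct brcmf_usbdev_info *devinfo)
+{
+    if 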
(devinfo->ctl_urb) + usb_kill_urb(devinfo->ctl_urb); + if (devinfo->bulk_urb) + usb_kill_urb(devinfo->bulk_urb); + brcmf_usb_free_q(&devinfo->tx_postq, true); + brcmf_usb_free_q(&devinfo->rx_postq, true); +} + static void brcmf_usb_down(struct device *dev) { struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); @@ -613,14 +689,7 @@ static void brcmf_usb_down(struct device *dev) brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_DOWN); - if (devinfo->ctl_urb) - usb_kill_urb(devinfo->ctl_urb); - - if (devinfo->bulk_urb) - usb_kill_urb(devinfo->bulk_urb); - brcmf_usb_free_q(&devinfo->tx_postq, true); - - brcmf_usb_free_q(&devinfo->rx_postq, true); + brcmf_cancel_all_urbs(devinfo); } static void @@ -785,7 +854,7 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen) brcmf_dbg(USB, "Enter, fw %p, len %d\n", fw, fwlen); - bulkchunk = kmalloc(RDL_CHUNK, GFP_ATOMIC); + bulkchunk = kmalloc(TRX_RDL_CHUNK, GFP_ATOMIC); if (bulkchunk == NULL) { err = -ENOMEM; goto fail; @@ -812,10 +881,10 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen) /* Wait until the usb device reports it received all * the bytes we sent */ if ((rdlbytes == sent) && (rdlbytes != dllen)) { - if ((dllen-sent) < RDL_CHUNK) + if ((dllen-sent) < TRX_RDL_CHUNK) sendlen = dllen-sent; else - sendlen = RDL_CHUNK; + sendlen = TRX_RDL_CHUNK; /* simply avoid having to send a ZLP by ensuring we * never have an even @@ -980,21 +1049,6 @@ static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo) kfree(devinfo->rx_reqs); } -#define TRX_MAGIC 0x30524448 /* "HDR0" */ -#define TRX_VERSION 1 /* Version 1 */ -#define TRX_MAX_LEN 0x3B0000 /* Max length */ -#define TRX_NO_HEADER 1 /* Do not write TRX header */ -#define TRX_MAX_OFFSET 3 /* Max number of individual files */ -#define TRX_UNCOMP_IMAGE 0x20 /* Trx contains uncompressed image */ - -struct trx_header_le { - __le32 magic; /* "HDR0" */ - __le32 len; /* Length of file including header */ - __le32 crc32; /* CRC from flag_version to end of file */ - __le32 flag_version; /* 0:15 flags, 16:31 version */ - __le32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of - * header */ -}; static int check_file(const u8 *headers) { @@ -1096,11 +1150,24 @@ error: return NULL; } +static void brcmf_usb_wowl_config(struct device *dev, bool enabled) +{ + struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); + + brcmf_dbg(USB, "Configuring WOWL, enabled=%d\n", enabled); + devinfo->wowl_enabled = enabled; + if (enabled) + device_set_wakeup_enable(devinfo->dev, true); + else + device_set_wakeup_enable(devinfo->dev, false); +} + static struct brcmf_bus_ops brcmf_usb_bus_ops = { .txdata = brcmf_usb_tx, .stop = brcmf_usb_down, .txctl = brcmf_usb_tx_ctlpkt, .rxctl = brcmf_usb_rx_ctlpkt, + .wowl_config = brcmf_usb_wowl_config, }; static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo) @@ -1188,6 +1255,9 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo) bus->ops = &brcmf_usb_bus_ops; bus->proto_type = BRCMF_PROTO_BCDC; bus->always_use_fws_queue = true; +#ifdef CONFIG_PM + bus->wowl_supported = true; +#endif if (!brcmf_usb_dlneeded(devinfo)) { ret = brcmf_usb_bus_setup(devinfo); @@ -1341,7 +1411,10 @@ static int brcmf_usb_suspend(struct usb_interface *intf, pm_message_t state) brcmf_dbg(USB, "Enter\n"); devinfo->bus_pub.state = BRCMFMAC_USB_STATE_SLEEP; - brcmf_detach(&usb->dev); + if (devinfo->wowl_enabled) + brcmf_cancel_all_urbs(devinfo); + else + brcmf_detach(&usb->dev); return 0; } @@ -1354,7 
+1427,12 @@ static int brcmf_usb_resume(struct usb_interface *intf) struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev); brcmf_dbg(USB, "Enter\n"); - return brcmf_usb_bus_setup(devinfo); + if (!devinfo->wowl_enabled) + return brcmf_usb_bus_setup(devinfo); + + devinfo->bus_pub.state = BRCMFMAC_USB_STATE_UP; + brcmf_usb_rx_fill_all(devinfo); + return 0; } static int brcmf_usb_reset_resume(struct usb_interface *intf) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb_rdl.h b/drivers/net/wireless/brcm80211/brcmfmac/usb_rdl.h deleted file mode 100644 index 0a35c51c3da20201e8ce8e88d0c6e4e5fd145aea..0000000000000000000000000000000000000000 --- a/drivers/net/wireless/brcm80211/brcmfmac/usb_rdl.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (c) 2011 Broadcom Corporation - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#ifndef _USB_RDL_H -#define _USB_RDL_H - -/* Control messages: bRequest values */ -#define DL_GETSTATE 0 /* returns the rdl_state_t struct */ -#define DL_CHECK_CRC 1 /* currently unused */ -#define DL_GO 2 /* execute downloaded image */ -#define DL_START 3 /* initialize dl state */ -#define DL_REBOOT 4 /* reboot the device in 2 seconds */ -#define DL_GETVER 5 /* returns the bootrom_id_t struct */ -#define DL_GO_PROTECTED 6 /* execute the downloaded code and set reset - * event to occur in 2 seconds. 
It is the - * responsibility of the downloaded code to - * clear this event - */ -#define DL_EXEC 7 /* jump to a supplied address */ -#define DL_RESETCFG 8 /* To support single enum on dongle - * - Not used by bootloader - */ -#define DL_DEFER_RESP_OK 9 /* Potentially defer the response to setup - * if resp unavailable - */ - -/* states */ -#define DL_WAITING 0 /* waiting to rx first pkt */ -#define DL_READY 1 /* hdr was good, waiting for more of the - * compressed image */ -#define DL_BAD_HDR 2 /* hdr was corrupted */ -#define DL_BAD_CRC 3 /* compressed image was corrupted */ -#define DL_RUNNABLE 4 /* download was successful,waiting for go cmd */ -#define DL_START_FAIL 5 /* failed to initialize correctly */ -#define DL_NVRAM_TOOBIG 6 /* host specified nvram data exceeds DL_NVRAM - * value */ -#define DL_IMAGE_TOOBIG 7 /* download image too big (exceeds DATA_START - * for rdl) */ - -struct rdl_state_le { - __le32 state; - __le32 bytes; -}; - -struct bootrom_id_le { - __le32 chip; /* Chip id */ - __le32 chiprev; /* Chip rev */ - __le32 ramsize; /* Size of RAM */ - __le32 remapbase; /* Current remap base address */ - __le32 boardtype; /* Type of board */ - __le32 boardrev; /* Board revision */ -}; - -#define RDL_CHUNK 1500 /* size of each dl transfer */ - -#define TRX_OFFSETS_DLFWLEN_IDX 0 -#define TRX_OFFSETS_JUMPTO_IDX 1 -#define TRX_OFFSETS_NVM_LEN_IDX 2 - -#define TRX_OFFSETS_DLBASE_IDX 0 - -#endif /* _USB_RDL_H */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c index 5960d827508c9d42a5f4a7c29e18ab80e559aacb..50cdf7090198b3662b83488ff52e3868162401c5 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c @@ -20,10 +20,10 @@ #include #include "fwil_types.h" -#include "dhd.h" +#include "core.h" #include "p2p.h" -#include "dhd_dbg.h" -#include "wl_cfg80211.h" +#include "debug.h" +#include "cfg80211.h" #include "vendor.h" #include "fwil.h" @@ -31,8 +31,8 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len) { - struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); - struct net_device *ndev = cfg_to_ndev(cfg); + struct brcmf_cfg80211_vif *vif; + struct brcmf_if *ifp; const struct brcmf_vndr_dcmd_hdr *cmdhdr = data; struct sk_buff *reply; int ret, payload, ret_len; @@ -42,6 +42,9 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy, brcmf_dbg(TRACE, "cmd %x set %d len %d\n", cmdhdr->cmd, cmdhdr->set, cmdhdr->len); + vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); + ifp = vif->ifp; + len -= sizeof(struct brcmf_vndr_dcmd_hdr); ret_len = cmdhdr->len; if (ret_len > 0 || len > 0) { @@ -63,11 +66,11 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy, } if (cmdhdr->set) - ret = brcmf_fil_cmd_data_set(netdev_priv(ndev), cmdhdr->cmd, - dcmd_buf, ret_len); + ret = brcmf_fil_cmd_data_set(ifp, cmdhdr->cmd, dcmd_buf, + ret_len); else - ret = brcmf_fil_cmd_data_get(netdev_priv(ndev), cmdhdr->cmd, - dcmd_buf, ret_len); + ret = brcmf_fil_cmd_data_get(ifp, cmdhdr->cmd, dcmd_buf, + ret_len); if (ret != 0) goto exit; diff --git a/drivers/net/wireless/brcm80211/brcmsmac/debug.c b/drivers/net/wireless/brcm80211/brcmsmac/debug.c index a5d4add26f416f31a7413c7a4a41158646232e4f..c9a8b9360ab14855daea5a869b02b250ba8020de 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/debug.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/debug.c @@ -30,6 +30,7 @@ #include "main.h" 
#include "debug.h" #include "brcms_trace_events.h" +#include "phy/phy_int.h" static struct dentry *root_folder; @@ -71,48 +72,161 @@ struct dentry *brcms_debugfs_get_devdir(struct brcms_pub *drvr) } static -ssize_t brcms_debugfs_hardware_read(struct file *f, char __user *data, - size_t count, loff_t *ppos) +int brcms_debugfs_hardware_read(struct seq_file *s, void *data) { - char buf[128]; - int res; - struct brcms_pub *drvr = f->private_data; - - /* only allow read from start */ - if (*ppos > 0) - return 0; - - res = scnprintf(buf, sizeof(buf), - "board vendor: %x\n" - "board type: %x\n" - "board revision: %x\n" - "board flags: %x\n" - "board flags2: %x\n" - "firmware revision: %x\n", - drvr->wlc->hw->d11core->bus->boardinfo.vendor, - drvr->wlc->hw->d11core->bus->boardinfo.type, - drvr->wlc->hw->boardrev, - drvr->wlc->hw->boardflags, - drvr->wlc->hw->boardflags2, - drvr->wlc->ucode_rev - ); - - return simple_read_from_buffer(data, count, ppos, buf, res); + struct brcms_pub *drvr = s->private; + struct brcms_hardware *hw = drvr->wlc->hw; + struct bcma_device *core = hw->d11core; + struct bcma_bus *bus = core->bus; + char boardrev[10]; + + seq_printf(s, "chipnum 0x%x\n" + "chiprev 0x%x\n" + "chippackage 0x%x\n" + "corerev 0x%x\n" + "boardid 0x%x\n" + "boardvendor 0x%x\n" + "boardrev %s\n" + "boardflags 0x%x\n" + "boardflags2 0x%x\n" + "ucoderev 0x%x\n" + "radiorev 0x%x\n" + "phytype 0x%x\n" + "phyrev 0x%x\n" + "anarev 0x%x\n" + "nvramrev %d\n", + bus->chipinfo.id, bus->chipinfo.rev, bus->chipinfo.pkg, + core->id.rev, bus->boardinfo.type, bus->boardinfo.vendor, + brcmu_boardrev_str(hw->boardrev, boardrev), + drvr->wlc->hw->boardflags, drvr->wlc->hw->boardflags2, + drvr->wlc->ucode_rev, hw->band->radiorev, + hw->band->phytype, hw->band->phyrev, hw->band->pi->ana_rev, + hw->sromrev); + return 0; +} + +static int brcms_debugfs_macstat_read(struct seq_file *s, void *data) +{ + struct brcms_pub *drvr = s->private; + struct brcms_info *wl = drvr->ieee_hw->priv; + struct macstat stats; + int i; + + spin_lock_bh(&wl->lock); + stats = *(drvr->wlc->core->macstat_snapshot); + spin_unlock_bh(&wl->lock); + + seq_printf(s, "txallfrm: %d\n", stats.txallfrm); + seq_printf(s, "txrtsfrm: %d\n", stats.txrtsfrm); + seq_printf(s, "txctsfrm: %d\n", stats.txctsfrm); + seq_printf(s, "txackfrm: %d\n", stats.txackfrm); + seq_printf(s, "txdnlfrm: %d\n", stats.txdnlfrm); + seq_printf(s, "txbcnfrm: %d\n", stats.txbcnfrm); + seq_printf(s, "txfunfl[8]:"); + for (i = 0; i < ARRAY_SIZE(stats.txfunfl); i++) + seq_printf(s, " %d", stats.txfunfl[i]); + seq_printf(s, "\ntxtplunfl: %d\n", stats.txtplunfl); + seq_printf(s, "txphyerr: %d\n", stats.txphyerr); + seq_printf(s, "pktengrxducast: %d\n", stats.pktengrxducast); + seq_printf(s, "pktengrxdmcast: %d\n", stats.pktengrxdmcast); + seq_printf(s, "rxfrmtoolong: %d\n", stats.rxfrmtoolong); + seq_printf(s, "rxfrmtooshrt: %d\n", stats.rxfrmtooshrt); + seq_printf(s, "rxinvmachdr: %d\n", stats.rxinvmachdr); + seq_printf(s, "rxbadfcs: %d\n", stats.rxbadfcs); + seq_printf(s, "rxbadplcp: %d\n", stats.rxbadplcp); + seq_printf(s, "rxcrsglitch: %d\n", stats.rxcrsglitch); + seq_printf(s, "rxstrt: %d\n", stats.rxstrt); + seq_printf(s, "rxdfrmucastmbss: %d\n", stats.rxdfrmucastmbss); + seq_printf(s, "rxmfrmucastmbss: %d\n", stats.rxmfrmucastmbss); + seq_printf(s, "rxcfrmucast: %d\n", stats.rxcfrmucast); + seq_printf(s, "rxrtsucast: %d\n", stats.rxrtsucast); + seq_printf(s, "rxctsucast: %d\n", stats.rxctsucast); + seq_printf(s, "rxackucast: %d\n", stats.rxackucast); + seq_printf(s, "rxdfrmocast: 
%d\n", stats.rxdfrmocast); + seq_printf(s, "rxmfrmocast: %d\n", stats.rxmfrmocast); + seq_printf(s, "rxcfrmocast: %d\n", stats.rxcfrmocast); + seq_printf(s, "rxrtsocast: %d\n", stats.rxrtsocast); + seq_printf(s, "rxctsocast: %d\n", stats.rxctsocast); + seq_printf(s, "rxdfrmmcast: %d\n", stats.rxdfrmmcast); + seq_printf(s, "rxmfrmmcast: %d\n", stats.rxmfrmmcast); + seq_printf(s, "rxcfrmmcast: %d\n", stats.rxcfrmmcast); + seq_printf(s, "rxbeaconmbss: %d\n", stats.rxbeaconmbss); + seq_printf(s, "rxdfrmucastobss: %d\n", stats.rxdfrmucastobss); + seq_printf(s, "rxbeaconobss: %d\n", stats.rxbeaconobss); + seq_printf(s, "rxrsptmout: %d\n", stats.rxrsptmout); + seq_printf(s, "bcntxcancl: %d\n", stats.bcntxcancl); + seq_printf(s, "rxf0ovfl: %d\n", stats.rxf0ovfl); + seq_printf(s, "rxf1ovfl: %d\n", stats.rxf1ovfl); + seq_printf(s, "rxf2ovfl: %d\n", stats.rxf2ovfl); + seq_printf(s, "txsfovfl: %d\n", stats.txsfovfl); + seq_printf(s, "pmqovfl: %d\n", stats.pmqovfl); + seq_printf(s, "rxcgprqfrm: %d\n", stats.rxcgprqfrm); + seq_printf(s, "rxcgprsqovfl: %d\n", stats.rxcgprsqovfl); + seq_printf(s, "txcgprsfail: %d\n", stats.txcgprsfail); + seq_printf(s, "txcgprssuc: %d\n", stats.txcgprssuc); + seq_printf(s, "prs_timeout: %d\n", stats.prs_timeout); + seq_printf(s, "rxnack: %d\n", stats.rxnack); + seq_printf(s, "frmscons: %d\n", stats.frmscons); + seq_printf(s, "txnack: %d\n", stats.txnack); + seq_printf(s, "txglitch_nack: %d\n", stats.txglitch_nack); + seq_printf(s, "txburst: %d\n", stats.txburst); + seq_printf(s, "bphy_rxcrsglitch: %d\n", stats.bphy_rxcrsglitch); + seq_printf(s, "phywatchdog: %d\n", stats.phywatchdog); + seq_printf(s, "bphy_badplcp: %d\n", stats.bphy_badplcp); + return 0; } -static const struct file_operations brcms_debugfs_hardware_ops = { +struct brcms_debugfs_entry { + int (*read)(struct seq_file *seq, void *data); + struct brcms_pub *drvr; +}; + +static int brcms_debugfs_entry_open(struct inode *inode, struct file *f) +{ + struct brcms_debugfs_entry *entry = inode->i_private; + + return single_open(f, entry->read, entry->drvr); +} + +static const struct file_operations brcms_debugfs_def_ops = { .owner = THIS_MODULE, - .open = simple_open, - .read = brcms_debugfs_hardware_read + .open = brcms_debugfs_entry_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek }; +static int +brcms_debugfs_add_entry(struct brcms_pub *drvr, const char *fn, + int (*read_fn)(struct seq_file *seq, void *data)) +{ + struct device *dev = &drvr->wlc->hw->d11core->dev; + struct dentry *dentry = drvr->dbgfs_dir; + struct brcms_debugfs_entry *entry; + + if (IS_ERR_OR_NULL(dentry)) + return -ENOENT; + + entry = devm_kzalloc(dev, sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + entry->read = read_fn; + entry->drvr = drvr; + + dentry = debugfs_create_file(fn, S_IRUGO, dentry, entry, + &brcms_debugfs_def_ops); + + return PTR_ERR_OR_ZERO(dentry); +} + void brcms_debugfs_create_files(struct brcms_pub *drvr) { - struct dentry *dentry = drvr->dbgfs_dir; + if (IS_ERR_OR_NULL(drvr->dbgfs_dir)) + return; - if (!IS_ERR_OR_NULL(dentry)) - debugfs_create_file("hardware", S_IRUGO, dentry, - drvr, &brcms_debugfs_hardware_ops); + brcms_debugfs_add_entry(drvr, "hardware", brcms_debugfs_hardware_read); + brcms_debugfs_add_entry(drvr, "macstat", brcms_debugfs_macstat_read); } #define __brcms_fn(fn) \ diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index 
43c71bfaa4744fe4094069b28ff4d9084d6c679f..f95b524422816431db9ef522dba3c815ab822273 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -764,7 +764,9 @@ brcms_ops_configure_filter(struct ieee80211_hw *hw, return; } -static void brcms_ops_sw_scan_start(struct ieee80211_hw *hw) +static void brcms_ops_sw_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *mac_addr) { struct brcms_info *wl = hw->priv; spin_lock_bh(&wl->lock); @@ -773,7 +775,8 @@ static void brcms_ops_sw_scan_start(struct ieee80211_hw *hw) return; } -static void brcms_ops_sw_scan_complete(struct ieee80211_hw *hw) +static void brcms_ops_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct brcms_info *wl = hw->priv; spin_lock_bh(&wl->lock); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c index 1b474828d5b884fb957daf52ac59ed3c135b4b1c..a104d7ac3796491a62274f1bcf7399d15d046a22 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c @@ -445,18 +445,18 @@ static void brcms_c_detach_mfree(struct brcms_c_info *wlc) kfree(wlc->protection); kfree(wlc->stf); kfree(wlc->bandstate[0]); - kfree(wlc->corestate->macstat_snapshot); + if (wlc->corestate) + kfree(wlc->corestate->macstat_snapshot); kfree(wlc->corestate); - kfree(wlc->hw->bandstate[0]); + if (wlc->hw) + kfree(wlc->hw->bandstate[0]); kfree(wlc->hw); if (wlc->beacon) dev_kfree_skb_any(wlc->beacon); if (wlc->probe_resp) dev_kfree_skb_any(wlc->probe_resp); - /* free the wlc */ kfree(wlc); - wlc = NULL; } static struct brcms_bss_cfg *brcms_c_bsscfg_malloc(uint unit) @@ -1009,8 +1009,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs) if (txh) trace_brcms_txdesc(&wlc->hw->d11core->dev, txh, sizeof(*txh)); - if (p) - brcmu_pkt_buf_free_skb(p); + brcmu_pkt_buf_free_skb(p); } if (dma && queue < NFIFO) { @@ -3081,7 +3080,7 @@ static bool brcms_c_ps_allowed(struct brcms_c_info *wlc) static void brcms_c_statsupd(struct brcms_c_info *wlc) { int i; - struct macstat macstats; + struct macstat *macstats; #ifdef DEBUG u16 delta; u16 rxf0ovfl; @@ -3092,31 +3091,31 @@ static void brcms_c_statsupd(struct brcms_c_info *wlc) if (!wlc->pub->up) return; + macstats = wlc->core->macstat_snapshot; + #ifdef DEBUG /* save last rx fifo 0 overflow count */ - rxf0ovfl = wlc->core->macstat_snapshot->rxf0ovfl; + rxf0ovfl = macstats->rxf0ovfl; /* save last tx fifo underflow count */ for (i = 0; i < NFIFO; i++) - txfunfl[i] = wlc->core->macstat_snapshot->txfunfl[i]; + txfunfl[i] = macstats->txfunfl[i]; #endif /* DEBUG */ /* Read mac stats from contiguous shared memory */ - brcms_b_copyfrom_objmem(wlc->hw, M_UCODE_MACSTAT, &macstats, - sizeof(struct macstat), OBJADDR_SHM_SEL); + brcms_b_copyfrom_objmem(wlc->hw, M_UCODE_MACSTAT, macstats, + sizeof(*macstats), OBJADDR_SHM_SEL); #ifdef DEBUG /* check for rx fifo 0 overflow */ - delta = (u16) (wlc->core->macstat_snapshot->rxf0ovfl - rxf0ovfl); + delta = (u16)(macstats->rxf0ovfl - rxf0ovfl); if (delta) brcms_err(wlc->hw->d11core, "wl%d: %u rx fifo 0 overflows!\n", wlc->pub->unit, delta); /* check for tx fifo underflows */ for (i = 0; i < NFIFO; i++) { - delta = - (u16) (wlc->core->macstat_snapshot->txfunfl[i] - - txfunfl[i]); + delta = macstats->txfunfl[i] - txfunfl[i]; if (delta) brcms_err(wlc->hw->d11core, "wl%d: %u tx fifo %d underflows!\n", diff --git a/drivers/net/wireless/brcm80211/brcmutil/utils.c 
b/drivers/net/wireless/brcm80211/brcmutil/utils.c index 0f7e1c7b6f58c914b5b6f8d42814481d057d3d5d..906e89ddf31903fb4c18969b46655d8d0bd50457 100644 --- a/drivers/net/wireless/brcm80211/brcmutil/utils.c +++ b/drivers/net/wireless/brcm80211/brcmutil/utils.c @@ -261,6 +261,21 @@ struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp, } EXPORT_SYMBOL(brcmu_pktq_mdeq); +/* Produce a human-readable string for boardrev */ +char *brcmu_boardrev_str(u32 brev, char *buf) +{ + char c; + + if (brev < 0x100) { + snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf); + } else { + c = (brev & 0xf000) == 0x1000 ? 'P' : 'A'; + snprintf(buf, 8, "%c%03x", c, brev & 0xfff); + } + return buf; +} +EXPORT_SYMBOL(brcmu_boardrev_str); + #if defined(DEBUG) /* pretty hex print a pkt buffer chain */ void brcmu_prpkt(const char *msg, struct sk_buff *p0) @@ -292,4 +307,5 @@ void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...) print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, data, size); } EXPORT_SYMBOL(brcmu_dbg_hex_dump); + #endif /* defined(DEBUG) */ diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h index af26e0de1e5c6b5325dc895b3c28c1f30af8c8c4..6996fcc144cfdfa874834481b180afd07c7e20ee 100644 --- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h +++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h @@ -68,6 +68,8 @@ #define BRCM_PCIE_43567_DEVICE_ID 0x43d3 #define BRCM_PCIE_43570_DEVICE_ID 0x43d9 #define BRCM_PCIE_43602_DEVICE_ID 0x43ba +#define BRCM_PCIE_43602_2G_DEVICE_ID 0x43bb +#define BRCM_PCIE_43602_5G_DEVICE_ID 0x43bc /* brcmsmac IDs */ #define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */ diff --git a/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/brcm80211/include/brcmu_utils.h index 8ba445b3fd72a92ca78cbd1042dcc38528df8efb..a043e29f07e2cd16382b00cccc17d615d75cd5bf 100644 --- a/drivers/net/wireless/brcm80211/include/brcmu_utils.h +++ b/drivers/net/wireless/brcm80211/include/brcmu_utils.h @@ -218,4 +218,6 @@ void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...) 
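A note on the debugfs rework earlier in this patch: the old brcms_debugfs_hardware_read() had to fit its output into a fixed 128-byte buffer and reject reads at non-zero offsets by hand; switching to seq_file/single_open() delegates buffering, offsets and short reads to the core. The shape of that pattern, as a minimal kernel-side sketch (my_priv, my_stats_read and friends are made-up names, not part of brcmsmac):

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Hypothetical per-device state, standing in for struct brcms_pub. */
struct my_priv {
        u32 tx_frames;
        u32 rx_frames;
};

static int my_stats_read(struct seq_file *s, void *data)
{
        struct my_priv *priv = s->private;  /* third arg of single_open() */

        seq_printf(s, "tx_frames: %u\n", priv->tx_frames);
        seq_printf(s, "rx_frames: %u\n", priv->rx_frames);
        return 0;   /* seq_file handles sizing, *ppos and short reads */
}

static int my_stats_open(struct inode *inode, struct file *f)
{
        /* inode->i_private is the 'data' passed to debugfs_create_file() */
        return single_open(f, my_stats_read, inode->i_private);
}

static const struct file_operations my_stats_ops = {
        .owner   = THIS_MODULE,
        .open    = my_stats_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

/* usage: debugfs_create_file("stats", S_IRUGO, parent, priv, &my_stats_ops); */

The brcms_debugfs_entry indirection in the patch merely layers a per-file read callback on top of this, so "hardware" and "macstat" can share one file_operations.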
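The brcms_c_detach_mfree() hunk guards wlc->corestate and wlc->hw before dereferencing them, and drops the redundant "if (p)" before brcmu_pkt_buf_free_skb(): the detach path can run on a partially constructed wlc, and kfree()-style helpers already accept NULL. A standalone C sketch of the same teardown rule (names are illustrative):

#include <stdlib.h>

struct core {
        int *snapshot;
};

struct wlc {
        struct core *core;
};

static void wlc_free(struct wlc *w)
{
        if (!w)
                return;
        /* Guard the dereference: core may be NULL if attach failed early;
         * free(NULL), like kfree(NULL), is always a no-op. */
        if (w->core)
                free(w->core->snapshot);
        free(w->core);
        free(w);
}

int main(void)
{
        struct wlc *w = calloc(1, sizeof(*w));

        wlc_free(w);    /* safe even though w->core was never allocated */
        return 0;
}

Note also that the deleted "wlc = NULL;" at the end of detach was dead code: it only cleared the local copy of the pointer, which is why it could simply go.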
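In the brcms_c_statsupd() hunk, the overflow/underflow deltas are computed as 16-bit subtractions ("(u16)(macstats->rxf0ovfl - rxf0ovfl)", and the txfunfl delta is assigned straight into a u16), so a counter that wrapped between polls still yields the right difference modulo 2^16. A compilable illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t prev  = 0xfff0;                    /* last snapshot */
        uint16_t now   = 0x0010;                    /* wrapped since then */
        uint16_t delta = (uint16_t)(now - prev);    /* modulo-65536 math */

        printf("delta = %u\n", delta);              /* 32, not -65504 */
        return 0;
}

The dropped explicit cast on the txfunfl line is harmless for the same reason: the implicit conversion to u16 truncates identically.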
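brcmu_boardrev_str() above encodes two board-revision formats: values below 0x100 print as "major.minor" from the two low nibbles, anything else as a letter ('P' when the 0xf000 nibble is 1, otherwise 'A') followed by three hex digits. A runnable userspace copy for sanity-checking the rule:

#include <stdio.h>

static char *boardrev_str(unsigned int brev, char *buf)
{
        if (brev < 0x100)
                snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf);
        else
                snprintf(buf, 8, "%c%03x",
                         (brev & 0xf000) == 0x1000 ? 'P' : 'A',
                         brev & 0xfff);
        return buf;
}

int main(void)
{
        char buf[10];

        printf("%s\n", boardrev_str(0x23, buf));    /* prints "2.3"  */
        printf("%s\n", boardrev_str(0x1204, buf));  /* prints "P204" */
        return 0;
}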
} #endif +char *brcmu_boardrev_str(u32 brev, char *buf); + #endif /* _BRCMU_UTILS_H_ */ diff --git a/drivers/net/wireless/cw1200/scan.c b/drivers/net/wireless/cw1200/scan.c index b2fb6c632092b202d484dcc6d1d73b2840ecf255..f2e276faca70e147d500aa21e68db7470ff5ea44 100644 --- a/drivers/net/wireless/cw1200/scan.c +++ b/drivers/net/wireless/cw1200/scan.c @@ -78,7 +78,7 @@ int cw1200_hw_scan(struct ieee80211_hw *hw, if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS) return -EINVAL; - frame.skb = ieee80211_probereq_get(hw, priv->vif, NULL, 0, + frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0, req->ie_len); if (!frame.skb) return -ENOMEM; diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index edc344334a7547006901de8e3b5dcd79db66c0a9..67cad9b05ad821fc720da095aced2a9fe72202b8 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -1363,7 +1363,7 @@ static ssize_t show_cmd_log(struct device *d, if (!priv->cmdlog) return 0; for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len; - (i != priv->cmdlog_pos) && (PAGE_SIZE - len); + (i != priv->cmdlog_pos) && (len < PAGE_SIZE); i = (i + 1) % priv->cmdlog_len) { len += snprintf(buf + len, PAGE_SIZE - len, diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h index 5ce2f59d3378ed7ddcc4891769d72401eb66cc0d..b0571618c2edf2a9b7478aee36c61c4cef915020 100644 --- a/drivers/net/wireless/ipw2x00/libipw.h +++ b/drivers/net/wireless/ipw2x00/libipw.h @@ -654,10 +654,6 @@ struct libipw_network { /* TPC Report - mandatory if spctrm mgmt required */ struct libipw_tpc_report tpc_report; - /* IBSS DFS - mandatory if spctrm mgmt required and IBSS - * NOTE: This is variable length and so must be allocated dynamically */ - struct libipw_ibss_dfs *ibss_dfs; - /* Channel Switch Announcement - optional if spctrm mgmt required */ struct libipw_csa csa; @@ -970,7 +966,6 @@ int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb, /* make sure to set stats->len */ void libipw_rx_mgt(struct libipw_device *ieee, struct libipw_hdr_4addr *header, struct libipw_rx_stats *stats); -void libipw_network_reset(struct libipw_network *network); /* libipw_geo.c */ const struct libipw_geo *libipw_get_geo(struct libipw_device *ieee); diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c index 5f31b72a492181657f784408a3d33a11b418f221..60f28740f6afed42ab9e01c1077ae2130646ac69 100644 --- a/drivers/net/wireless/ipw2x00/libipw_module.c +++ b/drivers/net/wireless/ipw2x00/libipw_module.c @@ -84,25 +84,12 @@ static int libipw_networks_allocate(struct libipw_device *ieee) return 0; } -void libipw_network_reset(struct libipw_network *network) -{ - if (!network) - return; - - if (network->ibss_dfs) { - kfree(network->ibss_dfs); - network->ibss_dfs = NULL; - } -} - static inline void libipw_networks_free(struct libipw_device *ieee) { int i; - for (i = 0; i < MAX_NETWORK_COUNT; i++) { - kfree(ieee->networks[i]->ibss_dfs); + for (i = 0; i < MAX_NETWORK_COUNT; i++) kfree(ieee->networks[i]); - } } void libipw_networks_age(struct libipw_device *ieee, diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c index 2d66984079bb1f1bc928c1aed037851285a22ea6..a6877dd6ba7352361f6fb0401a3fd0f4cdf7bac9 100644 --- a/drivers/net/wireless/ipw2x00/libipw_rx.c +++ b/drivers/net/wireless/ipw2x00/libipw_rx.c @@ -1298,13 +1298,6 @@ static int libipw_parse_info_param(struct libipw_info_element break; case 
WLAN_EID_IBSS_DFS: - if (network->ibss_dfs) - break; - network->ibss_dfs = kmemdup(info_element->data, - info_element->len, - GFP_ATOMIC); - if (!network->ibss_dfs) - return 1; network->flags |= NETWORK_HAS_IBSS_DFS; break; @@ -1335,9 +1328,7 @@ static int libipw_parse_info_param(struct libipw_info_element static int libipw_handle_assoc_resp(struct libipw_device *ieee, struct libipw_assoc_response *frame, struct libipw_rx_stats *stats) { - struct libipw_network network_resp = { - .ibss_dfs = NULL, - }; + struct libipw_network network_resp = { }; struct libipw_network *network = &network_resp; struct net_device *dev = ieee->dev; @@ -1472,9 +1463,6 @@ static void update_network(struct libipw_network *dst, int qos_active; u8 old_param; - libipw_network_reset(dst); - dst->ibss_dfs = src->ibss_dfs; - /* We only update the statistics if they were created by receiving * the network information on the actual channel the network is on. * @@ -1548,9 +1536,7 @@ static void libipw_process_probe_response(struct libipw_device *stats) { struct net_device *dev = ieee->dev; - struct libipw_network network = { - .ibss_dfs = NULL, - }; + struct libipw_network network = { }; struct libipw_network *target; struct libipw_network *oldest = NULL; #ifdef CONFIG_LIBIPW_DEBUG @@ -1618,7 +1604,6 @@ static void libipw_process_probe_response(struct libipw_device LIBIPW_DEBUG_SCAN("Expired '%*pE' (%pM) from network list.\n", target->ssid_len, target->ssid, target->bssid); - libipw_network_reset(target); } else { /* Otherwise just pull from the free list */ target = list_entry(ieee->network_free_list.next, @@ -1634,7 +1619,6 @@ static void libipw_process_probe_response(struct libipw_device "BEACON" : "PROBE RESPONSE"); #endif memcpy(target, &network, sizeof(*target)); - network.ibss_dfs = NULL; list_add_tail(&target->list, &ieee->network_list); } else { LIBIPW_DEBUG_SCAN("Updating '%*pE' (%pM) via %s.\n", @@ -1643,7 +1627,6 @@ static void libipw_process_probe_response(struct libipw_device is_beacon(beacon->header.frame_ctl) ? 
"BEACON" : "PROBE RESPONSE"); update_network(target, &network); - network.ibss_dfs = NULL; } spin_unlock_irqrestore(&ieee->lock, flags); diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c index 26fec54dcd0300a7f6ec5cc96267acc917de56ea..2748fde4b90c0fa5113240765b06aaceeeeeb331 100644 --- a/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/iwlegacy/4965-mac.c @@ -6063,7 +6063,7 @@ il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, } void -il4965_mac_channel_switch(struct ieee80211_hw *hw, +il4965_mac_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel_switch *ch_switch) { struct il_priv *il = hw->priv; diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h index 337dfcf3bbde7c58fe4681268f63b9d29ea8f5fa..3a57f71b8ed57fbb98761901968ee4e4cc0110f8 100644 --- a/drivers/net/wireless/iwlegacy/4965.h +++ b/drivers/net/wireless/iwlegacy/4965.h @@ -187,8 +187,9 @@ int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u8 buf_size); int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); -void il4965_mac_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_channel_switch *ch_switch); +void +il4965_mac_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_channel_switch *ch_switch); void il4965_led_enable(struct il_priv *il); diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index 267e48a2915e7885993979337619d777c8cb74c8..ab019b45551b9ea9bef61a1861feba7601897a5f 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig @@ -59,6 +59,7 @@ config IWLDVM config IWLMVM tristate "Intel Wireless WiFi MVM Firmware support" + select WANT_DEV_COREDUMP help This is the driver that supports the MVM firmware which is currently only available for 7260 and 3160 devices. diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h index 751ae1d10b7f6d27650e1a1a966a65651dd2ca30..7a34e4d158d1a68916d9a43f49105827338f708a 100644 --- a/drivers/net/wireless/iwlwifi/dvm/commands.h +++ b/drivers/net/wireless/iwlwifi/dvm/commands.h @@ -966,21 +966,21 @@ struct iwl_rem_sta_cmd { /* WiFi queues mask */ -#define IWL_SCD_BK_MSK cpu_to_le32(BIT(0)) -#define IWL_SCD_BE_MSK cpu_to_le32(BIT(1)) -#define IWL_SCD_VI_MSK cpu_to_le32(BIT(2)) -#define IWL_SCD_VO_MSK cpu_to_le32(BIT(3)) -#define IWL_SCD_MGMT_MSK cpu_to_le32(BIT(3)) +#define IWL_SCD_BK_MSK BIT(0) +#define IWL_SCD_BE_MSK BIT(1) +#define IWL_SCD_VI_MSK BIT(2) +#define IWL_SCD_VO_MSK BIT(3) +#define IWL_SCD_MGMT_MSK BIT(3) /* PAN queues mask */ -#define IWL_PAN_SCD_BK_MSK cpu_to_le32(BIT(4)) -#define IWL_PAN_SCD_BE_MSK cpu_to_le32(BIT(5)) -#define IWL_PAN_SCD_VI_MSK cpu_to_le32(BIT(6)) -#define IWL_PAN_SCD_VO_MSK cpu_to_le32(BIT(7)) -#define IWL_PAN_SCD_MGMT_MSK cpu_to_le32(BIT(7)) -#define IWL_PAN_SCD_MULTICAST_MSK cpu_to_le32(BIT(8)) +#define IWL_PAN_SCD_BK_MSK BIT(4) +#define IWL_PAN_SCD_BE_MSK BIT(5) +#define IWL_PAN_SCD_VI_MSK BIT(6) +#define IWL_PAN_SCD_VO_MSK BIT(7) +#define IWL_PAN_SCD_MGMT_MSK BIT(7) +#define IWL_PAN_SCD_MULTICAST_MSK BIT(8) -#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00) +#define IWL_AGG_TX_QUEUE_MSK 0xffc00 #define IWL_DROP_ALL BIT(1) @@ -1005,12 +1005,17 @@ struct iwl_rem_sta_cmd { * 1: Dump multiple MSDU according to PS, INVALID STA, TTL, TID disable. 
* 2: Dump all FIFO */ -struct iwl_txfifo_flush_cmd { +struct iwl_txfifo_flush_cmd_v3 { __le32 queue_control; __le16 flush_control; __le16 reserved; } __packed; +struct iwl_txfifo_flush_cmd_v2 { + __le16 queue_control; + __le16 flush_control; +} __packed; + /* * REPLY_WEP_KEY = 0x20 */ diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c index 2191621d69c1618375482d3e3a1dc7d8f2b7f192..1d2223df5cb01fc84a06a55e431825175864ba34 100644 --- a/drivers/net/wireless/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/iwlwifi/dvm/lib.c @@ -137,37 +137,38 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv, */ int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk) { - struct iwl_txfifo_flush_cmd flush_cmd; - struct iwl_host_cmd cmd = { - .id = REPLY_TXFIFO_FLUSH, - .len = { sizeof(struct iwl_txfifo_flush_cmd), }, - .data = { &flush_cmd, }, + struct iwl_txfifo_flush_cmd_v3 flush_cmd_v3 = { + .flush_control = cpu_to_le16(IWL_DROP_ALL), + }; + struct iwl_txfifo_flush_cmd_v2 flush_cmd_v2 = { + .flush_control = cpu_to_le16(IWL_DROP_ALL), }; - memset(&flush_cmd, 0, sizeof(flush_cmd)); + u32 queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK | + IWL_SCD_BE_MSK | IWL_SCD_BK_MSK | IWL_SCD_MGMT_MSK; - flush_cmd.queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK | - IWL_SCD_BE_MSK | IWL_SCD_BK_MSK | - IWL_SCD_MGMT_MSK; if ((priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))) - flush_cmd.queue_control |= IWL_PAN_SCD_VO_MSK | - IWL_PAN_SCD_VI_MSK | - IWL_PAN_SCD_BE_MSK | - IWL_PAN_SCD_BK_MSK | - IWL_PAN_SCD_MGMT_MSK | - IWL_PAN_SCD_MULTICAST_MSK; + queue_control |= IWL_PAN_SCD_VO_MSK | IWL_PAN_SCD_VI_MSK | + IWL_PAN_SCD_BE_MSK | IWL_PAN_SCD_BK_MSK | + IWL_PAN_SCD_MGMT_MSK | + IWL_PAN_SCD_MULTICAST_MSK; if (priv->nvm_data->sku_cap_11n_enable) - flush_cmd.queue_control |= IWL_AGG_TX_QUEUE_MSK; + queue_control |= IWL_AGG_TX_QUEUE_MSK; if (scd_q_msk) - flush_cmd.queue_control = cpu_to_le32(scd_q_msk); - - IWL_DEBUG_INFO(priv, "queue control: 0x%x\n", - flush_cmd.queue_control); - flush_cmd.flush_control = cpu_to_le16(IWL_DROP_ALL); - - return iwl_dvm_send_cmd(priv, &cmd); + queue_control = scd_q_msk; + + IWL_DEBUG_INFO(priv, "queue control: 0x%x\n", queue_control); + flush_cmd_v3.queue_control = cpu_to_le32(queue_control); + flush_cmd_v2.queue_control = cpu_to_le16((u16)queue_control); + + if (IWL_UCODE_API(priv->fw->ucode_ver) > 2) + return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0, + sizeof(flush_cmd_v3), + &flush_cmd_v3); + return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0, + sizeof(flush_cmd_v2), &flush_cmd_v2); } void iwlagn_dev_txfifo_flush(struct iwl_priv *priv) @@ -418,8 +419,8 @@ void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena) static bool iwlagn_bt_traffic_is_sco(struct iwl_bt_uart_msg *uart_msg) { - return BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3 >> - BT_UART_MSG_FRAME3SCOESCO_POS; + return (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >> + BT_UART_MSG_FRAME3SCOESCO_POS; } static void iwlagn_bt_traffic_change_work(struct work_struct *work) diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index cae692ff1013f9ee00b93861558c6144cfad4cc1..47e64e8b9517d97dff6b96e544a731d498412ba1 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -941,6 +941,7 @@ static int iwlagn_mac_sta_state(struct ieee80211_hw *hw, } static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, struct 
ieee80211_channel_switch *ch_switch) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index b04b8858c6905ae700ac4081e57f56f59668eaf8..e5be2d21868fa975619b72fb8217cad0f54cefe6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -73,12 +73,12 @@ #define IWL3160_UCODE_API_MAX 10 /* Oldest version we won't warn about */ -#define IWL7260_UCODE_API_OK 9 -#define IWL3160_UCODE_API_OK 9 +#define IWL7260_UCODE_API_OK 10 +#define IWL3160_UCODE_API_OK 10 /* Lowest firmware API version supported */ -#define IWL7260_UCODE_API_MIN 8 -#define IWL3160_UCODE_API_MIN 8 +#define IWL7260_UCODE_API_MIN 9 +#define IWL3160_UCODE_API_MIN 9 /* NVM versions */ #define IWL7260_NVM_VERSION 0x0a1d @@ -89,6 +89,8 @@ #define IWL3165_TX_POWER_VERSION 0xffff /* meaningless */ #define IWL7265_NVM_VERSION 0x0a1d #define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */ +#define IWL7265D_NVM_VERSION 0x0c11 +#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */ #define IWL7260_FW_PRE "iwlwifi-7260-" #define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode" @@ -102,6 +104,9 @@ #define IWL7265_FW_PRE "iwlwifi-7265-" #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" +#define IWL7265D_FW_PRE "iwlwifi-7265D-" +#define IWL7265D_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" + #define NVM_HW_SECTION_NUM_FAMILY_7000 0 static const struct iwl_base_params iwl7000_base_params = { @@ -132,8 +137,8 @@ static const struct iwl_ht_params iwl7000_ht_params = { .base_params = &iwl7000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000, \ - .non_shared_ant = ANT_A - + .non_shared_ant = ANT_A, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl7260_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 7260", @@ -267,7 +272,38 @@ const struct iwl_cfg iwl7265_n_cfg = { .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, }; +const struct iwl_cfg iwl7265d_2ac_cfg = { + .name = "Intel(R) Dual Band Wireless AC 7265", + .fw_name_pre = IWL7265D_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7265_ht_params, + .nvm_ver = IWL7265D_NVM_VERSION, + .nvm_calib_ver = IWL7265_TX_POWER_VERSION, + .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, +}; + +const struct iwl_cfg iwl7265d_2n_cfg = { + .name = "Intel(R) Dual Band Wireless N 7265", + .fw_name_pre = IWL7265D_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7265_ht_params, + .nvm_ver = IWL7265D_NVM_VERSION, + .nvm_calib_ver = IWL7265_TX_POWER_VERSION, + .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, +}; + +const struct iwl_cfg iwl7265d_n_cfg = { + .name = "Intel(R) Wireless N 7265", + .fw_name_pre = IWL7265D_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7265_ht_params, + .nvm_ver = IWL7265D_NVM_VERSION, + .nvm_calib_ver = IWL7265_TX_POWER_VERSION, + .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, +}; + MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); +MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index d2b7234b1c73e00f76ed17094d52e0e18431d81f..bf0a95cb71535f390dfcfcc5820d39901359d440 100644 --- 
a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c @@ -72,10 +72,10 @@ #define IWL8000_UCODE_API_MAX 10 /* Oldest version we won't warn about */ -#define IWL8000_UCODE_API_OK 8 +#define IWL8000_UCODE_API_OK 10 /* Lowest firmware API version supported */ -#define IWL8000_UCODE_API_MIN 8 +#define IWL8000_UCODE_API_MIN 9 /* NVM versions */ #define IWL8000_NVM_VERSION 0x0a1d @@ -91,6 +91,10 @@ /* Max SDIO RX aggregation size of the ADDBA request/response */ #define MAX_RX_AGG_SIZE_8260_SDIO 28 +/* Max A-MPDU exponent for HT and VHT */ +#define MAX_HT_AMPDU_EXPONENT_8260_SDIO IEEE80211_HT_MAX_AMPDU_32K +#define MAX_VHT_AMPDU_EXPONENT_8260_SDIO IEEE80211_VHT_MAX_AMPDU_32K + static const struct iwl_base_params iwl8000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000, .num_of_queues = IWLAGN_NUM_QUEUES, @@ -104,6 +108,7 @@ static const struct iwl_base_params iwl8000_base_params = { }; static const struct iwl_ht_params iwl8000_ht_params = { + .stbc = true, .ldpc = true, .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), }; @@ -118,6 +123,7 @@ static const struct iwl_ht_params iwl8000_ht_params = { .base_params = &iwl8000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \ + .d0i3 = true, \ .non_shared_ant = ANT_A const struct iwl_cfg iwl8260_2n_cfg = { @@ -136,6 +142,7 @@ const struct iwl_cfg iwl8260_2ac_cfg = { .ht_params = &iwl8000_ht_params, .nvm_ver = IWL8000_NVM_VERSION, .nvm_calib_ver = IWL8000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; const struct iwl_cfg iwl8260_2ac_sdio_cfg = { @@ -148,6 +155,23 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = { .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000, .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO, .disable_dummy_notification = true, + .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO, + .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO, +}; + +const struct iwl_cfg iwl4165_2ac_sdio_cfg = { + .name = "Intel(R) Dual Band Wireless-AC 4165", + .fw_name_pre = IWL8000_FW_PRE, + IWL_DEVICE_8000, + .ht_params = &iwl8000_ht_params, + .nvm_ver = IWL8000_NVM_VERSION, + .nvm_calib_ver = IWL8000_TX_POWER_VERSION, + .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000, + .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO, + .bt_shared_single_ant = true, + .disable_dummy_notification = true, + .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO, + .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO, }; MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h index 2ef83a39ff10bb877f9ff15430a10b9ac2b3b836..3a4b9c7fc083e41470f481bebb0da94f5d6bb75f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/iwlwifi/iwl-config.h @@ -87,6 +87,16 @@ enum iwl_device_family { IWL_DEVICE_FAMILY_8000, }; +static inline bool iwl_has_secure_boot(u32 hw_rev, + enum iwl_device_family family) +{ + /* return 1 only for family 8000 B0 */ + if ((family == IWL_DEVICE_FAMILY_8000) && (hw_rev & 0xC)) + return 1; + + return 0; +} + /* * LED mode * IWL_LED_DEFAULT: use device default @@ -246,6 +256,11 @@ struct iwl_pwr_tx_backoff { * @nvm_hw_section_num: the ID of the HW NVM section * @pwr_tx_backoffs: translation table between power limits and backoffs * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response + * @max_tx_agg_size: max TX aggregation size of the ADDBA 
request/response + * @max_ht_ampdu_factor: the exponent of the max length of A-MPDU that the + * station can receive in HT + * @max_vht_ampdu_exponent: the exponent of the max length of A-MPDU that the + * station can receive in VHT * * We enable the driver to be backward compatible wrt. hardware features. * API differences in uCode shouldn't be handled here but through TLVs @@ -285,6 +300,9 @@ struct iwl_cfg { const char *default_nvm_file; unsigned int max_rx_agg_size; bool disable_dummy_notification; + unsigned int max_tx_agg_size; + unsigned int max_ht_ampdu_exponent; + unsigned int max_vht_ampdu_exponent; }; /* @@ -346,9 +364,14 @@ extern const struct iwl_cfg iwl3165_2ac_cfg; extern const struct iwl_cfg iwl7265_2ac_cfg; extern const struct iwl_cfg iwl7265_2n_cfg; extern const struct iwl_cfg iwl7265_n_cfg; +extern const struct iwl_cfg iwl7265d_2ac_cfg; +extern const struct iwl_cfg iwl7265d_2n_cfg; +extern const struct iwl_cfg iwl7265d_n_cfg; extern const struct iwl_cfg iwl8260_2n_cfg; extern const struct iwl_cfg iwl8260_2ac_cfg; extern const struct iwl_cfg iwl8260_2ac_sdio_cfg; +extern const struct iwl_cfg iwl4265_2ac_sdio_cfg; +extern const struct iwl_cfg iwl4165_2ac_sdio_cfg; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h index 3f6f015285e57ee17666af25414e5a40ffb6dd78..aff63c3f5bf84301f39a9c2419bb66575aa41de8 100644 --- a/drivers/net/wireless/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/iwlwifi/iwl-csr.h @@ -129,6 +129,8 @@ #define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c) #define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060) +#define CSR_MBOX_SET_REG (CSR_BASE + 0x88) + #define CSR_LED_REG (CSR_BASE+0x094) #define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0) #define CSR_MAC_SHADOW_REG_CTRL (CSR_BASE+0x0A8) /* 6000 and up */ @@ -184,6 +186,8 @@ #define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */ #define CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */ +#define CSR_MBOX_SET_REG_OS_ALIVE BIT(5) + #define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/ #define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/ @@ -305,23 +309,24 @@ enum { }; -#define CSR_HW_REV_TYPE_MSK (0x000FFF0) -#define CSR_HW_REV_TYPE_5300 (0x0000020) -#define CSR_HW_REV_TYPE_5350 (0x0000030) -#define CSR_HW_REV_TYPE_5100 (0x0000050) -#define CSR_HW_REV_TYPE_5150 (0x0000040) -#define CSR_HW_REV_TYPE_1000 (0x0000060) -#define CSR_HW_REV_TYPE_6x00 (0x0000070) -#define CSR_HW_REV_TYPE_6x50 (0x0000080) -#define CSR_HW_REV_TYPE_6150 (0x0000084) -#define CSR_HW_REV_TYPE_6x05 (0x00000B0) -#define CSR_HW_REV_TYPE_6x30 CSR_HW_REV_TYPE_6x05 -#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05 -#define CSR_HW_REV_TYPE_2x30 (0x00000C0) -#define CSR_HW_REV_TYPE_2x00 (0x0000100) -#define CSR_HW_REV_TYPE_105 (0x0000110) -#define CSR_HW_REV_TYPE_135 (0x0000120) -#define CSR_HW_REV_TYPE_NONE (0x00001F0) +#define CSR_HW_REV_TYPE_MSK (0x000FFF0) +#define CSR_HW_REV_TYPE_5300 (0x0000020) +#define CSR_HW_REV_TYPE_5350 (0x0000030) +#define CSR_HW_REV_TYPE_5100 (0x0000050) +#define CSR_HW_REV_TYPE_5150 (0x0000040) +#define CSR_HW_REV_TYPE_1000 (0x0000060) +#define CSR_HW_REV_TYPE_6x00 (0x0000070) +#define CSR_HW_REV_TYPE_6x50 (0x0000080) +#define CSR_HW_REV_TYPE_6150 (0x0000084) +#define CSR_HW_REV_TYPE_6x05 (0x00000B0) +#define CSR_HW_REV_TYPE_6x30 CSR_HW_REV_TYPE_6x05 +#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05 +#define CSR_HW_REV_TYPE_2x30 (0x00000C0) +#define CSR_HW_REV_TYPE_2x00 (0x0000100) +#define 
CSR_HW_REV_TYPE_105 (0x0000110) +#define CSR_HW_REV_TYPE_135 (0x0000120) +#define CSR_HW_REV_TYPE_7265D (0x0000210) +#define CSR_HW_REV_TYPE_NONE (0x00001F0) /* EEPROM REG */ #define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h index 0a70bcd241f5b9703096726e9e3e81b1cb84b5f0..6842545535582246cf5369327c7748344ce98a97 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h @@ -143,7 +143,7 @@ do { \ #define IWL_DL_INFO 0x00000001 #define IWL_DL_MAC80211 0x00000002 #define IWL_DL_HCMD 0x00000004 -#define IWL_DL_STATE 0x00000008 +#define IWL_DL_TDLS 0x00000008 /* 0x000000F0 - 0x00000010 */ #define IWL_DL_QUOTA 0x00000010 #define IWL_DL_TE 0x00000020 @@ -180,6 +180,7 @@ do { \ #define IWL_DL_TX_QUEUES 0x80000000 #define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a) +#define IWL_DEBUG_TDLS(p, f, a...) IWL_DEBUG(p, IWL_DL_TDLS, f, ## a) #define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a) #define IWL_DEBUG_EXTERNAL(p, f, a...) IWL_DEBUG(p, IWL_DL_EXTERNAL, f, ## a) #define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a) diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index 0f1084f09caabfdf1265c4e34ba54bbd682cffaa..38de1513e4dedd5588367e44d2877ceed39234b6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -78,9 +78,6 @@ #include "iwl-config.h" #include "iwl-modparams.h" -/* private includes */ -#include "iwl-fw-file.h" - /****************************************************************************** * * module boiler plate @@ -187,6 +184,11 @@ static void iwl_free_fw_img(struct iwl_drv *drv, struct fw_img *img) static void iwl_dealloc_ucode(struct iwl_drv *drv) { int i; + + kfree(drv->fw.dbg_dest_tlv); + for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) + kfree(drv->fw.dbg_conf_tlv[i]); + for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) iwl_free_fw_img(drv, drv->fw.img + i); } @@ -248,6 +250,9 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) /* * Starting 8000B - FW name format has changed. This overwrites the * previous name and uses the new format. + * + * TODO: + * Once there is only one supported step for 8000 family - delete this! */ if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { char rev_step[2] = { @@ -258,6 +263,13 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) if (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_A_STEP) rev_step[0] = 0; + /* + * If hw_rev wasn't set yet - default as B-step. If it IS A-step + * we'll reload that FW later instead. 
+ */ + if (drv->trans->hw_rev == 0) + rev_step[0] = 'B'; + snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s-%s.ucode", name_pre, rev_step, tag); } @@ -301,6 +313,11 @@ struct iwl_firmware_pieces { u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr; u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr; + + /* FW debug data parsed for driver usage */ + struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; + struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX]; + size_t dbg_conf_tlv_len[FW_DBG_MAX]; }; /* @@ -574,6 +591,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, char buildstr[25]; u32 build; int num_of_cpus; + bool usniffer_images = false; + bool usniffer_req = false; if (len < sizeof(*ucode)) { IWL_ERR(drv, "uCode has invalid length: %zd\n", len); @@ -807,19 +826,16 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR, tlv_len); drv->fw.mvm_fw = true; - drv->fw.img[IWL_UCODE_REGULAR].is_secure = true; break; case IWL_UCODE_TLV_SECURE_SEC_INIT: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT, tlv_len); drv->fw.mvm_fw = true; - drv->fw.img[IWL_UCODE_INIT].is_secure = true; break; case IWL_UCODE_TLV_SECURE_SEC_WOWLAN: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN, tlv_len); drv->fw.mvm_fw = true; - drv->fw.img[IWL_UCODE_WOWLAN].is_secure = true; break; case IWL_UCODE_TLV_NUM_OF_CPU: if (tlv_len != sizeof(u32)) @@ -849,12 +865,79 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, capa->n_scan_channels = le32_to_cpup((__le32 *)tlv_data); break; + case IWL_UCODE_TLV_FW_DBG_DEST: { + struct iwl_fw_dbg_dest_tlv *dest = (void *)tlv_data; + + if (pieces->dbg_dest_tlv) { + IWL_ERR(drv, + "dbg destination ignored, already exists\n"); + break; + } + + pieces->dbg_dest_tlv = dest; + IWL_INFO(drv, "Found debug destination: %s\n", + get_fw_dbg_mode_string(dest->monitor_mode)); + + drv->fw.dbg_dest_reg_num = + tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv, + reg_ops); + drv->fw.dbg_dest_reg_num /= + sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]); + + break; + } + case IWL_UCODE_TLV_FW_DBG_CONF: { + struct iwl_fw_dbg_conf_tlv *conf = (void *)tlv_data; + + if (!pieces->dbg_dest_tlv) { + IWL_ERR(drv, + "Ignore dbg config %d - no destination configured\n", + conf->id); + break; + } + + if (conf->id >= ARRAY_SIZE(drv->fw.dbg_conf_tlv)) { + IWL_ERR(drv, + "Skip unknown configuration: %d\n", + conf->id); + break; + } + + if (pieces->dbg_conf_tlv[conf->id]) { + IWL_ERR(drv, + "Ignore duplicate dbg config %d\n", + conf->id); + break; + } + + if (conf->usniffer) + usniffer_req = true; + + IWL_INFO(drv, "Found debug configuration: %d\n", + conf->id); + + pieces->dbg_conf_tlv[conf->id] = conf; + pieces->dbg_conf_tlv_len[conf->id] = tlv_len; + break; + } + case IWL_UCODE_TLV_SEC_RT_USNIFFER: + usniffer_images = true; + iwl_store_ucode_sec(pieces, tlv_data, + IWL_UCODE_REGULAR_USNIFFER, + tlv_len); + break; default: IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type); break; } } + if (usniffer_req && !usniffer_images) { + IWL_ERR(drv, + "user selected to work with usniffer but usniffer image isn't available in ucode package\n"); + return -EINVAL; + } + if (len) { IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len); iwl_print_hex_dump(drv, IWL_DL_FW, (u8 *)data, len); @@ -992,13 +1075,14 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) struct iwl_ucode_header *ucode; struct iwlwifi_opmode_table *op; int err; - struct iwl_firmware_pieces pieces; + struct iwl_firmware_pieces *pieces; const 
unsigned int api_max = drv->cfg->ucode_api_max; unsigned int api_ok = drv->cfg->ucode_api_ok; const unsigned int api_min = drv->cfg->ucode_api_min; u32 api_ver; int i; bool load_module = false; + u32 hw_rev = drv->trans->hw_rev; fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH; fw->ucode_capa.standard_phy_calibration_size = @@ -1008,7 +1092,9 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) if (!api_ok) api_ok = api_max; - memset(&pieces, 0, sizeof(pieces)); + pieces = kzalloc(sizeof(*pieces), GFP_KERNEL); + if (!pieces) + return; if (!ucode_raw) { if (drv->fw_index <= api_ok) @@ -1031,10 +1117,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) ucode = (struct iwl_ucode_header *)ucode_raw->data; if (ucode->ver) - err = iwl_parse_v1_v2_firmware(drv, ucode_raw, &pieces); + err = iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces); else - err = iwl_parse_tlv_firmware(drv, ucode_raw, &pieces, - &fw->ucode_capa); + err = iwl_parse_tlv_firmware(drv, ucode_raw, pieces, + &fw->ucode_capa); if (err) goto try_again; @@ -1074,7 +1160,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) * In mvm uCode there is no difference between data and instructions * sections. */ - if (!fw->mvm_fw && validate_sec_sizes(drv, &pieces, drv->cfg)) + if (!fw->mvm_fw && validate_sec_sizes(drv, pieces, drv->cfg)) goto try_again; /* Allocate ucode buffers for card's bus-master loading ... */ @@ -1083,9 +1169,33 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) * 1) unmodified from disk * 2) backup cache for save/restore during power-downs */ for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) - if (iwl_alloc_ucode(drv, &pieces, i)) + if (iwl_alloc_ucode(drv, pieces, i)) goto out_free_fw; + if (pieces->dbg_dest_tlv) { + drv->fw.dbg_dest_tlv = + kmemdup(pieces->dbg_dest_tlv, + sizeof(*pieces->dbg_dest_tlv) + + sizeof(pieces->dbg_dest_tlv->reg_ops[0]) * + drv->fw.dbg_dest_reg_num, GFP_KERNEL); + + if (!drv->fw.dbg_dest_tlv) + goto out_free_fw; + } + + for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) { + if (pieces->dbg_conf_tlv[i]) { + drv->fw.dbg_conf_tlv_len[i] = + pieces->dbg_conf_tlv_len[i]; + drv->fw.dbg_conf_tlv[i] = + kmemdup(pieces->dbg_conf_tlv[i], + drv->fw.dbg_conf_tlv_len[i], + GFP_KERNEL); + if (!drv->fw.dbg_conf_tlv[i]) + goto out_free_fw; + } + } + /* Now that we can no longer fail, copy information */ /* @@ -1093,20 +1203,20 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) * for each event, which is of mode 1 (including timestamp) for all * new microcodes that include this information. 
*/ - fw->init_evtlog_ptr = pieces.init_evtlog_ptr; - if (pieces.init_evtlog_size) - fw->init_evtlog_size = (pieces.init_evtlog_size - 16)/12; + fw->init_evtlog_ptr = pieces->init_evtlog_ptr; + if (pieces->init_evtlog_size) + fw->init_evtlog_size = (pieces->init_evtlog_size - 16)/12; else fw->init_evtlog_size = drv->cfg->base_params->max_event_log_size; - fw->init_errlog_ptr = pieces.init_errlog_ptr; - fw->inst_evtlog_ptr = pieces.inst_evtlog_ptr; - if (pieces.inst_evtlog_size) - fw->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12; + fw->init_errlog_ptr = pieces->init_errlog_ptr; + fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr; + if (pieces->inst_evtlog_size) + fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16)/12; else fw->inst_evtlog_size = drv->cfg->base_params->max_event_log_size; - fw->inst_errlog_ptr = pieces.inst_errlog_ptr; + fw->inst_errlog_ptr = pieces->inst_errlog_ptr; /* * figure out the offset of chain noise reset and gain commands @@ -1165,10 +1275,55 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) op->name, err); #endif } + + /* + * We may have loaded the wrong FW file in 8000 HW family if it is an + * A-step card, and if drv->trans->hw_rev wasn't properly read when + * the FW file had been loaded. (This might happen in SDIO.) In such a + * case - unload and reload the correct file. + * + * TODO: + * Once there is only one supported step for 8000 family - delete this! + */ + if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 && + CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_A_STEP && + drv->trans->hw_rev != hw_rev) { + char firmware_name[32]; + + /* Free previous FW resources */ + if (drv->op_mode) + _iwl_op_mode_stop(drv); + iwl_dealloc_ucode(drv); + + /* Build name of correct-step FW */ + snprintf(firmware_name, sizeof(firmware_name), + strrchr(drv->firmware_name, '-')); + snprintf(drv->firmware_name, sizeof(drv->firmware_name), + "%s%s", drv->cfg->fw_name_pre, firmware_name); + + /* Clear data before loading correct FW */ + list_del(&drv->list); + + /* Request correct FW file this time */ + IWL_DEBUG_INFO(drv, "attempting to load A-step FW %s\n", + drv->firmware_name); + err = request_firmware(&ucode_raw, drv->firmware_name, + drv->trans->dev); + if (err) { + IWL_ERR(drv, "Failed swapping FW!\n"); + goto out_unbind; + } + + /* Redo callback function - this time with right FW */ + iwl_req_fw_callback(ucode_raw, context); + } + + kfree(pieces); return; try_again: /* try next, if any */ + kfree(pieces); release_firmware(ucode_raw); if (iwl_request_firmware(drv, false)) goto out_unbind; @@ -1179,6 +1334,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) iwl_dealloc_ucode(drv); release_firmware(ucode_raw); out_unbind: + kfree(pieces); complete(&drv->request_firmware_complete); device_release_driver(drv->trans->dev); } diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c index 74b796dc424287172b363fc11bc430c93bf9ef97..41ff85de73343b0a5686bfd175164807e8dc4684 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c @@ -764,7 +764,7 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, if (iwlwifi_mod_params.amsdu_size_8K) ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; - ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + ht_info->ampdu_factor = cfg->max_ht_ampdu_exponent; ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4; ht_info->mcs.rx_mask[0] = 0xFF; diff --git 
a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h index e30a41d04c8b3ac1af8878ab1d72fc02419aa80a..20a8a64c9fe31dd5e1b9b2244b6ca86b19868fd4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h @@ -81,6 +81,7 @@ * @IWL_FW_ERROR_DUMP_FW_MONITOR: firmware monitor * @IWL_FW_ERROR_DUMP_PRPH: range of periphery registers - there can be several * sections like this in a single file. + * @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers */ enum iwl_fw_error_dump_type { IWL_FW_ERROR_DUMP_SRAM = 0, @@ -90,6 +91,8 @@ enum iwl_fw_error_dump_type { IWL_FW_ERROR_DUMP_DEV_FW_INFO = 4, IWL_FW_ERROR_DUMP_FW_MONITOR = 5, IWL_FW_ERROR_DUMP_PRPH = 6, + IWL_FW_ERROR_DUMP_TXF = 7, + IWL_FW_ERROR_DUMP_FH_REGS = 8, IWL_FW_ERROR_DUMP_MAX, }; diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index 401f7be36b934b7524d56508c579e663212c8782..f2a047f6bb3e519a6ab3ce3032be18aac3aa9aa9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h @@ -131,6 +131,9 @@ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_API_CHANGES_SET = 29, IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30, IWL_UCODE_TLV_N_SCAN_CHANNELS = 31, + IWL_UCODE_TLV_SEC_RT_USNIFFER = 34, + IWL_UCODE_TLV_FW_DBG_DEST = 38, + IWL_UCODE_TLV_FW_DBG_CONF = 39, }; struct iwl_ucode_tlv { @@ -179,4 +182,309 @@ struct iwl_ucode_capa { __le32 api_capa; } __packed; +/** + * enum iwl_ucode_tlv_flag - ucode API flags + * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously + * was a separate TLV but moved here to save space. + * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID, + * treats good CRC threshold as a boolean + * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w). + * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P. + * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS + * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD + * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan + * offload profile config command. + * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six + * (rather than two) IPv6 addresses + * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element + * from the probe request template. + * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version) + * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version) + * @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC + * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and + * P2P client interfaces simultaneously if they are in different bindings. + * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and + * P2P client interfaces simultaneously if they are in same bindings. + * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD + * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save + * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering. + * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients + * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS. 
+ */ +enum iwl_ucode_tlv_flag { + IWL_UCODE_TLV_FLAGS_PAN = BIT(0), + IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1), + IWL_UCODE_TLV_FLAGS_MFP = BIT(2), + IWL_UCODE_TLV_FLAGS_P2P = BIT(3), + IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4), + IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7), + IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10), + IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12), + IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15), + IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16), + IWL_UCODE_TLV_FLAGS_P2P_PM = BIT(21), + IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = BIT(22), + IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM = BIT(23), + IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24), + IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25), + IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26), + IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29), + IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30), +}; + +/** + * enum iwl_ucode_tlv_api - ucode api + * @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field. + * @IWL_UCODE_TLV_CAPA_EXTENDED_BEACON: Support Extended beacon notification + * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex + * @IWL_UCODE_TLV_API_CSA_FLOW: ucode can do unbind-bind flow for CSA. + * @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit. + * @IWL_UCODE_TLV_API_LMAC_SCAN: This ucode uses LMAC unified scan API. + * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif. + * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time + * longer than the passive one, which is essential for fragmented scan. + */ +enum iwl_ucode_tlv_api { + IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0), + IWL_UCODE_TLV_CAPA_EXTENDED_BEACON = BIT(1), + IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3), + IWL_UCODE_TLV_API_CSA_FLOW = BIT(4), + IWL_UCODE_TLV_API_DISABLE_STA_TX = BIT(5), + IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6), + IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), + IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), +}; + +/** + * enum iwl_ucode_tlv_capa - ucode capabilities + * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3 + * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory + * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan. + * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality + * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current + * tx power value into TPC Report action frame and Link Measurement Report + * action frame + * @IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports updating current + * channel in DS parameter set element in probe requests. + * @IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in + * probe requests. 
+ * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests + * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA), + * which also implies support for the scheduler configuration command + * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching + * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command + */ +enum iwl_ucode_tlv_capa { + IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0), + IWL_UCODE_TLV_CAPA_LAR_SUPPORT = BIT(1), + IWL_UCODE_TLV_CAPA_UMAC_SCAN = BIT(2), + IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = BIT(6), + IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = BIT(8), + IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = BIT(9), + IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10), + IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11), + IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12), + IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = BIT(13), + IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18), +}; + +/* The default calibrate table size if not specified by firmware file */ +#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18 +#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19 +#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253 + +/* The default max probe length if not specified by the firmware file */ +#define IWL_DEFAULT_MAX_PROBE_LENGTH 200 + +/* + * For 16.0 uCode and above, there is no differentiation between sections, + * just an offset to the HW address. + */ +#define IWL_UCODE_SECTION_MAX 12 +#define IWL_API_ARRAY_SIZE 1 +#define IWL_CAPABILITIES_ARRAY_SIZE 1 +#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC + +/* uCode version contains 4 values: Major/Minor/API/Serial */ +#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24) +#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16) +#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8) +#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF) + +/* + * Calibration control struct. + * Sent as part of the phy configuration command. + * @flow_trigger: bitmap for which calibrations to perform according to + * flow triggers. + * @event_trigger: bitmap for which calibrations to perform according to + * event triggers. + */ +struct iwl_tlv_calib_ctrl { + __le32 flow_trigger; + __le32 event_trigger; +} __packed; + +enum iwl_fw_phy_cfg { + FW_PHY_CFG_RADIO_TYPE_POS = 0, + FW_PHY_CFG_RADIO_TYPE = 0x3 << FW_PHY_CFG_RADIO_TYPE_POS, + FW_PHY_CFG_RADIO_STEP_POS = 2, + FW_PHY_CFG_RADIO_STEP = 0x3 << FW_PHY_CFG_RADIO_STEP_POS, + FW_PHY_CFG_RADIO_DASH_POS = 4, + FW_PHY_CFG_RADIO_DASH = 0x3 << FW_PHY_CFG_RADIO_DASH_POS, + FW_PHY_CFG_TX_CHAIN_POS = 16, + FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS, + FW_PHY_CFG_RX_CHAIN_POS = 20, + FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS, +}; + +#define IWL_UCODE_MAX_CS 1 + +/** + * struct iwl_fw_cipher_scheme - a cipher scheme supported by FW. 
+ * @cipher: a cipher suite selector + * @flags: cipher scheme flags (currently reserved for a future use) + * @hdr_len: a size of MPDU security header + * @pn_len: a size of PN + * @pn_off: an offset of pn from the beginning of the security header + * @key_idx_off: an offset of key index byte in the security header + * @key_idx_mask: a bit mask of key_idx bits + * @key_idx_shift: bit shift needed to get key_idx + * @mic_len: mic length in bytes + * @hw_cipher: a HW cipher index used in host commands + */ +struct iwl_fw_cipher_scheme { + __le32 cipher; + u8 flags; + u8 hdr_len; + u8 pn_len; + u8 pn_off; + u8 key_idx_off; + u8 key_idx_mask; + u8 key_idx_shift; + u8 mic_len; + u8 hw_cipher; +} __packed; + +enum iwl_fw_dbg_reg_operator { + CSR_ASSIGN, + CSR_SETBIT, + CSR_CLEARBIT, + + PRPH_ASSIGN, + PRPH_SETBIT, + PRPH_CLEARBIT, +}; + +/** + * struct iwl_fw_dbg_reg_op - an operation on a register + * + * @op: %enum iwl_fw_dbg_reg_operator + * @addr: offset of the register + * @val: value + */ +struct iwl_fw_dbg_reg_op { + u8 op; + u8 reserved[3]; + __le32 addr; + __le32 val; +} __packed; + +/** + * enum iwl_fw_dbg_monitor_mode - available monitor recording modes + * + * @SMEM_MODE: monitor stores the data in SMEM + * @EXTERNAL_MODE: monitor stores the data in allocated DRAM + * @MARBH_MODE: monitor stores the data in MARBH buffer + */ +enum iwl_fw_dbg_monitor_mode { + SMEM_MODE = 0, + EXTERNAL_MODE = 1, + MARBH_MODE = 2, +}; + +/** + * struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data + * + * @version: version of the TLV - currently 0 + * @monitor_mode: %enum iwl_fw_dbg_monitor_mode + * @base_reg: addr of the base addr register (PRPH) + * @end_reg: addr of the end addr register (PRPH) + * @write_ptr_reg: the addr of the reg of the write pointer + * @wrap_count: the addr of the reg of the wrap_count + * @base_shift: shift right of the base addr reg + * @end_shift: shift right of the end addr reg + * @reg_ops: array of registers operations + * + * This parses IWL_UCODE_TLV_FW_DBG_DEST + */ +struct iwl_fw_dbg_dest_tlv { + u8 version; + u8 monitor_mode; + u8 reserved[2]; + __le32 base_reg; + __le32 end_reg; + __le32 write_ptr_reg; + __le32 wrap_count; + u8 base_shift; + u8 end_shift; + struct iwl_fw_dbg_reg_op reg_ops[0]; +} __packed; + +struct iwl_fw_dbg_conf_hcmd { + u8 id; + u8 reserved; + __le16 len; + u8 data[0]; +} __packed; + +/** + * struct iwl_fw_dbg_trigger - a TLV that describes a debug configuration + * + * @enabled: is this trigger enabled + * @reserved: + * @len: length, in bytes, of the %trigger field + * @trigger: pointer to a trigger struct + */ +struct iwl_fw_dbg_trigger { + u8 enabled; + u8 reserved; + u8 len; + u8 trigger[0]; +} __packed; + +/** + * enum iwl_fw_dbg_conf - configurations available + * + * @FW_DBG_CUSTOM: take this configuration from alive + * Note that the trigger is NO-OP for this configuration + */ +enum iwl_fw_dbg_conf { + FW_DBG_CUSTOM = 0, + + /* must be last */ + FW_DBG_MAX, + FW_DBG_INVALID = 0xff, +}; + +/** + * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration + * + * @id: %enum iwl_fw_dbg_conf + * @usniffer: should the uSniffer image be used + * @num_of_hcmds: how many HCMDs to send are present here + * @hcmd: a variable length host command to be sent to apply the configuration. + * If there is more than one HCMD to send, they will appear one after the + * other and be sent in the order that they appear in. 
+ * This parses IWL_UCODE_TLV_FW_DBG_CONF + */ +struct iwl_fw_dbg_conf_tlv { + u8 id; + u8 usniffer; + u8 reserved; + u8 num_of_hcmds; + struct iwl_fw_dbg_conf_hcmd hcmd; + + /* struct iwl_fw_dbg_trigger sits after all variable length hcmds */ +} __packed; + #endif /* __iwl_fw_file_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index b894a84e8393062a113102c8bbe96cf4b28f4f24..e6dc3b8709492f06d7833ce74f5b87f886c8805f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -69,112 +69,6 @@ #include "iwl-fw-file.h" -/** - * enum iwl_ucode_tlv_flag - ucode API flags - * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously - * was a separate TLV but moved here to save space. - * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID, - * treats good CRC threshold as a boolean - * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w). - * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P. - * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS - * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD - * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan - * offload profile config command. - * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six - * (rather than two) IPv6 addresses - * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element - * from the probe request template. - * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version) - * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version) - * @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC - * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and - * P2P client interfaces simultaneously if they are in different bindings. - * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and - * P2P client interfaces simultaneously if they are in same bindings. - * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD - * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save - * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering. - * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients - * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS. - */ -enum iwl_ucode_tlv_flag { - IWL_UCODE_TLV_FLAGS_PAN = BIT(0), - IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1), - IWL_UCODE_TLV_FLAGS_MFP = BIT(2), - IWL_UCODE_TLV_FLAGS_P2P = BIT(3), - IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4), - IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7), - IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10), - IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12), - IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15), - IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16), - IWL_UCODE_TLV_FLAGS_P2P_PM = BIT(21), - IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = BIT(22), - IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM = BIT(23), - IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24), - IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25), - IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26), - IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29), - IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30), -}; - -/** - * enum iwl_ucode_tlv_api - ucode api - * @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field. 
- * @IWL_UCODE_TLV_CAPA_EXTENDED_BEACON: Support Extended beacon notification - * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex - * @IWL_UCODE_TLV_API_CSA_FLOW: ucode can do unbind-bind flow for CSA. - * @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit. - * @IWL_UCODE_TLV_API_LMAC_SCAN: This ucode uses LMAC unified scan API. - * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif. - * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time - * longer than the passive one, which is essential for fragmented scan. - */ -enum iwl_ucode_tlv_api { - IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0), - IWL_UCODE_TLV_CAPA_EXTENDED_BEACON = BIT(1), - IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3), - IWL_UCODE_TLV_API_CSA_FLOW = BIT(4), - IWL_UCODE_TLV_API_DISABLE_STA_TX = BIT(5), - IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6), - IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), - IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), -}; - -/** - * enum iwl_ucode_tlv_capa - ucode capabilities - * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3 - * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current - * tx power value into TPC Report action frame and Link Measurement Report - * action frame - * @IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports adding DS params - * element in probe requests. - * @IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in - * probe requests. - * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests - * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA), - * which also implies support for the scheduler configuration command - * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command - */ -enum iwl_ucode_tlv_capa { - IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0), - IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = BIT(8), - IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = BIT(9), - IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10), - IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11), - IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12), - IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18), -}; - -/* The default calibrate table size if not specified by firmware file */ -#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18 -#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19 -#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253 - -/* The default max probe length if not specified by the firmware file */ -#define IWL_DEFAULT_MAX_PROBE_LENGTH 200 - /** * enum iwl_ucode_type * @@ -183,11 +77,13 @@ enum iwl_ucode_tlv_capa { * @IWL_UCODE_REGULAR: Normal runtime ucode * @IWL_UCODE_INIT: Initial ucode * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode + * @IWL_UCODE_REGULAR_USNIFFER: Normal runtime ucode when using usniffer image */ enum iwl_ucode_type { IWL_UCODE_REGULAR, IWL_UCODE_INIT, IWL_UCODE_WOWLAN, + IWL_UCODE_REGULAR_USNIFFER, IWL_UCODE_TYPE_MAX, }; @@ -202,14 +98,6 @@ enum iwl_ucode_sec { IWL_UCODE_SECTION_DATA, IWL_UCODE_SECTION_INST, }; -/* - * For 16.0 uCode and above, there is no differentiation between sections, - * just an offset to the HW address. 
- */ -#define IWL_UCODE_SECTION_MAX 12 -#define IWL_API_ARRAY_SIZE 1 -#define IWL_CAPABILITIES_ARRAY_SIZE 1 -#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC struct iwl_ucode_capabilities { u32 max_probe_length; @@ -229,7 +117,6 @@ struct fw_desc { struct fw_img { struct fw_desc sec[IWL_UCODE_SECTION_MAX]; - bool is_secure; bool is_dual_cpus; }; @@ -238,66 +125,6 @@ struct iwl_sf_region { u32 size; }; -/* uCode version contains 4 values: Major/Minor/API/Serial */ -#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24) -#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16) -#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8) -#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF) - -/* - * Calibration control struct. - * Sent as part of the phy configuration command. - * @flow_trigger: bitmap for which calibrations to perform according to - * flow triggers. - * @event_trigger: bitmap for which calibrations to perform according to - * event triggers. - */ -struct iwl_tlv_calib_ctrl { - __le32 flow_trigger; - __le32 event_trigger; -} __packed; - -enum iwl_fw_phy_cfg { - FW_PHY_CFG_RADIO_TYPE_POS = 0, - FW_PHY_CFG_RADIO_TYPE = 0x3 << FW_PHY_CFG_RADIO_TYPE_POS, - FW_PHY_CFG_RADIO_STEP_POS = 2, - FW_PHY_CFG_RADIO_STEP = 0x3 << FW_PHY_CFG_RADIO_STEP_POS, - FW_PHY_CFG_RADIO_DASH_POS = 4, - FW_PHY_CFG_RADIO_DASH = 0x3 << FW_PHY_CFG_RADIO_DASH_POS, - FW_PHY_CFG_TX_CHAIN_POS = 16, - FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS, - FW_PHY_CFG_RX_CHAIN_POS = 20, - FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS, -}; - -#define IWL_UCODE_MAX_CS 1 - -/** - * struct iwl_fw_cipher_scheme - a cipher scheme supported by FW. - * @cipher: a cipher suite selector - * @flags: cipher scheme flags (currently reserved for a future use) - * @hdr_len: a size of MPDU security header - * @pn_len: a size of PN - * @pn_off: an offset of pn from the beginning of the security header - * @key_idx_off: an offset of key index byte in the security header - * @key_idx_mask: a bit mask of key_idx bits - * @key_idx_shift: bit shift needed to get key_idx - * @mic_len: mic length in bytes - * @hw_cipher: a HW cipher index used in host commands - */ -struct iwl_fw_cipher_scheme { - __le32 cipher; - u8 flags; - u8 hdr_len; - u8 pn_len; - u8 pn_off; - u8 key_idx_off; - u8 key_idx_mask; - u8 key_idx_shift; - u8 mic_len; - u8 hw_cipher; -} __packed; - /** * struct iwl_fw_cscheme_list - a cipher scheme list * @size: a number of entries @@ -324,6 +151,11 @@ struct iwl_fw_cscheme_list { * @inst_errlog_ptr: error log offfset for runtime ucode. * @mvm_fw: indicates this is MVM firmware * @cipher_scheme: optional external cipher scheme. 
+ * @human_readable: human readable version + * @dbg_dest_tlv: points to the destination TLV for debug + * @dbg_conf_tlv: array of pointers to configuration TLVs for debug + * @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries + * @dbg_dest_reg_num: number of reg_ops in %dbg_dest_tlv */ struct iwl_fw { u32 ucode_ver; @@ -348,6 +180,68 @@ struct iwl_fw { struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS]; u8 human_readable[FW_VER_HUMAN_READABLE_SZ]; + + struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; + struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX]; + size_t dbg_conf_tlv_len[FW_DBG_MAX]; + + u8 dbg_dest_reg_num; }; +static inline const char *get_fw_dbg_mode_string(int mode) +{ + switch (mode) { + case SMEM_MODE: + return "SMEM"; + case EXTERNAL_MODE: + return "EXTERNAL_DRAM"; + case MARBH_MODE: + return "MARBH"; + default: + return "UNKNOWN"; + } +} + +static inline const struct iwl_fw_dbg_trigger * +iwl_fw_dbg_conf_get_trigger(const struct iwl_fw *fw, u8 id) +{ + const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id]; + u8 *ptr; + int i; + + if (!conf_tlv) + return NULL; + + ptr = (void *)&conf_tlv->hcmd; + for (i = 0; i < conf_tlv->num_of_hcmds; i++) { + /* skip the current HCMD: its header plus its payload */ + const struct iwl_fw_dbg_conf_hcmd *hcmd = (void *)ptr; + + ptr += sizeof(*hcmd); + ptr += le16_to_cpu(hcmd->len); + } + + return (const struct iwl_fw_dbg_trigger *)ptr; +} + +static inline bool +iwl_fw_dbg_conf_enabled(const struct iwl_fw *fw, u8 id) +{ + const struct iwl_fw_dbg_trigger *trigger = + iwl_fw_dbg_conf_get_trigger(fw, id); + + if (!trigger) + return false; + + return trigger->enabled; +} + +static inline bool +iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id) +{ + const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id]; + + if (!conf_tlv) + return false; + + return conf_tlv->usniffer; +} + #endif /* __iwl_fw_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c index c302e7468559ced39edc4871466a52938091a039..06e02fcd6f7b1319ccdd2c85cb21184c2074d7b1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c @@ -325,6 +325,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, { int num_rx_ants = num_of_ant(rx_chains); int num_tx_ants = num_of_ant(tx_chains); + unsigned int max_ampdu_exponent = (cfg->max_vht_ampdu_exponent ?: + IEEE80211_VHT_MAX_AMPDU_1024K); vht_cap->vht_supported = true; @@ -332,7 +334,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, IEEE80211_VHT_CAP_RXSTBC_1 | IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT | - 7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; + max_ampdu_exponent << + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; if (cfg->ht_params->ldpc) vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h index b6d666ee8359df46c5052135fd7ddcfb82525868..17de6d46222ae7c803605017c066453d73076f01 100644 --- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h +++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h @@ -138,7 +138,8 @@ struct iwl_cfg; * @nic_config: configure NIC, called before firmware is started. * May sleep * @wimax_active: invoked when WiMax becomes active. May sleep - * @enter_d0i3: configure the fw to enter d0i3. May sleep. + * @enter_d0i3: configure the fw to enter d0i3. Returns 1 to indicate that + * d0i3 entrance was aborted (e.g. due to a held reference). May sleep. + * @exit_d0i3: configure the fw to exit d0i3. May sleep.
*/ struct iwl_op_mode_ops { diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h index 1560f4576c7d3ce69e7373f5fdb688a0243f2cb1..2df51eab134872fe511d5e498397e2e22ae94ff3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/iwlwifi/iwl-prph.h @@ -322,6 +322,7 @@ enum secure_boot_config_reg { LMPM_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002, }; +#define LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0 (0xA01E30) #define LMPM_SECURE_BOOT_CPU1_STATUS_ADDR (0x1E30) #define LMPM_SECURE_BOOT_CPU2_STATUS_ADDR (0x1E34) enum secure_boot_status_reg { @@ -333,6 +334,7 @@ enum secure_boot_status_reg { LMPM_SECURE_BOOT_STATUS_SUCCESS = 0x00000003, }; +#define FH_UCODE_LOAD_STATUS (0x1AF0) #define CSR_UCODE_LOAD_STATUS_ADDR (0x1E70) enum secure_load_status_reg { LMPM_CPU_UCODE_LOADING_STARTED = 0x00000001, @@ -352,7 +354,7 @@ enum secure_load_status_reg { #define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000) #define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400) -#define LMPM_SECURE_TIME_OUT (100) +#define LMPM_SECURE_TIME_OUT (100) /* 10 micro */ /* Rx FIFO */ #define RXF_SIZE_ADDR (0xa00c88) @@ -368,4 +370,10 @@ enum secure_load_status_reg { #define MON_BUFF_WRPTR (0xa03c44) #define MON_BUFF_CYCLE_CNT (0xa03c48) +/* FW chicken bits */ +#define LMPM_CHICK 0xA01FF8 +enum { + LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0), +}; + #endif /* __iwl_prph_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index d8fc548c0d6cd561993eebb6df185e3ccad0d6ac..028408a6ecba44cdcfa9aca8fd160156e9f8ddb0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -534,10 +534,10 @@ struct iwl_trans_ops { u32 value); void (*ref)(struct iwl_trans *trans); void (*unref)(struct iwl_trans *trans); + void (*suspend)(struct iwl_trans *trans); + void (*resume)(struct iwl_trans *trans); -#ifdef CONFIG_IWLWIFI_DEBUGFS struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans); -#endif }; /** @@ -574,6 +574,9 @@ enum iwl_trans_state { * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the * start of the 802.11 header in the @rx_mpdu_cmd * @dflt_pwr_limit: default power limit fetched from the platform (ACPI) + * @dbg_dest_tlv: points to the destination TLV for debug + * @dbg_conf_tlv: array of pointers to configuration TLVs for debug + * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv */ struct iwl_trans { const struct iwl_trans_ops *ops; @@ -605,6 +608,10 @@ struct iwl_trans { u64 dflt_pwr_limit; + const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; + const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX]; + u8 dbg_dest_reg_num; + /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ char trans_specific[0] __aligned(sizeof(void *)); @@ -704,7 +711,18 @@ static inline void iwl_trans_unref(struct iwl_trans *trans) trans->ops->unref(trans); } -#ifdef CONFIG_IWLWIFI_DEBUGFS +static inline void iwl_trans_suspend(struct iwl_trans *trans) +{ + if (trans->ops->suspend) + trans->ops->suspend(trans); +} + +static inline void iwl_trans_resume(struct iwl_trans *trans) +{ + if (trans->ops->resume) + trans->ops->resume(trans); +} + static inline struct iwl_trans_dump_data * iwl_trans_dump_data(struct iwl_trans *trans) { @@ -712,7 +730,6 @@ iwl_trans_dump_data(struct iwl_trans *trans) return NULL; return trans->ops->dump_data(trans); } -#endif static inline int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd 
*cmd) diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index da2ffb78519431a1b14e9b355280a3c7a3446ed6..a3bfda45d9e6a28bdd44e2000498456988ef6558 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c @@ -72,8 +72,6 @@ #include "mvm.h" #include "iwl-debug.h" -#define BT_ANTENNA_COUPLING_THRESHOLD (30) - const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = { [BT_KILL_MSK_DEFAULT] = 0xfffffc00, [BT_KILL_MSK_NEVER] = 0xffffffff, @@ -302,11 +300,6 @@ static const __le64 iwl_ci_mask[][3] = { }, }; -static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = { - cpu_to_le32(0x2e402280), - cpu_to_le32(0x7711a751), -}; - struct corunning_block_luts { u8 range; __le32 lut20[BT_COEX_CORUN_LUT_SIZE]; @@ -605,7 +598,7 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm) bt_cmd->max_kill = cpu_to_le32(5); bt_cmd->bt4_antenna_isolation_thr = - cpu_to_le32(BT_ANTENNA_COUPLING_THRESHOLD); + cpu_to_le32(IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS); bt_cmd->bt4_tx_tx_delta_freq_thr = cpu_to_le32(15); bt_cmd->bt4_tx_rx_max_freq0 = cpu_to_le32(15); bt_cmd->override_primary_lut = cpu_to_le32(BT_COEX_INVALID_LUT); @@ -638,8 +631,8 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm) memcpy(&bt_cmd->mplut_prio_boost, iwl_bt_prio_boost, sizeof(iwl_bt_prio_boost)); - memcpy(&bt_cmd->multiprio_lut, iwl_bt_mprio_lut, - sizeof(iwl_bt_mprio_lut)); + bt_cmd->multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0); + bt_cmd->multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1); send_cmd: memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); @@ -1144,6 +1137,22 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, return lut_type != BT_COEX_LOOSE_LUT; } +bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant) +{ + /* there is no other antenna, shared antenna is always available */ + if (mvm->cfg->bt_shared_single_ant) + return true; + + if (ant & mvm->cfg->non_shared_ant) + return true; + + if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm); + + return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < + BT_HIGH_TRAFFIC; +} + bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm) { /* there is no other antenna, shared antenna is always available */ diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c index 8a1d2f33d5b7da2768a7e5849e305b6386b0b480..b3210cfbecc8e4f68254361efca6b815a05f12e6 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c @@ -102,8 +102,6 @@ static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = { #undef EVENT_PRIO_ANT -#define BT_ANTENNA_COUPLING_THRESHOLD (30) - static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm) { if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) @@ -290,11 +288,6 @@ static const __le64 iwl_ci_mask[][3] = { }, }; -static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = { - cpu_to_le32(0x2e402280), - cpu_to_le32(0x7711a751), -}; - struct corunning_block_luts { u8 range; __le32 lut20[BT_COEX_CORUN_LUT_SIZE]; @@ -593,7 +586,8 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm) } bt_cmd->max_kill = 5; - bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD; + bt_cmd->bt4_antenna_isolation_thr = + IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS; bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling; bt_cmd->bt4_tx_tx_delta_freq_thr = 15; 
bt_cmd->bt4_tx_rx_max_freq0 = 15; @@ -618,7 +612,9 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm) BT_VALID_ANT_ISOLATION_THRS | BT_VALID_TXTX_DELTA_FREQ_THRS | BT_VALID_TXRX_MAX_FREQ_0 | - BT_VALID_SYNC_TO_SCO); + BT_VALID_SYNC_TO_SCO | + BT_VALID_TTC | + BT_VALID_RRC); if (IWL_MVM_BT_COEX_SYNC2SCO) bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO); @@ -634,6 +630,12 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm) bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT); } + if (IWL_MVM_BT_COEX_TTC) + bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC); + + if (IWL_MVM_BT_COEX_RRC) + bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC); + if (mvm->cfg->bt_shared_single_ant) memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant, sizeof(iwl_single_shared_ant)); @@ -649,8 +651,8 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm) memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost, sizeof(iwl_bt_prio_boost)); - memcpy(&bt_cmd->bt4_multiprio_lut, iwl_bt_mprio_lut, - sizeof(iwl_bt_mprio_lut)); + bt_cmd->bt4_multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0); + bt_cmd->bt4_multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1); send_cmd: memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old)); @@ -830,6 +832,9 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, if (!vif->bss_conf.assoc) smps_mode = IEEE80211_SMPS_AUTOMATIC; + if (data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id)) + smps_mode = IEEE80211_SMPS_AUTOMATIC; + IWL_DEBUG_COEX(data->mvm, "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n", mvmvif->id, data->notif->bt_status, bt_activity_grading, @@ -1162,6 +1167,12 @@ bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm, return lut_type != BT_COEX_LOOSE_LUT; } +bool iwl_mvm_bt_coex_is_ant_avail_old(struct iwl_mvm *mvm, u8 ant) +{ + u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading); + return ag < BT_HIGH_TRAFFIC; +} + bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm) { u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading); diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h index d4dfbe4cb66da5330b9e06251e0766a2ceaa8bea..3bd93476ec1c8fa855fb0576904b6393dd9594d4 100644 --- a/drivers/net/wireless/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/iwlwifi/mvm/constants.h @@ -92,8 +92,15 @@ #define IWL_MVM_BT_COEX_SYNC2SCO 1 #define IWL_MVM_BT_COEX_CORUNNING 0 #define IWL_MVM_BT_COEX_MPLUT 1 +#define IWL_MVM_BT_COEX_RRC 1 +#define IWL_MVM_BT_COEX_TTC 1 +#define IWL_MVM_BT_COEX_MPLUT_REG0 0x28412201 +#define IWL_MVM_BT_COEX_MPLUT_REG1 0x11118451 +#define IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS 30 #define IWL_MVM_FW_MCAST_FILTER_PASS_ALL 0 +#define IWL_MVM_FW_BCAST_FILTER_PASS_ALL 0 #define IWL_MVM_QUOTA_THRESHOLD 8 #define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0 +#define IWL_MVM_RS_DISABLE_MIMO 0 #endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index c17be0fb7283a4f51c570002d1fc7c743d354daa..744de262373ea38acc55dc32e1f5d1f6181be154 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -601,33 +601,6 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm, return ret; } -struct iwl_d3_iter_data { - struct iwl_mvm *mvm; - struct ieee80211_vif *vif; - bool error; -}; - -static void iwl_mvm_d3_iface_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_d3_iter_data *data = _data; - struct iwl_mvm_vif *mvmvif = 
iwl_mvm_vif_from_mac80211(vif); - - if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) - return; - - if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) - return; - - if (data->vif) { - IWL_ERR(data->mvm, "More than one managed interface active!\n"); - data->error = true; - return; - } - - data->vif = vif; -} - static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *ap_sta) { @@ -783,130 +756,81 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) IWL_ERR(mvm, "failed to set non-QoS seqno\n"); } -static int -iwl_mvm_send_wowlan_config_cmd(struct iwl_mvm *mvm, - const struct iwl_wowlan_config_cmd_v3 *cmd) +static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm) { - /* start only with the v2 part of the command */ - u16 cmd_len = sizeof(cmd->common); - - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID) - cmd_len = sizeof(*cmd); - - return iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, - cmd_len, cmd); -} - -static int __iwl_mvm_suspend(struct ieee80211_hw *hw, - struct cfg80211_wowlan *wowlan, - bool test) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_d3_iter_data suspend_iter_data = { - .mvm = mvm, - }; - struct ieee80211_vif *vif; - struct iwl_mvm_vif *mvmvif; - struct ieee80211_sta *ap_sta; - struct iwl_mvm_sta *mvm_ap_sta; - struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {}; - struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {}; - struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; - struct iwl_d3_manager_config d3_cfg_cmd_data = { - /* - * Program the minimum sleep time to 10 seconds, as many - * platforms have issues processing a wakeup signal while - * still being in the process of suspending. - */ - .min_sleep_time = cpu_to_le32(10 * 1000 * 1000), - }; - struct iwl_host_cmd d3_cfg_cmd = { - .id = D3_CONFIG_CMD, - .flags = CMD_WANT_SKB, - .data[0] = &d3_cfg_cmd_data, - .len[0] = sizeof(d3_cfg_cmd_data), - }; - struct wowlan_key_data key_data = { - .use_rsc_tsc = false, - .tkip = &tkip_cmd, - .use_tkip = false, - }; - int ret; - int len __maybe_unused; - - if (!wowlan) { - /* - * mac80211 shouldn't get here, but for D3 test - * it doesn't warrant a warning - */ - WARN_ON(!test); - return -EINVAL; - } - - key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); - if (!key_data.rsc_tsc) - return -ENOMEM; + iwl_mvm_cancel_scan(mvm); - mutex_lock(&mvm->mutex); + iwl_trans_stop_device(mvm->trans); - /* see if there's only a single BSS vif and it's associated */ - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_d3_iface_iterator, &suspend_iter_data); + /* + * Set the HW restart bit -- this is mostly true as we're + * going to load new firmware and reprogram that, though + * the reprogramming is going to be manual to avoid adding + * all the MACs that aren't supported. + * We don't have to clear up everything though because the + * reprogramming is manual. When we resume, we'll actually + * go through a proper restart sequence again to switch + * back to the runtime firmware image.
+ */ + set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); - if (suspend_iter_data.error || !suspend_iter_data.vif) { - ret = 1; - goto out_noreset; - } + /* We reprogram keys and shouldn't allocate new key indices */ + memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); - vif = suspend_iter_data.vif; - mvmvif = iwl_mvm_vif_from_mac80211(vif); + mvm->ptk_ivlen = 0; + mvm->ptk_icvlen = 0; + mvm->ptk_ivlen = 0; + mvm->ptk_icvlen = 0; - ap_sta = rcu_dereference_protected( - mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], - lockdep_is_held(&mvm->mutex)); - if (IS_ERR_OR_NULL(ap_sta)) { - ret = -EINVAL; - goto out_noreset; - } + return iwl_mvm_load_d3_fw(mvm); +} - mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv; +static int +iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, + struct cfg80211_wowlan *wowlan, + struct iwl_wowlan_config_cmd *wowlan_config_cmd, + struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif, + struct ieee80211_sta *ap_sta) +{ + int ret; + struct iwl_mvm_sta *mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv; - /* TODO: wowlan_config_cmd.common.wowlan_ba_teardown_tids */ + /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */ - wowlan_config_cmd.common.is_11n_connection = + wowlan_config_cmd->is_11n_connection = ap_sta->ht_cap.ht_supported; /* Query the last used seqno and set it */ ret = iwl_mvm_get_last_nonqos_seq(mvm, vif); if (ret < 0) - goto out_noreset; - wowlan_config_cmd.common.non_qos_seq = cpu_to_le16(ret); + return ret; + + wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret); - iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, &wowlan_config_cmd.common); + iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd); if (wowlan->disconnect) - wowlan_config_cmd.common.wakeup_filter |= + wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS | IWL_WOWLAN_WAKEUP_LINK_CHANGE); if (wowlan->magic_pkt) - wowlan_config_cmd.common.wakeup_filter |= + wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET); if (wowlan->gtk_rekey_failure) - wowlan_config_cmd.common.wakeup_filter |= + wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL); if (wowlan->eap_identity_req) - wowlan_config_cmd.common.wakeup_filter |= + wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ); if (wowlan->four_way_handshake) - wowlan_config_cmd.common.wakeup_filter |= + wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE); if (wowlan->n_patterns) - wowlan_config_cmd.common.wakeup_filter |= + wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH); if (wowlan->rfkill_release) - wowlan_config_cmd.common.wakeup_filter |= + wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT); if (wowlan->tcp) { @@ -914,44 +838,43 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, * Set the "link change" (really "link lost") flag as well * since that implies losing the TCP connection. */ - wowlan_config_cmd.common.wakeup_filter |= + wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS | IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE | IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET | IWL_WOWLAN_WAKEUP_LINK_CHANGE); } - iwl_mvm_cancel_scan(mvm); - - iwl_trans_stop_device(mvm->trans); - - /* - * Set the HW restart bit -- this is mostly true as we're - * going to load new firmware and reprogram that, though - * the reprogramming is going to be manual to avoid adding - * all the MACs that aren't support. 
- * We don't have to clear up everything though because the - * reprogramming is manual. When we resume, we'll actually - * go through a proper restart sequence again to switch - * back to the runtime firmware image. - */ - set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); - - /* We reprogram keys and shouldn't allocate new key indices */ - memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); + return 0; +} - mvm->ptk_ivlen = 0; - mvm->ptk_icvlen = 0; - mvm->ptk_ivlen = 0; - mvm->ptk_icvlen = 0; +static int +iwl_mvm_wowlan_config(struct iwl_mvm *mvm, + struct cfg80211_wowlan *wowlan, + struct iwl_wowlan_config_cmd *wowlan_config_cmd, + struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif, + struct ieee80211_sta *ap_sta) +{ + struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {}; + struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; + struct wowlan_key_data key_data = { + .use_rsc_tsc = false, + .tkip = &tkip_cmd, + .use_tkip = false, + }; + int ret; - ret = iwl_mvm_load_d3_fw(mvm); + ret = iwl_mvm_switch_to_d3(mvm); if (ret) - goto out; + return ret; ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta); if (ret) - goto out; + return ret; + + key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); + if (!key_data.rsc_tsc) + return -ENOMEM; if (!iwlwifi_mod_params.sw_crypto) { /* @@ -1010,7 +933,9 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, } } - ret = iwl_mvm_send_wowlan_config_cmd(mvm, &wowlan_config_cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, + sizeof(*wowlan_config_cmd), + wowlan_config_cmd); if (ret) goto out; @@ -1023,8 +948,153 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, goto out; ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp); + +out: + kfree(key_data.rsc_tsc); + return ret; +} + +static int +iwl_mvm_netdetect_config(struct iwl_mvm *mvm, + struct cfg80211_wowlan *wowlan, + struct cfg80211_sched_scan_request *nd_config, + struct ieee80211_vif *vif) +{ + struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; + int ret; + + ret = iwl_mvm_switch_to_d3(mvm); if (ret) - goto out; + return ret; + + /* rfkill release can be either for wowlan or netdetect */ + if (wowlan->rfkill_release) + wowlan_config_cmd.wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT); + + ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, + sizeof(wowlan_config_cmd), + &wowlan_config_cmd); + if (ret) + return ret; + + ret = iwl_mvm_scan_offload_start(mvm, vif, nd_config, &mvm->nd_ies); + if (ret) + return ret; + + if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels)) + return -EBUSY; + + /* save the sched scan matchsets... 
*/ + if (nd_config->n_match_sets) { + mvm->nd_match_sets = kmemdup(nd_config->match_sets, + sizeof(*nd_config->match_sets) * + nd_config->n_match_sets, + GFP_KERNEL); + if (mvm->nd_match_sets) + mvm->n_nd_match_sets = nd_config->n_match_sets; + } + + /* ...and the sched scan channels for later reporting */ + mvm->nd_channels = kmemdup(nd_config->channels, + sizeof(*nd_config->channels) * + nd_config->n_channels, + GFP_KERNEL); + if (mvm->nd_channels) + mvm->n_nd_channels = nd_config->n_channels; + + return 0; +} + +static void iwl_mvm_free_nd(struct iwl_mvm *mvm) +{ + kfree(mvm->nd_match_sets); + mvm->nd_match_sets = NULL; + mvm->n_nd_match_sets = 0; + kfree(mvm->nd_channels); + mvm->nd_channels = NULL; + mvm->n_nd_channels = 0; +} + +static int __iwl_mvm_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan, + bool test) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct ieee80211_vif *vif = NULL; + struct iwl_mvm_vif *mvmvif = NULL; + struct ieee80211_sta *ap_sta = NULL; + struct iwl_d3_manager_config d3_cfg_cmd_data = { + /* + * Program the minimum sleep time to 10 seconds, as many + * platforms have issues processing a wakeup signal while + * still being in the process of suspending. + */ + .min_sleep_time = cpu_to_le32(10 * 1000 * 1000), + }; + struct iwl_host_cmd d3_cfg_cmd = { + .id = D3_CONFIG_CMD, + .flags = CMD_WANT_SKB, + .data[0] = &d3_cfg_cmd_data, + .len[0] = sizeof(d3_cfg_cmd_data), + }; + int ret; + int len __maybe_unused; + + if (!wowlan) { + /* + * mac80211 shouldn't get here, but for D3 test + * it doesn't warrant a warning + */ + WARN_ON(!test); + return -EINVAL; + } + + mutex_lock(&mvm->mutex); + + vif = iwl_mvm_get_bss_vif(mvm); + if (IS_ERR_OR_NULL(vif)) { + ret = 1; + goto out_noreset; + } + + mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) { + /* if we're not associated, this must be netdetect */ + if (!wowlan->nd_config && !mvm->nd_config) { + ret = 1; + goto out_noreset; + } + + ret = iwl_mvm_netdetect_config( + mvm, wowlan, wowlan->nd_config ?: mvm->nd_config, vif); + if (ret) + goto out; + + mvm->net_detect = true; + } else { + struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; + + ap_sta = rcu_dereference_protected( + mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], + lockdep_is_held(&mvm->mutex)); + if (IS_ERR_OR_NULL(ap_sta)) { + ret = -EINVAL; + goto out_noreset; + } + + ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd, + vif, mvmvif, ap_sta); + if (ret) + goto out_noreset; + ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd, + vif, mvmvif, ap_sta); + if (ret) + goto out; + + mvm->net_detect = false; + } ret = iwl_mvm_power_update_device(mvm); if (ret) @@ -1057,11 +1127,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, iwl_trans_d3_suspend(mvm->trans, test); out: - if (ret < 0) + if (ret < 0) { ieee80211_restart_hw(mvm->hw); + iwl_mvm_free_nd(mvm); + } out_noreset: - kfree(key_data.rsc_tsc); - mutex_unlock(&mvm->mutex); return ret; @@ -1071,6 +1141,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + iwl_trans_suspend(mvm->trans); if (iwl_mvm_is_d0i3_supported(mvm)) { mutex_lock(&mvm->d0i3_suspend_mutex); __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags); @@ -1449,9 +1520,8 @@ out: return true; } -/* releases the MVM mutex */ -static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) +static struct iwl_wowlan_status * 
+iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { u32 base = mvm->error_event_table; struct error_table_start { @@ -1463,19 +1533,15 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, .id = WOWLAN_GET_STATUSES, .flags = CMD_WANT_SKB, }; - struct iwl_wowlan_status_data status; - struct iwl_wowlan_status *fw_status; - int ret, len, status_size, i; - bool keep; - struct ieee80211_sta *ap_sta; - struct iwl_mvm_sta *mvm_ap_sta; + struct iwl_wowlan_status *status, *fw_status; + int ret, len, status_size; iwl_trans_read_mem_bytes(mvm->trans, base, &err_info, sizeof(err_info)); if (err_info.valid) { - IWL_INFO(mvm, "error table is valid (%d)\n", - err_info.valid); + IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n", + err_info.valid, err_info.error_id); if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) { struct cfg80211_wowlan_wakeup wakeup = { .rfkill_release = true, @@ -1483,7 +1549,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL); } - goto out_unlock; + return ERR_PTR(-EIO); } /* only for tracing for now */ @@ -1494,22 +1560,53 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) { IWL_ERR(mvm, "failed to query status (%d)\n", ret); - goto out_unlock; + return ERR_PTR(ret); } /* RF-kill already asserted again... */ - if (!cmd.resp_pkt) - goto out_unlock; + if (!cmd.resp_pkt) { + ret = -ERFKILL; + goto out_free_resp; + } status_size = sizeof(*fw_status); len = iwl_rx_packet_payload_len(cmd.resp_pkt); if (len < status_size) { IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); + ret = -EIO; goto out_free_resp; } - fw_status = (void *)cmd.resp_pkt->data; + status = (void *)cmd.resp_pkt->data; + if (len != (status_size + + ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) { + IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); + ret = -EIO; + goto out_free_resp; + } + + fw_status = kmemdup(status, len, GFP_KERNEL); + +out_free_resp: + iwl_free_resp(&cmd); + return ret ? 
ERR_PTR(ret) : fw_status; +} + +/* releases the MVM mutex */ +static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_wowlan_status_data status; + struct iwl_wowlan_status *fw_status; + int i; + bool keep; + struct ieee80211_sta *ap_sta; + struct iwl_mvm_sta *mvm_ap_sta; + + fw_status = iwl_mvm_get_wakeup_status(mvm, vif); + if (IS_ERR_OR_NULL(fw_status)) + goto out_unlock; status.pattern_number = le16_to_cpu(fw_status->pattern_number); for (i = 0; i < 8; i++) @@ -1522,17 +1619,12 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, le32_to_cpu(fw_status->wake_packet_bufsize); status.wake_packet = fw_status->wake_packet; - if (len != status_size + ALIGN(status.wake_packet_bufsize, 4)) { - IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); - goto out_free_resp; - } - /* still at hard-coded place 0 for D3 image */ ap_sta = rcu_dereference_protected( mvm->fw_id_to_mac_id[0], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(ap_sta)) - goto out_free_resp; + goto out_free; mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv; for (i = 0; i < IWL_MAX_TID_COUNT; i++) { @@ -1549,16 +1641,151 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status); - iwl_free_resp(&cmd); + kfree(fw_status); return keep; - out_free_resp: - iwl_free_resp(&cmd); - out_unlock: +out_free: + kfree(fw_status); +out_unlock: mutex_unlock(&mvm->mutex); return false; } +struct iwl_mvm_nd_query_results { + u32 matched_profiles; + struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES]; +}; + +static int +iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm, + struct iwl_mvm_nd_query_results *results) +{ + struct iwl_scan_offload_profiles_query *query; + struct iwl_host_cmd cmd = { + .id = SCAN_OFFLOAD_PROFILES_QUERY_CMD, + .flags = CMD_WANT_SKB, + }; + int ret, len; + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) { + IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret); + return ret; + } + + /* RF-kill already asserted again... 
*/ + if (!cmd.resp_pkt) { + ret = -ERFKILL; + goto out_free_resp; + } + + len = iwl_rx_packet_payload_len(cmd.resp_pkt); + if (len < sizeof(*query)) { + IWL_ERR(mvm, "Invalid scan offload profiles query response!\n"); + ret = -EIO; + goto out_free_resp; + } + + query = (void *)cmd.resp_pkt->data; + + results->matched_profiles = le32_to_cpu(query->matched_profiles); + memcpy(results->matches, query->matches, sizeof(results->matches)); + +out_free_resp: + iwl_free_resp(&cmd); + return ret; +} + +static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct cfg80211_wowlan_nd_info *net_detect = NULL; + struct cfg80211_wowlan_wakeup wakeup = { + .pattern_idx = -1, + }; + struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; + struct iwl_mvm_nd_query_results query; + struct iwl_wowlan_status *fw_status; + unsigned long matched_profiles; + u32 reasons = 0; + int i, j, n_matches, ret; + + fw_status = iwl_mvm_get_wakeup_status(mvm, vif); + if (!IS_ERR_OR_NULL(fw_status)) + reasons = le32_to_cpu(fw_status->wakeup_reasons); + + if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) + wakeup.rfkill_release = true; + + if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) + goto out; + + ret = iwl_mvm_netdetect_query_results(mvm, &query); + if (ret || !query.matched_profiles) { + wakeup_report = NULL; + goto out; + } + + matched_profiles = query.matched_profiles; + if (mvm->n_nd_match_sets) { + n_matches = hweight_long(matched_profiles); + } else { + IWL_ERR(mvm, "no net detect match information available\n"); + n_matches = 0; + } + + net_detect = kzalloc(sizeof(*net_detect) + + (n_matches * sizeof(net_detect->matches[0])), + GFP_KERNEL); + if (!net_detect || !n_matches) + goto out_report_nd; + + for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) { + struct iwl_scan_offload_profile_match *fw_match; + struct cfg80211_wowlan_nd_match *match; + int n_channels = 0; + + fw_match = &query.matches[i]; + + for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++) + n_channels += hweight8(fw_match->matching_channels[j]); + + match = kzalloc(sizeof(*match) + + (n_channels * sizeof(*match->channels)), + GFP_KERNEL); + if (!match) + goto out_report_nd; + + net_detect->matches[net_detect->n_matches++] = match; + + match->ssid.ssid_len = mvm->nd_match_sets[i].ssid.ssid_len; + memcpy(match->ssid.ssid, mvm->nd_match_sets[i].ssid.ssid, + match->ssid.ssid_len); + + if (mvm->n_nd_channels < n_channels) + continue; + + for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; j++) + if (fw_match->matching_channels[j / 8] & (BIT(j % 8))) + match->channels[match->n_channels++] = + mvm->nd_channels[j]->center_freq; + } + +out_report_nd: + wakeup.net_detect = net_detect; +out: + iwl_mvm_free_nd(mvm); + + mutex_unlock(&mvm->mutex); + ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); + + if (net_detect) { + for (i = 0; i < net_detect->n_matches; i++) + kfree(net_detect->matches[i]); + kfree(net_detect); + } +} + static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm) { #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -1592,9 +1819,6 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac, static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) { - struct iwl_d3_iter_data resume_iter_data = { - .mvm = mvm, - }; struct ieee80211_vif *vif = NULL; int ret; enum iwl_d3_status d3_status; @@ -1603,15 +1827,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) mutex_lock(&mvm->mutex); /* get the BSS vif pointer again */ - ieee80211_iterate_active_interfaces_atomic( - 
mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_d3_iface_iterator, &resume_iter_data); - - if (WARN_ON(resume_iter_data.error || !resume_iter_data.vif)) + vif = iwl_mvm_get_bss_vif(mvm); + if (IS_ERR_OR_NULL(vif)) goto out_unlock; - vif = resume_iter_data.vif; - ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test); if (ret) goto out_unlock; @@ -1624,11 +1843,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) /* query SRAM first in case we want event logging */ iwl_mvm_read_d3_sram(mvm); - keep = iwl_mvm_query_wakeup_reasons(mvm, vif); + if (mvm->net_detect) { + iwl_mvm_query_netdetect_reasons(mvm, vif); + } else { + keep = iwl_mvm_query_wakeup_reasons(mvm, vif); #ifdef CONFIG_IWLWIFI_DEBUGFS - if (keep) - mvm->keep_vif = vif; + if (keep) + mvm->keep_vif = vif; #endif + } /* has unlocked the mutex, so skip that */ goto out; @@ -1643,6 +1866,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) /* return 1 to reconfigure the device */ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); + set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); return 1; } @@ -1650,18 +1874,10 @@ int iwl_mvm_resume(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - if (iwl_mvm_is_d0i3_supported(mvm)) { - bool exit_now; + iwl_trans_resume(mvm->trans); - mutex_lock(&mvm->d0i3_suspend_mutex); - __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags); - exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP, - &mvm->d0i3_suspend_flags); - mutex_unlock(&mvm->d0i3_suspend_mutex); - if (exit_now) - _iwl_mvm_exit_d0i3(mvm); + if (iwl_mvm_is_d0i3_supported(mvm)) return 0; - } return __iwl_mvm_resume(mvm, false); } @@ -1741,7 +1957,9 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) int remaining_time = 10; mvm->d3_test_active = false; + rtnl_lock(); __iwl_mvm_resume(mvm, true); + rtnl_unlock(); iwl_abort_notification_waits(&mvm->notif_wait); ieee80211_restart_hw(mvm->hw); diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index 50527a9bb26739b572e1b0faad8b7afa5114662d..33bf915cd7eabfd4a8b198a1a2394c8af7e62e44 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -121,78 +121,6 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf, return ret; } -static int iwl_dbgfs_fw_error_dump_open(struct inode *inode, struct file *file) -{ - struct iwl_mvm *mvm = inode->i_private; - int ret; - - if (!mvm) - return -EINVAL; - - mutex_lock(&mvm->mutex); - if (!mvm->fw_error_dump) { - ret = -ENODATA; - goto out; - } - - file->private_data = mvm->fw_error_dump; - mvm->fw_error_dump = NULL; - ret = 0; - -out: - mutex_unlock(&mvm->mutex); - return ret; -} - -static ssize_t iwl_dbgfs_fw_error_dump_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm_dump_ptrs *dump_ptrs = (void *)file->private_data; - ssize_t bytes_read = 0; - ssize_t bytes_read_trans = 0; - - if (*ppos < dump_ptrs->op_mode_len) - bytes_read += - simple_read_from_buffer(user_buf, count, ppos, - dump_ptrs->op_mode_ptr, - dump_ptrs->op_mode_len); - - if (bytes_read < 0 || *ppos < dump_ptrs->op_mode_len) - return bytes_read; - - if (dump_ptrs->trans_ptr) { - *ppos -= dump_ptrs->op_mode_len; - bytes_read_trans = - simple_read_from_buffer(user_buf + bytes_read, - count - bytes_read, ppos, - dump_ptrs->trans_ptr->data, - dump_ptrs->trans_ptr->len); - *ppos += dump_ptrs->op_mode_len; - - if (bytes_read_trans >= 0) - bytes_read += 
bytes_read_trans; - else if (!bytes_read) - /* propagate the failure */ - return bytes_read_trans; - } - - return bytes_read; - -} - -static int iwl_dbgfs_fw_error_dump_release(struct inode *inode, - struct file *file) -{ - struct iwl_mvm_dump_ptrs *dump_ptrs = (void *)file->private_data; - - vfree(dump_ptrs->op_mode_ptr); - vfree(dump_ptrs->trans_ptr); - kfree(dump_ptrs); - - return 0; -} - static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -1008,7 +936,11 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf, if (scan_rx_ant & ~mvm->fw->valid_rx_ant) return -EINVAL; - mvm->scan_rx_ant = scan_rx_ant; + if (mvm->scan_rx_ant != scan_rx_ant) { + mvm->scan_rx_ant = scan_rx_ant; + if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) + iwl_mvm_config_scan(mvm); + } return count; } @@ -1250,6 +1182,118 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf, return ret; } + +#define MAX_NUM_ND_MATCHSETS 10 + +static ssize_t iwl_dbgfs_netdetect_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + const char *seps = ",\n"; + char *buf_ptr = buf; + char *value_str = NULL; + int ret, i; + + /* TODO: don't free if write is being called several times in one go */ + if (mvm->nd_config) { + kfree(mvm->nd_config->match_sets); + kfree(mvm->nd_config); + mvm->nd_config = NULL; + } + + mvm->nd_config = kzalloc(sizeof(*mvm->nd_config) + + (11 * sizeof(struct ieee80211_channel *)), + GFP_KERNEL); + if (!mvm->nd_config) { + ret = -ENOMEM; + goto out_free; + } + + mvm->nd_config->n_channels = 11; + mvm->nd_config->scan_width = NL80211_BSS_CHAN_WIDTH_20; + mvm->nd_config->interval = 5; + mvm->nd_config->min_rssi_thold = -80; + for (i = 0; i < mvm->nd_config->n_channels; i++) + mvm->nd_config->channels[i] = &mvm->nvm_data->channels[i]; + + mvm->nd_config->match_sets = + kcalloc(MAX_NUM_ND_MATCHSETS, + sizeof(*mvm->nd_config->match_sets), + GFP_KERNEL); + if (!mvm->nd_config->match_sets) { + ret = -ENOMEM; + goto out_free; + } + + while ((value_str = strsep(&buf_ptr, seps)) && + strlen(value_str)) { + struct cfg80211_match_set *set; + + if (mvm->nd_config->n_match_sets >= MAX_NUM_ND_MATCHSETS) { + ret = -EINVAL; + goto out_free; + } + + set = &mvm->nd_config->match_sets[mvm->nd_config->n_match_sets]; + set->ssid.ssid_len = strlen(value_str); + + if (set->ssid.ssid_len > IEEE80211_MAX_SSID_LEN) { + ret = -EINVAL; + goto out_free; + } + + memcpy(set->ssid.ssid, value_str, set->ssid.ssid_len); + + mvm->nd_config->n_match_sets++; + } + + ret = count; + + if (mvm->nd_config->n_match_sets) + goto out; + +out_free: + if (mvm->nd_config) + kfree(mvm->nd_config->match_sets); + kfree(mvm->nd_config); + mvm->nd_config = NULL; +out: + return ret; +} + +static ssize_t +iwl_dbgfs_netdetect_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + size_t bufsz, ret; + char *buf; + int i, n_match_sets, pos = 0; + + n_match_sets = mvm->nd_config ? 
mvm->nd_config->n_match_sets : 0; + + bufsz = n_match_sets * (IEEE80211_MAX_SSID_LEN + 1) + 1; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (i = 0; i < n_match_sets; i++) { + if (pos + + mvm->nd_config->match_sets[i].ssid.ssid_len + 2 > bufsz) { + ret = -EIO; + goto out; + } + + memcpy(buf + pos, mvm->nd_config->match_sets[i].ssid.ssid, + mvm->nd_config->match_sets[i].ssid.ssid_len); + pos += mvm->nd_config->match_sets[i].ssid.ssid_len; + buf[pos++] = '\n'; + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); +out: + kfree(buf); + return ret; +} #endif #define PRINT_MVM_REF(ref) do { \ @@ -1295,6 +1339,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file, PRINT_MVM_REF(IWL_MVM_REF_NMI); PRINT_MVM_REF(IWL_MVM_REF_TM_CMD); PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK); + PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } @@ -1415,12 +1460,6 @@ MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8); -static const struct file_operations iwl_dbgfs_fw_error_dump_ops = { - .open = iwl_dbgfs_fw_error_dump_open, - .read = iwl_dbgfs_fw_error_dump_read, - .release = iwl_dbgfs_fw_error_dump_release, -}; - #ifdef CONFIG_IWLWIFI_BCAST_FILTERING MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256); MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256); @@ -1428,6 +1467,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256); #ifdef CONFIG_PM_SLEEP MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(netdetect, 384); #endif int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) @@ -1446,7 +1486,6 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) S_IWUSR | S_IRUSR); MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR); - MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, @@ -1487,6 +1526,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) if (!debugfs_create_bool("d3_wake_sysassert", S_IRUSR | S_IWUSR, mvm->debugfs_dir, &mvm->d3_wake_sysassert)) goto err; + MVM_DEBUGFS_ADD_FILE(netdetect, mvm->debugfs_dir, S_IRUSR | S_IWUSR); #endif if (!debugfs_create_u8("low_latency_agg_frame_limit", S_IRUSR | S_IWUSR, diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h index 816883f9ff94f541906cb945d0bf8002f8f3770b..f3b11897991eeee2e1d88329d075e52a25a106e3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h @@ -84,6 +84,8 @@ * @BT_COEX_SYNC2SCO: * @BT_COEX_CORUNNING: * @BT_COEX_MPLUT: + * @BT_COEX_TTC: + * @BT_COEX_RRC: * * The COEX_MODE must be set for each command. Even if it is not changed. 
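+ * + * For example (illustrative only, mirroring what iwl_send_bt_init_conf_old() + * earlier in this patch already does), enabling TTC and RRC means setting + * both the flags and their corresponding valid bits: + * + * bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC | BT_COEX_RRC); + * bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_TTC | BT_VALID_RRC);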
*/ @@ -100,6 +102,8 @@ enum iwl_bt_coex_flags { BT_COEX_SYNC2SCO = BIT(7), BT_COEX_CORUNNING = BIT(8), BT_COEX_MPLUT = BIT(9), + BT_COEX_TTC = BIT(20), + BT_COEX_RRC = BIT(21), }; /* @@ -127,6 +131,8 @@ enum iwl_bt_coex_valid_bit_msk { BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16), BT_VALID_TXRX_MAX_FREQ_0 = BIT(17), BT_VALID_SYNC_TO_SCO = BIT(18), + BT_VALID_TTC = BIT(20), + BT_VALID_RRC = BIT(21), }; /** @@ -506,7 +512,8 @@ struct iwl_bt_coex_profile_notif_old { u8 bt_agg_traffic_load; u8 bt_ci_compliance; u8 ttc_enabled; - __le16 reserved; + u8 rrc_enabled; + u8 reserved; __le32 primary_ch_lut; __le32 secondary_ch_lut; diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h index e74cdf2132f87a5a94ec2ff9f0c588b709833f24..6d3bea5c59d1eb8ab42bca2f0c1a4523a23b8291 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h @@ -241,16 +241,12 @@ enum iwl_wowlan_wakeup_filters { IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16), }; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */ -struct iwl_wowlan_config_cmd_v2 { +struct iwl_wowlan_config_cmd { __le32 wakeup_filter; __le16 non_qos_seq; __le16 qos_seq[8]; u8 wowlan_ba_teardown_tids; u8 is_11n_connection; -} __packed; /* WOWLAN_CONFIG_API_S_VER_2 */ - -struct iwl_wowlan_config_cmd_v3 { - struct iwl_wowlan_config_cmd_v2 common; u8 offloading_tid; u8 reserved[3]; } __packed; /* WOWLAN_CONFIG_API_S_VER_3 */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h index 2fd8ad4633e0ee717096461398c96fc67181d288..430020047b777e5ff830d2ca0485cff86b64278e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h @@ -370,7 +370,7 @@ struct iwl_beacon_filter_cmd { #define IWL_BF_DEBUG_FLAG_DEFAULT 0 #define IWL_BF_DEBUG_FLAG_D0I3 0 -#define IWL_BF_ESCAPE_TIMER_DEFAULT 50 +#define IWL_BF_ESCAPE_TIMER_DEFAULT 0 #define IWL_BF_ESCAPE_TIMER_D0I3 0 #define IWL_BF_ESCAPE_TIMER_MAX 1024 #define IWL_BF_ESCAPE_TIMER_MIN 0 diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 1354c68f6468a79d78da53882e053aea6c900d9c..1f2acf47bfb2c78ddb1ee623f9254e35fa8eb109 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -794,4 +794,301 @@ struct iwl_periodic_scan_complete { __le32 reserved; } __packed; +/* UMAC Scan API */ + +/** + * struct iwl_mvm_umac_cmd_hdr - Command header for UMAC commands + * @size: size of the command (not including header) + * @reserved0: for future use and alignment + * @ver: API version number + */ +struct iwl_mvm_umac_cmd_hdr { + __le16 size; + u8 reserved0; + u8 ver; +} __packed; + +#define IWL_MVM_MAX_SIMULTANEOUS_SCANS 8 + +enum scan_config_flags { + SCAN_CONFIG_FLAG_ACTIVATE = BIT(0), + SCAN_CONFIG_FLAG_DEACTIVATE = BIT(1), + SCAN_CONFIG_FLAG_FORBID_CHUB_REQS = BIT(2), + SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS = BIT(3), + SCAN_CONFIG_FLAG_SET_TX_CHAINS = BIT(8), + SCAN_CONFIG_FLAG_SET_RX_CHAINS = BIT(9), + SCAN_CONFIG_FLAG_SET_AUX_STA_ID = BIT(10), + SCAN_CONFIG_FLAG_SET_ALL_TIMES = BIT(11), + SCAN_CONFIG_FLAG_SET_EFFECTIVE_TIMES = BIT(12), + SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS = BIT(13), + SCAN_CONFIG_FLAG_SET_LEGACY_RATES = BIT(14), + SCAN_CONFIG_FLAG_SET_MAC_ADDR = BIT(15), + SCAN_CONFIG_FLAG_SET_FRAGMENTED = BIT(16), + SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED = BIT(17), + SCAN_CONFIG_FLAG_SET_CAM_MODE = BIT(18), + SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = 
BIT(19), + SCAN_CONFIG_FLAG_SET_PROMISC_MODE = BIT(20), + SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = BIT(21), + + /* Bits 26-31 are for num of channels in channel_array */ +#define SCAN_CONFIG_N_CHANNELS(n) ((n) << 26) +}; + +enum scan_config_rates { + /* OFDM basic rates */ + SCAN_CONFIG_RATE_6M = BIT(0), + SCAN_CONFIG_RATE_9M = BIT(1), + SCAN_CONFIG_RATE_12M = BIT(2), + SCAN_CONFIG_RATE_18M = BIT(3), + SCAN_CONFIG_RATE_24M = BIT(4), + SCAN_CONFIG_RATE_36M = BIT(5), + SCAN_CONFIG_RATE_48M = BIT(6), + SCAN_CONFIG_RATE_54M = BIT(7), + /* CCK basic rates */ + SCAN_CONFIG_RATE_1M = BIT(8), + SCAN_CONFIG_RATE_2M = BIT(9), + SCAN_CONFIG_RATE_5M = BIT(10), + SCAN_CONFIG_RATE_11M = BIT(11), + + /* Bits 16-27 are for supported rates */ +#define SCAN_CONFIG_SUPPORTED_RATE(rate) ((rate) << 16) +}; + +enum iwl_channel_flags { + IWL_CHANNEL_FLAG_EBS = BIT(0), + IWL_CHANNEL_FLAG_ACCURATE_EBS = BIT(1), + IWL_CHANNEL_FLAG_EBS_ADD = BIT(2), + IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE = BIT(3), +}; + +/** + * struct iwl_scan_config + * @hdr: umac command header + * @flags: enum scan_config_flags + * @tx_chains: valid_tx antenna - ANT_* definitions + * @rx_chains: valid_rx antenna - ANT_* definitions + * @legacy_rates: default legacy rates - enum scan_config_rates + * @out_of_channel_time: default max out of serving channel time + * @suspend_time: default max suspend time + * @dwell_active: default dwell time for active scan + * @dwell_passive: default dwell time for passive scan + * @dwell_fragmented: default dwell time for fragmented scan + * @reserved: for future use and alignment + * @mac_addr: default mac address to be used in probes + * @bcast_sta_id: the index of the station in the fw + * @channel_flags: default channel flags - enum iwl_channel_flags + * scan_config_channel_flag + * @channel_array: default supported channels + */ +struct iwl_scan_config { + struct iwl_mvm_umac_cmd_hdr hdr; + __le32 flags; + __le32 tx_chains; + __le32 rx_chains; + __le32 legacy_rates; + __le32 out_of_channel_time; + __le32 suspend_time; + u8 dwell_active; + u8 dwell_passive; + u8 dwell_fragmented; + u8 reserved; + u8 mac_addr[ETH_ALEN]; + u8 bcast_sta_id; + u8 channel_flags; + u8 channel_array[]; +} __packed; /* SCAN_CONFIG_DB_CMD_API_S */ + +/** + * enum iwl_umac_scan_flags + * @IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request + * can be preempted by other scan requests with higher priority. + * The low priority scan is aborted. + * @IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver + * when scan starts. + */ +enum iwl_umac_scan_flags { + IWL_UMAC_SCAN_FLAG_PREEMPTIVE = BIT(0), + IWL_UMAC_SCAN_FLAG_START_NOTIF = BIT(1), +}; + +enum iwl_umac_scan_uid_offsets { + IWL_UMAC_SCAN_UID_TYPE_OFFSET = 0, + IWL_UMAC_SCAN_UID_SEQ_OFFSET = 8, +}; + +enum iwl_umac_scan_general_flags { + IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0), + IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1), + IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2), + IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3), + IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4), + IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5), + IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6), + IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7), + IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8), + IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9) +}; + +/** + * struct iwl_scan_channel_cfg_umac + * @flags: bitmap - 0-19: directed scan to i'th ssid. + * @channel_num: channel number 1-13 etc. + * @iter_count: repetition count for the channel.
+ * @iter_interval: interval between two scan iterations on one channel. + */ +struct iwl_scan_channel_cfg_umac { + __le32 flags; + u8 channel_num; + u8 iter_count; + __le16 iter_interval; +} __packed; /* SCAN_CHANNEL_CFG_S_VER2 */ + +/** + * struct iwl_scan_umac_schedule + * @interval: interval in seconds between scan iterations + * @iter_count: num of scan iterations for schedule plan, 0xff for infinite loop + * @reserved: for alignment and future use + */ +struct iwl_scan_umac_schedule { + __le16 interval; + u8 iter_count; + u8 reserved; +} __packed; /* SCAN_SCHED_PARAM_API_S_VER_1 */ + +/** + * struct iwl_scan_req_umac_tail - the rest of the UMAC scan request command + * parameters following the channel configuration array. + * @schedule: two scheduling plans. + * @delay: delay in TUs before starting the first scan iteration + * @reserved: for future use and alignment + * @preq: probe request with IEs blocks + * @direct_scan: list of SSIDs for directed active scan + */ +struct iwl_scan_req_umac_tail { + /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */ + struct iwl_scan_umac_schedule schedule[2]; + __le16 delay; + __le16 reserved; + /* SCAN_PROBE_PARAMS_API_S_VER_1 */ + struct iwl_scan_probe_req preq; + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; +} __packed; + +/** + * struct iwl_scan_req_umac + * @hdr: umac command header + * @flags: &enum iwl_umac_scan_flags + * @uid: scan id, &enum iwl_umac_scan_uid_offsets + * @ooc_priority: out of channel priority - &enum iwl_scan_priority + * @general_flags: &enum iwl_umac_scan_general_flags + * @reserved1: for future use and alignment + * @active_dwell: dwell time for active scan + * @passive_dwell: dwell time for passive scan + * @fragmented_dwell: dwell time for fragmented passive scan + * @max_out_time: max out of serving channel time + * @suspend_time: max suspend time + * @scan_priority: scan internal prioritization &enum iwl_scan_priority + * @channel_flags: &enum iwl_scan_channel_flags + * @n_channels: num of channels in scan request + * @reserved2: for future use and alignment + * @data: &struct iwl_scan_channel_cfg_umac and + * &struct iwl_scan_req_umac_tail + */ +struct iwl_scan_req_umac { + struct iwl_mvm_umac_cmd_hdr hdr; + __le32 flags; + __le32 uid; + __le32 ooc_priority; + /* SCAN_GENERAL_PARAMS_API_S_VER_1 */ + __le32 general_flags; + u8 reserved1; + u8 active_dwell; + u8 passive_dwell; + u8 fragmented_dwell; + __le32 max_out_time; + __le32 suspend_time; + __le32 scan_priority; + /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */ + u8 channel_flags; + u8 n_channels; + __le16 reserved2; + u8 data[]; +} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */ + +/** + * struct iwl_umac_scan_abort + * @hdr: umac command header + * @uid: scan id, &enum iwl_umac_scan_uid_offsets + * @flags: reserved + */ +struct iwl_umac_scan_abort { + struct iwl_mvm_umac_cmd_hdr hdr; + __le32 uid; + __le32 flags; +} __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */ + +/** + * struct iwl_umac_scan_complete + * @uid: scan id, &enum iwl_umac_scan_uid_offsets + * @last_schedule: last scheduling line + * @last_iter: last scan iteration number + * @status: &enum iwl_scan_offload_complete_status + * @ebs_status: &enum iwl_scan_ebs_status + * @time_from_last_iter: time elapsed from last iteration + * @reserved: for future use + */ +struct iwl_umac_scan_complete { + __le32 uid; + u8 last_schedule; + u8 last_iter; + u8 status; + u8 ebs_status; + __le32 time_from_last_iter; + __le32 reserved; +} __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */ + +#define
SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 5 +/** + * struct iwl_scan_offload_profile_match - match information + * @bssid: matched bssid + * @channel: channel where the match occurred + * @energy: + * @matching_feature: + * @matching_channels: bitmap of channels that matched, referencing + * the channels passed in the scan offload request + */ +struct iwl_scan_offload_profile_match { + u8 bssid[ETH_ALEN]; + __le16 reserved; + u8 channel; + u8 energy; + u8 matching_feature; + u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN]; +} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */ + +/** + * struct iwl_scan_offload_profiles_query - match results query response + * @matched_profiles: bitmap of matched profiles, referencing the + * matches passed in the scan offload request + * @last_scan_age: age of the last offloaded scan + * @n_scans_done: number of offloaded scans done + * @gp2_d0u: GP2 when D0U occurred + * @gp2_invoked: GP2 when scan offload was invoked + * @resume_while_scanning: not used + * @self_recovery: obsolete + * @reserved: reserved + * @matches: array of match information, one for each match + */ +struct iwl_scan_offload_profiles_query { + __le32 matched_profiles; + __le32 last_scan_age; + __le32 n_scans_done; + __le32 gp2_d0u; + __le32 gp2_invoked; + u8 resume_while_scanning; + u8 self_recovery; + __le16 reserved; + struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES]; +} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */ + #endif diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index c62575d86bcdbe5e75c724cacf8605e8569309e6..88af6dd2ceaa4034e7dd167fad32fbb22fcc2fed 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -106,6 +106,12 @@ enum { DBG_CFG = 0x9, ANTENNA_COUPLING_NOTIFICATION = 0xa, + /* UMAC scan commands */ + SCAN_CFG_CMD = 0xc, + SCAN_REQ_UMAC = 0xd, + SCAN_ABORT_UMAC = 0xe, + SCAN_COMPLETE_UMAC = 0xf, + /* station table */ ADD_STA_KEY = 0x17, ADD_STA = 0x18, @@ -122,6 +128,11 @@ enum { /* global key */ WEP_KEY = 0x20, + /* TDLS */ + TDLS_CHANNEL_SWITCH_CMD = 0x27, + TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa, + TDLS_CONFIG_CMD = 0xa7, + /* MAC and Binding commands */ MAC_CONTEXT_CMD = 0x28, TIME_EVENT_CMD = 0x29, /* both CMD and response */ @@ -190,6 +201,8 @@ enum { /* Power - new power table command */ MAC_PM_POWER_TABLE = 0xa9, + MFUART_LOAD_NOTIFICATION = 0xb1, + REPLY_RX_PHY_CMD = 0xc0, REPLY_RX_MPDU_CMD = 0xc1, BA_NOTIF = 0xc5, @@ -236,11 +249,9 @@ enum { WOWLAN_TX_POWER_PER_DB = 0xe6, /* and for NetDetect */ - NET_DETECT_CONFIG_CMD = 0x54, - NET_DETECT_PROFILES_QUERY_CMD = 0x56, - NET_DETECT_PROFILES_CMD = 0x57, - NET_DETECT_HOTSPOTS_CMD = 0x58, - NET_DETECT_HOTSPOTS_QUERY_CMD = 0x59, + SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56, + SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD = 0x58, + SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD = 0x59, REPLY_MAX = 0xff, }; @@ -1200,6 +1211,21 @@ struct iwl_missed_beacons_notif { __le32 num_recvd_beacons; } __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */ +/** + * struct iwl_mfuart_load_notif - mfuart image version & status + * ( MFUART_LOAD_NOTIFICATION = 0xb1 ) + * @installed_ver: installed image version + * @external_ver: external image version + * @status: MFUART loading status + * @duration: MFUART loading time + */ +struct iwl_mfuart_load_notif { + __le32 installed_ver; + __le32 external_ver; + __le32 status; + __le32 duration; +} __packed; /*MFU_LOADER_NTFY_API_S_VER_1*/ + /** + * struct iwl_set_calib_default_cmd - set
default value for calibration. * ( SET_CALIB_DEFAULT_CMD = 0x8e ) @@ -1589,7 +1615,7 @@ enum iwl_sf_scenario { #define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */ /* smart FIFO default values */ -#define SF_W_MARK_SISO 4096 +#define SF_W_MARK_SISO 6144 #define SF_W_MARK_MIMO2 8192 #define SF_W_MARK_MIMO3 6144 #define SF_W_MARK_LEGACY 4096 @@ -1711,4 +1737,145 @@ struct iwl_scd_txq_cfg_cmd { u8 flags; } __packed; +/*********************************** + * TDLS API + ***********************************/ + +/* Type of TDLS request */ +enum iwl_tdls_channel_switch_type { + TDLS_SEND_CHAN_SW_REQ = 0, + TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH, + TDLS_MOVE_CH, +}; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */ + +/** + * Switch timing sub-element in a TDLS channel-switch command + * @frame_timestamp: GP2 timestamp of channel-switch request/response packet + * received from peer + * @max_offchan_duration: the maximum amount of time, in microseconds, out of + * a DTIM interval that is given to the TDLS off-channel communication. For + * instance, if the DTIM is 200TU and the TDLS peer is to be given 25% of + * the time, the value given will be 50TU, or 50 * 1024 when translated into + * microseconds. + * @switch_time: switch time the peer sent in its channel switch timing IE + * @switch_timeout: switch timeout the peer sent in its channel switch timing IE + */ +struct iwl_tdls_channel_switch_timing { + __le32 frame_timestamp; /* GP2 time of peer packet Rx */ + __le32 max_offchan_duration; /* given in micro-seconds */ + __le32 switch_time; /* given in micro-seconds */ + __le32 switch_timeout; /* given in micro-seconds */ +} __packed; /* TDLS_STA_CHANNEL_SWITCH_TIMING_DATA_API_S_VER_1 */ + +#define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200 + +/** + * TDLS channel switch frame template + * + * A template representing a TDLS channel-switch request or response frame + * + * @switch_time_offset: offset to the channel switch timing IE in the template + * @tx_cmd: Tx parameters for the frame + * @data: frame data + */ +struct iwl_tdls_channel_switch_frame { + __le32 switch_time_offset; + struct iwl_tx_cmd tx_cmd; + u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE]; +} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */ + +/** + * TDLS channel switch command + * + * The command is sent to initiate a channel switch and also in response to + * incoming TDLS channel-switch request/response packets from remote peers. + * + * @switch_type: see &enum iwl_tdls_channel_switch_type + * @peer_sta_id: station id of TDLS peer + * @ci: channel we switch to + * @timing: timing related data for command + * @frame: channel-switch request/response template, depending on switch_type + */ +struct iwl_tdls_channel_switch_cmd { + u8 switch_type; + __le32 peer_sta_id; + struct iwl_fw_channel_info ci; + struct iwl_tdls_channel_switch_timing timing; + struct iwl_tdls_channel_switch_frame frame; +} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */ + +/** + * TDLS channel switch start notification + * + * @status: non-zero on success + * @offchannel_duration: duration given in microseconds + * @sta_id: station id of the peer we are currently channel-switching with + */ +struct iwl_tdls_channel_switch_notif { + __le32 status; + __le32 offchannel_duration; + __le32 sta_id; +} __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */ + +/** + * TDLS station info + * + * @sta_id: station id of the TDLS peer + * @tx_to_peer_tid: TID reserved vs.
the peer for FW based Tx + * @tx_to_peer_ssn: initial SSN the FW should use for Tx on its TID vs the peer + * @is_initiator: 1 if the peer is the TDLS link initiator, 0 otherwise + */ +struct iwl_tdls_sta_info { + u8 sta_id; + u8 tx_to_peer_tid; + __le16 tx_to_peer_ssn; + __le32 is_initiator; +} __packed; /* TDLS_STA_INFO_VER_1 */ + +/** + * TDLS basic config command + * + * @id_and_color: MAC id and color being configured + * @tdls_peer_count: number of currently connected TDLS peers + * @tx_to_ap_tid: TID reserved vs. the AP for FW based Tx + * @tx_to_ap_ssn: initial SSN the FW should use for Tx on its TID vs. the AP + * @sta_info: per-station info. Only the first tdls_peer_count entries are set + * @pti_req_data_offset: offset of network-level data for the PTI template + * @pti_req_tx_cmd: Tx parameters for PTI request template + * @pti_req_template: PTI request template data + */ +struct iwl_tdls_config_cmd { + __le32 id_and_color; /* mac id and color */ + u8 tdls_peer_count; + u8 tx_to_ap_tid; + __le16 tx_to_ap_ssn; + struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT]; + + __le32 pti_req_data_offset; + struct iwl_tx_cmd pti_req_tx_cmd; + u8 pti_req_template[0]; +} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */ + +/** + * TDLS per-station config information from FW + * + * @sta_id: station id of the TDLS peer + * @tx_to_peer_last_seq: last sequence number used by FW during FW-based Tx to + * the peer + */ +struct iwl_tdls_config_sta_info_res { + __le16 sta_id; + __le16 tx_to_peer_last_seq; +} __packed; /* TDLS_STA_INFO_RSP_VER_1 */ + +/** + * TDLS config information from FW + * + * @tx_to_ap_last_seq: last sequence number used by FW during FW-based Tx to AP + * @sta_info: per-station TDLS config information + */ +struct iwl_tdls_config_res { + __le32 tx_to_ap_last_seq; + struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT]; +} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */ + #endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index eb03943f846326207061a2ed63e95df09af93caf..d0fa6e9ed59098cb52e05102e72afbffe3719fc8 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -186,7 +186,12 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, static const u8 alive_cmd[] = { MVM_ALIVE }; struct iwl_sf_region st_fwrd_space; - fw = iwl_get_ucode_image(mvm, ucode_type); + if (ucode_type == IWL_UCODE_REGULAR && + iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_CUSTOM) && + iwl_fw_dbg_conf_enabled(mvm->fw, FW_DBG_CUSTOM)) + fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER); + else + fw = iwl_get_ucode_image(mvm, ucode_type); if (WARN_ON(!fw)) return -EINVAL; mvm->cur_ucode = ucode_type; @@ -227,6 +232,10 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, st_fwrd_space.addr = mvm->sf_space.addr; st_fwrd_space.size = mvm->sf_space.size; ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space); + if (ret) { + IWL_ERR(mvm, "Failed to update SF size.
ret %d\n", ret); + return ret; + } iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr); @@ -390,6 +399,42 @@ out: return ret; } +static int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, + enum iwl_fw_dbg_conf conf_id) +{ + u8 *ptr; + int ret = 0; /* a configuration with no host commands is a no-op */ + int i; + + if (WARN_ONCE(conf_id >= ARRAY_SIZE(mvm->fw->dbg_conf_tlv), + "Invalid configuration %d\n", conf_id)) + return -EINVAL; + + if (!mvm->fw->dbg_conf_tlv[conf_id]) + return -EINVAL; + + if (mvm->fw_dbg_conf != FW_DBG_INVALID) + IWL_WARN(mvm, "FW already configured (%d) - re-configuring\n", + mvm->fw_dbg_conf); + + /* Send all HCMDs for configuring the FW debug */ + ptr = (void *)&mvm->fw->dbg_conf_tlv[conf_id]->hcmd; + for (i = 0; i < mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) { + struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr; + + ret = iwl_mvm_send_cmd_pdu(mvm, cmd->id, 0, + le16_to_cpu(cmd->len), cmd->data); + if (ret) + return ret; + + ptr += sizeof(*cmd); + ptr += le16_to_cpu(cmd->len); + } + + mvm->fw_dbg_conf = conf_id; + return ret; +} + int iwl_mvm_up(struct iwl_mvm *mvm) { int ret, i; @@ -441,6 +486,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (ret) IWL_ERR(mvm, "Failed to initialize Smart Fifo\n"); + mvm->fw_dbg_conf = FW_DBG_INVALID; + iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_CUSTOM); + ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant); if (ret) goto error; @@ -462,6 +510,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm) for (i = 0; i < IWL_MVM_STATION_COUNT; i++) RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); + mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT; + /* reset quota debouncing buffer - 0xff will yield invalid data */ memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd)); @@ -501,6 +551,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (ret) goto error; + if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) { + ret = iwl_mvm_config_scan(mvm); + if (ret) + goto error; + } + /* allow FW/transport low power modes if not during restart */ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); @@ -587,3 +643,19 @@ int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, le32_to_cpu(radio_version->radio_dash)); return 0; } + +int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data; + + IWL_DEBUG_INFO(mvm, + "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n", + le32_to_cpu(mfuart_notif->installed_ver), + le32_to_cpu(mfuart_notif->external_ver), + le32_to_cpu(mfuart_notif->status), + le32_to_cpu(mfuart_notif->duration)); + return 0; +} diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c index 0c5c0b0e23f5bbe167193c12bb954fbd2dfe8ba1..f6d86ccce6a8c3d705c18c252f839ff81e4d6f86 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c @@ -83,11 +83,15 @@ struct iwl_mvm_mac_iface_iterator_data { struct ieee80211_vif *vif; unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)]; unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)]; - u32 used_hw_queues; enum iwl_tsf_id preferred_tsf; bool found_vif; }; +struct iwl_mvm_hw_queues_iface_iterator_data { + struct ieee80211_vif *exclude_vif; + unsigned long used_hw_queues; +}; + static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { @@ -197,8
+201,7 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac, /* * Get the mask of the queues used by the vif */ -u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) +u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif) { u32 qmask = 0, ac; @@ -214,6 +217,54 @@ u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm, return qmask; } +static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_hw_queues_iface_iterator_data *data = _data; + + /* exclude the given vif */ + if (vif == data->exclude_vif) + return; + + data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif); +} + +static void iwl_mvm_mac_sta_hw_queues_iter(void *_data, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_hw_queues_iface_iterator_data *data = _data; + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + + /* Mark the queues used by the sta */ + data->used_hw_queues |= mvmsta->tfd_queue_msk; +} + +unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, + struct ieee80211_vif *exclude_vif) +{ + struct iwl_mvm_hw_queues_iface_iterator_data data = { + .exclude_vif = exclude_vif, + .used_hw_queues = + BIT(IWL_MVM_OFFCHANNEL_QUEUE) | + BIT(mvm->aux_queue) | + BIT(IWL_MVM_CMD_QUEUE), + }; + + lockdep_assert_held(&mvm->mutex); + + /* mark all VIF used hw queues */ + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, + iwl_mvm_iface_hw_queues_iter, &data); + + /* don't assign the same hw queues as TDLS stations */ + ieee80211_iterate_stations_atomic(mvm->hw, + iwl_mvm_mac_sta_hw_queues_iter, + &data); + + return data.used_hw_queues; +} + static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { @@ -226,9 +277,6 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, return; } - /* Mark the queues used by the vif */ - data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(data->mvm, vif); - /* Mark MAC IDs as used by clearing the available bit, and * (below) mark TSFs as used if their existing use is not * compatible with the new interface type. 
@@ -275,10 +323,6 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 }, /* no preference yet */ .preferred_tsf = NUM_TSF_IDS, - .used_hw_queues = - BIT(IWL_MVM_OFFCHANNEL_QUEUE) | - BIT(mvm->aux_queue) | - BIT(IWL_MVM_CMD_QUEUE), .found_vif = false, }; u32 ac; @@ -317,6 +361,8 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_mac_iface_iterator, &data); + used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, vif); + /* * In the case we're getting here during resume, it's similar to * firmware restart, and with RESUME_ALL the iterator will find @@ -366,8 +412,6 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, return 0; } - used_hw_queues = data.used_hw_queues; - /* Find available queues, and allocate them to the ACs */ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { u8 queue = find_first_zero_bit(&used_hw_queues, @@ -1219,17 +1263,25 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif) } static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm, - struct ieee80211_vif *csa_vif, u32 gp2) + struct ieee80211_vif *csa_vif, u32 gp2, + bool tx_success) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(csa_vif); + /* Don't start the countdown from a failed beacon */ + if (!tx_success && !mvmvif->csa_countdown) + return; + + mvmvif->csa_countdown = true; + if (!ieee80211_csa_is_complete(csa_vif)) { int c = ieee80211_csa_update_counter(csa_vif); iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif); if (csa_vif->p2p && - !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2) { + !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2 && + tx_success) { u32 rel_time = (c + 1) * csa_vif->bss_conf.beacon_int - IWL_MVM_CHANNEL_SWITCH_TIME_GO; @@ -1252,38 +1304,30 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, struct iwl_device_cmd *cmd) { struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_extended_beacon_notif *beacon = (void *)pkt->data; struct iwl_mvm_tx_resp *beacon_notify_hdr; struct ieee80211_vif *csa_vif; struct ieee80211_vif *tx_blocked_vif; - u64 tsf; + u16 status; lockdep_assert_held(&mvm->mutex); - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_CAPA_EXTENDED_BEACON) { - struct iwl_extended_beacon_notif *beacon = (void *)pkt->data; - - beacon_notify_hdr = &beacon->beacon_notify_hdr; - tsf = le64_to_cpu(beacon->tsf); - mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2); - } else { - struct iwl_beacon_notif *beacon = (void *)pkt->data; - - beacon_notify_hdr = &beacon->beacon_notify_hdr; - tsf = le64_to_cpu(beacon->tsf); - } + beacon_notify_hdr = &beacon->beacon_notify_hdr; + mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2); + status = le16_to_cpu(beacon_notify_hdr->status.status) & TX_STATUS_MSK; IWL_DEBUG_RX(mvm, "beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n", - le16_to_cpu(beacon_notify_hdr->status.status) & - TX_STATUS_MSK, - beacon_notify_hdr->failure_frame, tsf, + status, beacon_notify_hdr->failure_frame, + le64_to_cpu(beacon->tsf), mvm->ap_last_beacon_gp2, le32_to_cpu(beacon_notify_hdr->initial_rate)); csa_vif = rcu_dereference_protected(mvm->csa_vif, lockdep_is_held(&mvm->mutex)); if (unlikely(csa_vif && csa_vif->csa_active)) - iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2); + iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2, + (status == TX_STATUS_SUCCESS)); tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif, lockdep_is_held(&mvm->mutex)); diff --git
a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index b6d2683da3a96dab9c53d0c6f291ab69de6e9e8f..31a5b3f4266c3edaf26a05dfca208ddd2082f8ca 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -69,6 +69,7 @@ #include #include #include +#include <linux/devcoredump.h> #include #include #include @@ -253,6 +254,26 @@ static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm, spin_unlock_bh(&mvm->refs_lock); } +bool iwl_mvm_ref_taken(struct iwl_mvm *mvm) +{ + int i; + bool taken = false; + + if (!iwl_mvm_is_d0i3_supported(mvm)) + return true; + + spin_lock_bh(&mvm->refs_lock); + for (i = 0; i < IWL_MVM_REF_COUNT; i++) { + if (mvm->refs[i]) { + taken = true; + break; + } + } + spin_unlock_bh(&mvm->refs_lock); + + return taken; +} + int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) { iwl_mvm_ref(mvm, ref_type); @@ -302,7 +323,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC | IEEE80211_RADIOTAP_MCS_HAVE_STBC; - hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC; + hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC | + IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED; hw->rate_control_algorithm = "iwl-mvm-rs"; /* @@ -315,15 +337,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->flags |= IEEE80211_HW_MFP_CAPABLE; if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT && - IWL_UCODE_API(mvm->fw->ucode_ver) >= 9 && !iwlwifi_mod_params.uapsd_disable) { hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD; hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; } - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) + if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN || + mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) { hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS; + hw->wiphy->features |= + NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; + } hw->sta_data_size = sizeof(struct iwl_mvm_sta); hw->vif_data_size = sizeof(struct iwl_mvm_vif); @@ -343,8 +369,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD) hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_CSA_FLOW) - hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; + hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; hw->wiphy->iface_combinations = iwl_mvm_iface_combinations; hw->wiphy->n_iface_combinations = @@ -402,7 +427,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) NL80211_FEATURE_LOW_PRIORITY_SCAN | NL80211_FEATURE_P2P_GO_OPPPS | NL80211_FEATURE_DYNAMIC_SMPS | - NL80211_FEATURE_STATIC_SMPS; + NL80211_FEATURE_STATIC_SMPS | + NL80211_FEATURE_SUPPORTS_WMM_ADMISSION; if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) @@ -440,7 +466,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) mvm->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | WIPHY_WOWLAN_EAP_IDENTITY_REQ | - WIPHY_WOWLAN_RFKILL_RELEASE; + WIPHY_WOWLAN_RFKILL_RELEASE | + WIPHY_WOWLAN_NET_DETECT; if (!iwlwifi_mod_params.sw_crypto) mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | WIPHY_WOWLAN_GTK_REKEY_FAILURE | @@ -449,6 +476,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; mvm->wowlan.pattern_min_len =
IWL_WOWLAN_MIN_PATTERN_LEN; mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; + mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES; mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support; hw->wiphy->wowlan = &mvm->wowlan; } @@ -463,6 +491,17 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) if (ret) return ret; + if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_TDLS_SUPPORT) { + IWL_DEBUG_TDLS(mvm, "TDLS supported\n"); + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; + } + + if (mvm->fw->ucode_capa.capa[0] & + IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH) { + IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n"); + hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH; + } + ret = ieee80211_register_hw(mvm->hw); if (ret) iwl_mvm_leds_exit(mvm); @@ -679,10 +718,51 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data)); } -#ifdef CONFIG_IWLWIFI_DEBUGFS +static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count, + const void *data, size_t datalen) +{ + const struct iwl_mvm_dump_ptrs *dump_ptrs = data; + ssize_t bytes_read; + ssize_t bytes_read_trans; + + if (offset < dump_ptrs->op_mode_len) { + bytes_read = min_t(ssize_t, count, + dump_ptrs->op_mode_len - offset); + memcpy(buffer, (u8 *)dump_ptrs->op_mode_ptr + offset, + bytes_read); + offset += bytes_read; + count -= bytes_read; + + if (count == 0) + return bytes_read; + } else { + bytes_read = 0; + } + + if (!dump_ptrs->trans_ptr) + return bytes_read; + + offset -= dump_ptrs->op_mode_len; + bytes_read_trans = min_t(ssize_t, count, + dump_ptrs->trans_ptr->len - offset); + memcpy(buffer + bytes_read, + (u8 *)dump_ptrs->trans_ptr->data + offset, + bytes_read_trans); + + return bytes_read + bytes_read_trans; +} + +static void iwl_mvm_free_coredump(const void *data) +{ + const struct iwl_mvm_dump_ptrs *fw_error_dump = data; + + vfree(fw_error_dump->op_mode_ptr); + vfree(fw_error_dump->trans_ptr); + kfree(fw_error_dump); +} + void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) { - static char *env[] = { "DRIVER=iwlwifi", "EVENT=error_dump", NULL }; struct iwl_fw_error_dump_file *dump_file; struct iwl_fw_error_dump_data *dump_data; struct iwl_fw_error_dump_info *dump_info; @@ -695,10 +775,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) lockdep_assert_held(&mvm->mutex); - if (mvm->fw_error_dump) - return; - - fw_error_dump = kzalloc(sizeof(*mvm->fw_error_dump), GFP_KERNEL); + fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL); if (!fw_error_dump) return; @@ -773,16 +850,19 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) if (fw_error_dump->trans_ptr) file_len += fw_error_dump->trans_ptr->len; dump_file->file_len = cpu_to_le32(file_len); - mvm->fw_error_dump = fw_error_dump; - /* notify the userspace about the error we had */ - kobject_uevent_env(&mvm->hw->wiphy->dev.kobj, KOBJ_CHANGE, env); + dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0, + GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump); } -#endif static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) { - iwl_mvm_fw_error_dump(mvm); + /* clear the D3 reconfig, we only need it to avoid dumping a + * firmware coredump on reconfiguration, we shouldn't do that + * on D3->D0 transition + */ + if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) + iwl_mvm_fw_error_dump(mvm); iwl_trans_stop_device(mvm->trans); @@ -803,6 +883,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) iwl_mvm_reset_phy_ctxts(mvm); memset(mvm->fw_key_table, 0, 
sizeof(mvm->fw_key_table)); memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained)); + memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained)); memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old)); memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); @@ -859,9 +940,8 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw) return ret; } -static void iwl_mvm_mac_restart_complete(struct ieee80211_hw *hw) +static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) { - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); @@ -876,9 +956,50 @@ static void iwl_mvm_mac_restart_complete(struct ieee80211_hw *hw) /* allow transport/FW low power modes */ iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); + /* + * If we have TDLS peers, remove them. We don't know the last seqno/PN + * of packets the FW sent out, so we must reconnect. + */ + iwl_mvm_teardown_tdls_peers(mvm); + mutex_unlock(&mvm->mutex); } +static void iwl_mvm_resume_complete(struct iwl_mvm *mvm) +{ + bool exit_now; + + if (!iwl_mvm_is_d0i3_supported(mvm)) + return; + + mutex_lock(&mvm->d0i3_suspend_mutex); + __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags); + exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP, + &mvm->d0i3_suspend_flags); + mutex_unlock(&mvm->d0i3_suspend_mutex); + + if (exit_now) { + IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n"); + _iwl_mvm_exit_d0i3(mvm); + } +} + +static void +iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + switch (reconfig_type) { + case IEEE80211_RECONFIG_TYPE_RESTART: + iwl_mvm_restart_complete(mvm); + break; + case IEEE80211_RECONFIG_TYPE_SUSPEND: + iwl_mvm_resume_complete(mvm); + break; + } +} + void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) { lockdep_assert_held(&mvm->mutex); @@ -1087,7 +1208,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { - u32 tfd_msk = iwl_mvm_mac_get_queues_mask(mvm, vif); + u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif); if (tfd_msk) { mutex_lock(&mvm->mutex); @@ -1383,6 +1504,9 @@ bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, .cmd = cmd, }; + if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL) + return false; + memset(cmd, 0, sizeof(*cmd)); cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters); cmd->max_macs = ARRAY_SIZE(cmd->macs); @@ -1835,9 +1959,11 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, req->n_channels > mvm->fw->ucode_capa.n_scan_channels) return -EINVAL; - ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_SCHED); - if (ret) - return ret; + if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { + ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_SCHED); + if (ret) + return ret; + } mutex_lock(&mvm->mutex); @@ -1848,7 +1974,9 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN); - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) + if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) + ret = iwl_mvm_scan_umac(mvm, vif, hw_req); + else if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req); else ret = iwl_mvm_scan_request(mvm, vif, req); @@ -2065,6 +2193,15 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, out_unlock: mutex_unlock(&mvm->mutex); + if (sta->tdls && ret == 0) { + if 
(old_state == IEEE80211_STA_NOTEXIST && + new_state == IEEE80211_STA_NONE) + ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID); + else if (old_state == IEEE80211_STA_NONE && + new_state == IEEE80211_STA_NOTEXIST) + ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID); + } + return ret; } @@ -2147,9 +2284,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; - ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_OS); - if (ret) - return ret; + if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { + ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_OS); + if (ret) + return ret; + } mutex_lock(&mvm->mutex); @@ -2169,27 +2308,10 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, goto out; } - mvm->scan_status = IWL_MVM_SCAN_SCHED; - - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) { - ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies); - if (ret) - goto err; - } - - ret = iwl_mvm_config_sched_scan_profiles(mvm, req); + ret = iwl_mvm_scan_offload_start(mvm, vif, req, ies); if (ret) - goto err; - - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) - ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies); - else - ret = iwl_mvm_sched_scan_start(mvm, req); + mvm->scan_status = IWL_MVM_SCAN_NONE; - if (!ret) - goto out; -err: - mvm->scan_status = IWL_MVM_SCAN_NONE; out: mutex_unlock(&mvm->mutex); return ret; @@ -2207,6 +2329,7 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, iwl_mvm_wait_for_async_handlers(mvm); return ret; + } static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, @@ -2235,12 +2358,16 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, break; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: - /* - * Support for TX only, at least for now, so accept - * the key and do nothing else. Then mac80211 will - * pass it for TX but we don't have to use it for RX. + /* For non-client mode, only use WEP keys for TX as we probably + * don't have a station yet anyway and would then have to keep + * track of the keys, linking them to each of the clients/peers + * as they appear. For now, don't do that, for performance WEP + * offload doesn't really matter much, but we need it for some + * other offload features in client mode. */ - return 0; + if (vif->type != NL80211_IFTYPE_STATION) + return 0; + break; default: /* currently FW supports only one optional cipher scheme */ if (hw->n_cipher_schemes && @@ -2563,7 +2690,7 @@ static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw) IWL_DEBUG_MAC80211(mvm, "enter\n"); mutex_lock(&mvm->mutex); - iwl_mvm_stop_p2p_roc(mvm); + iwl_mvm_stop_roc(mvm); mutex_unlock(&mvm->mutex); IWL_DEBUG_MAC80211(mvm, "leave\n"); @@ -2676,8 +2803,8 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, switch (vif->type) { case NL80211_IFTYPE_AP: - /* Unless it's a CSA flow we have nothing to do here */ - if (vif->csa_active) { + /* only needed if we're switching chanctx (i.e. 
during CSA) */ + if (switching_chanctx) { mvmvif->ap_ibss_active = true; break; } @@ -2721,23 +2848,32 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, } /* Handle binding during CSA */ - if ((vif->type == NL80211_IFTYPE_AP) || - (switching_chanctx && (vif->type == NL80211_IFTYPE_STATION))) { + if (vif->type == NL80211_IFTYPE_AP) { iwl_mvm_update_quotas(mvm, NULL); iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } - if (vif->csa_active && vif->type == NL80211_IFTYPE_STATION) { - struct iwl_mvm_sta *mvmsta; + if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) { + u32 duration = 2 * vif->bss_conf.beacon_int; - mvmsta = iwl_mvm_sta_from_staid_protected(mvm, - mvmvif->ap_sta_id); + /* iwl_mvm_protect_session() reads directly from the + * device (the system time), so make sure it is + * available. + */ + ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA); + if (ret) + goto out_remove_binding; - if (WARN_ON(!mvmsta)) - goto out; + /* Protect the session to make sure we hear the first + * beacon on the new channel. + */ + iwl_mvm_protect_session(mvm, vif, duration, duration, + vif->bss_conf.beacon_int / 2, + true); - /* TODO: only re-enable after the first beacon */ - iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); + iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA); + + iwl_mvm_update_quotas(mvm, NULL); } goto out; @@ -2771,7 +2907,6 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_vif *disabled_vif = NULL; - struct iwl_mvm_sta *mvmsta; lockdep_assert_held(&mvm->mutex); @@ -2786,9 +2921,11 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, break; case NL80211_IFTYPE_AP: /* This part is triggered only during CSA */ - if (!vif->csa_active || !mvmvif->ap_ibss_active) + if (!switching_chanctx || !mvmvif->ap_ibss_active) goto out; + mvmvif->csa_countdown = false; + /* Set CS bit on all the stations */ iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true); @@ -2803,12 +2940,6 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, disabled_vif = vif; - mvmsta = iwl_mvm_sta_from_staid_protected(mvm, - mvmvif->ap_sta_id); - - if (!WARN_ON(!mvmsta)) - iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true); - iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL); break; default: @@ -2834,18 +2965,12 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw, mutex_unlock(&mvm->mutex); } -static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, - struct ieee80211_vif_chanctx_switch *vifs, - int n_vifs, - enum ieee80211_chanctx_switch_mode mode) +static int +iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm, + struct ieee80211_vif_chanctx_switch *vifs) { - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; - /* we only support SWAP_CONTEXTS and with a single-vif right now */ - if (mode != CHANCTX_SWMODE_SWAP_CONTEXTS || n_vifs > 1) - return -EOPNOTSUPP; - mutex_lock(&mvm->mutex); __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx); @@ -2874,15 +2999,51 @@ out_remove: __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx); out_reassign: - ret = __iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx); - if (ret) { + if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) { IWL_ERR(mvm, "failed to add old_ctx back after failure.\n"); goto out_restart; } - ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, + if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, + true)) { + 
IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); + goto out_restart; + } + + goto out; + +out_restart: + /* things keep failing, better restart the hw */ + iwl_mvm_nic_restart(mvm, false); + +out: + mutex_unlock(&mvm->mutex); + + return ret; +} + +static int +iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm, + struct ieee80211_vif_chanctx_switch *vifs) +{ + int ret; + + mutex_lock(&mvm->mutex); + __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); + + ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, true); if (ret) { + IWL_ERR(mvm, + "failed to assign new_ctx during channel switch\n"); + goto out_reassign; + } + + goto out; + +out_reassign: + if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, + true)) { IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); goto out_restart; } @@ -2895,6 +3056,34 @@ out_restart: out: mutex_unlock(&mvm->mutex); + + return ret; +} + +static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs, + enum ieee80211_chanctx_switch_mode mode) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + /* we only support a single-vif right now */ + if (n_vifs > 1) + return -EOPNOTSUPP; + + switch (mode) { + case CHANCTX_SWMODE_SWAP_CONTEXTS: + ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs); + break; + case CHANCTX_SWMODE_REASSIGN_VIF: + ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs); + break; + default: + ret = -EOPNOTSUPP; + break; + } + return ret; } @@ -2980,27 +3169,134 @@ static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw, } #endif -static void iwl_mvm_channel_switch_beacon(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_chan_def *chandef) +static void iwl_mvm_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw) +{ + /* By implementing this operation, we prevent mac80211 from + * starting its own channel switch timer, so that we can call + * ieee80211_chswitch_done() ourselves at the right time + * (which is when the absence time event starts). + */ + + IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), + "dummy channel switch op\n"); +} + +static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct ieee80211_vif *csa_vif; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u32 apply_time; + int ret; mutex_lock(&mvm->mutex); - csa_vif = rcu_dereference_protected(mvm->csa_vif, - lockdep_is_held(&mvm->mutex)); - if (WARN(csa_vif && csa_vif->csa_active, - "Another CSA is already in progress")) + IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n", + chsw->chandef.center_freq1); + + switch (vif->type) { + case NL80211_IFTYPE_AP: + csa_vif = + rcu_dereference_protected(mvm->csa_vif, + lockdep_is_held(&mvm->mutex)); + if (WARN_ONCE(csa_vif && csa_vif->csa_active, + "Another CSA is already in progress")) { + ret = -EBUSY; + goto out_unlock; + } + + rcu_assign_pointer(mvm->csa_vif, vif); + + if (WARN_ONCE(mvmvif->csa_countdown, + "Previous CSA countdown didn't complete")) { + ret = -EBUSY; + goto out_unlock; + } + + break; + case NL80211_IFTYPE_STATION: + /* Schedule the time event to a bit before beacon 1, + * to make sure we're in the new channel when the + * GO/AP arrives. 
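+ * The apply time computed below is therefore the CSA frame timestamp plus + * (count - 1) beacon intervals, minus a small client-side margin + * (IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT), with the TU-based terms scaled by + * 1024 to convert them to microseconds.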
+ */ + apply_time = chsw->device_timestamp + + ((vif->bss_conf.beacon_int * (chsw->count - 1) - + IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024); + + if (chsw->block_tx) + iwl_mvm_csa_client_absent(mvm, vif); + + iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int, + apply_time); + if (mvmvif->bf_data.bf_enabled) { + ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); + if (ret) + goto out_unlock; + } + + break; + default: + break; + } + + mvmvif->ps_disabled = true; + + ret = iwl_mvm_power_update_ps(mvm); + if (ret) goto out_unlock; - IWL_DEBUG_MAC80211(mvm, "CSA started to freq %d\n", - chandef->center_freq1); - rcu_assign_pointer(mvm->csa_vif, vif); + /* we won't be on this channel any longer */ + iwl_mvm_teardown_tdls_peers(mvm); + +out_unlock: + mutex_unlock(&mvm->mutex); + + return ret; +} + +static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + mutex_lock(&mvm->mutex); + + if (vif->type == NL80211_IFTYPE_STATION) { + struct iwl_mvm_sta *mvmsta; + + mvmsta = iwl_mvm_sta_from_staid_protected(mvm, + mvmvif->ap_sta_id); + + if (WARN_ON(!mvmsta)) { + ret = -EIO; + goto out_unlock; + } + + iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); + + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + + ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); + if (ret) + goto out_unlock; + + iwl_mvm_stop_session_protection(mvm, vif); + } + + mvmvif->ps_disabled = false; + + ret = iwl_mvm_power_update_ps(mvm); out_unlock: mutex_unlock(&mvm->mutex); + + return ret; } static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, @@ -3009,33 +3305,52 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif; struct iwl_mvm_sta *mvmsta; + struct ieee80211_sta *sta; + int i; + u32 msk = 0; if (!vif || vif->type != NL80211_IFTYPE_STATION) return; mutex_lock(&mvm->mutex); mvmvif = iwl_mvm_vif_from_mac80211(vif); - mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id); - if (WARN_ON_ONCE(!mvmsta)) - goto done; + /* flush the AP-station and all TDLS peers */ + for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], + lockdep_is_held(&mvm->mutex)); + if (IS_ERR_OR_NULL(sta)) + continue; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + if (mvmsta->vif != vif) + continue; + + /* make sure only TDLS peers or the AP are flushed */ + WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls); + + msk |= mvmsta->tfd_queue_msk; + } if (drop) { - if (iwl_mvm_flush_tx_path(mvm, mvmsta->tfd_queue_msk, true)) + if (iwl_mvm_flush_tx_path(mvm, msk, true)) IWL_ERR(mvm, "flush request fail\n"); + mutex_unlock(&mvm->mutex); } else { - iwl_trans_wait_tx_queue_empty(mvm->trans, - mvmsta->tfd_queue_msk); + mutex_unlock(&mvm->mutex); + + /* this can take a while, and we may need/want other operations + * to succeed while doing this, so do it without the mutex held + */ + iwl_trans_wait_tx_queue_empty(mvm->trans, msk); } -done: - mutex_unlock(&mvm->mutex); } const struct ieee80211_ops iwl_mvm_hw_ops = { .tx = iwl_mvm_mac_tx, .ampdu_action = iwl_mvm_mac_ampdu_action, .start = iwl_mvm_mac_start, - .restart_complete = iwl_mvm_mac_restart_complete, + .reconfig_complete = iwl_mvm_mac_reconfig_complete, .stop = iwl_mvm_mac_stop, .add_interface = iwl_mvm_mac_add_interface, .remove_interface = iwl_mvm_mac_remove_interface, @@ -3076,7 +3391,13 @@ const struct 
ieee80211_ops iwl_mvm_hw_ops = { .set_tim = iwl_mvm_set_tim, - .channel_switch_beacon = iwl_mvm_channel_switch_beacon, + .channel_switch = iwl_mvm_channel_switch, + .pre_channel_switch = iwl_mvm_pre_channel_switch, + .post_channel_switch = iwl_mvm_post_channel_switch, + + .tdls_channel_switch = iwl_mvm_tdls_channel_switch, + .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch, + .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch, CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 845429c88cf403fdaffee73951fa99251b619137..d24660fb4ef23b85cdffff195e1c04c77a43857b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -87,12 +87,18 @@ /* A TimeUnit is 1024 microsecond */ #define MSEC_TO_TU(_msec) (_msec*1000/1024) -/* This value represents the number of TUs before CSA "beacon 0" TBTT - * when the CSA time-event needs to be scheduled to start. It must be - * big enough to ensure that we switch in time. +/* For GO, this value represents the number of TUs before CSA "beacon + * 0" TBTT when the CSA time-event needs to be scheduled to start. It + * must be big enough to ensure that we switch in time. */ #define IWL_MVM_CHANNEL_SWITCH_TIME_GO 40 +/* For client, this value represents the number of TUs before CSA + * "beacon 1" TBTT, instead. This is because we don't know when the + * GO/AP will be in the new channel, so we switch early enough. + */ +#define IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT 10 + /* * This value (in TUs) is used to fine tune the CSA NoA end time which should * be just before "beacon 0" TBTT. @@ -269,6 +275,7 @@ enum iwl_mvm_ref_type { IWL_MVM_REF_NMI, IWL_MVM_REF_TM_CMD, IWL_MVM_REF_EXIT_WORK, + IWL_MVM_REF_PROTECT_CSA, /* update debugfs.c when changing this */ @@ -288,7 +295,6 @@ enum iwl_bt_force_ant_mode { * struct iwl_mvm_vif_bf_data - beacon filtering related data * @bf_enabled: indicates if beacon filtering is enabled * @ba_enabled: indicated if beacon abort is enabled -* @last_beacon_signal: last beacon rssi signal in dbm * @ave_beacon_signal: average beacon signal * @last_cqm_event: rssi of the last cqm event * @bt_coex_min_thold: minimum threshold for BT coex @@ -399,6 +405,9 @@ struct iwl_mvm_vif { /* FW identified misbehaving AP */ u8 uapsd_misbehaving_bssid[ETH_ALEN]; + + /* Indicates that CSA countdown may be started */ + bool csa_countdown; }; static inline struct iwl_mvm_vif * @@ -519,6 +528,13 @@ enum { #define IWL_MVM_DEBUG_SET_TEMPERATURE_MIN -100 #define IWL_MVM_DEBUG_SET_TEMPERATURE_MAX 200 +enum iwl_mvm_tdls_cs_state { + IWL_MVM_TDLS_SW_IDLE = 0, + IWL_MVM_TDLS_SW_REQ_SENT, + IWL_MVM_TDLS_SW_REQ_RCVD, + IWL_MVM_TDLS_SW_ACTIVE, +}; + struct iwl_mvm { /* for logger access */ struct device *dev; @@ -578,6 +594,7 @@ struct iwl_mvm { struct work_struct sta_drained_wk; unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; atomic_t pending_frames[IWL_MVM_STATION_COUNT]; + u32 tfd_drained[IWL_MVM_STATION_COUNT]; u8 rx_ba_sessions; /* configured by mac80211 */ @@ -588,6 +605,10 @@ struct iwl_mvm { void *scan_cmd; struct iwl_mcast_filter_cmd *mcast_filter_cmd; + /* UMAC scan tracking */ + u32 scan_uid[IWL_MVM_MAX_SIMULTANEOUS_SCANS]; + u8 scan_seq_num, sched_scan_seq_num; + /* rx chain antennas set through debugfs for the scan command */ u8 scan_rx_ant; @@ -649,7 +670,7 @@ struct iwl_mvm { /* -1 for always, 0 for never, >0 for that many times */ s8 restart_fw; struct work_struct fw_error_dump_wk; - 
struct iwl_mvm_dump_ptrs *fw_error_dump; + enum iwl_fw_dbg_conf fw_dbg_conf; #ifdef CONFIG_IWLWIFI_LEDS struct led_classdev led; @@ -660,6 +681,15 @@ struct iwl_mvm { #ifdef CONFIG_PM_SLEEP struct wiphy_wowlan_support wowlan; int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen; + + /* sched scan settings for net detect */ + struct cfg80211_sched_scan_request *nd_config; + struct ieee80211_scan_ies nd_ies; + struct cfg80211_match_set *nd_match_sets; + int n_nd_match_sets; + struct ieee80211_channel **nd_channels; + int n_nd_channels; + bool net_detect; #ifdef CONFIG_IWLWIFI_DEBUGFS u32 d3_wake_sysassert; /* must be u32 for debugfs_create_bool */ bool d3_test_active; @@ -732,6 +762,28 @@ struct iwl_mvm { u32 ap_last_beacon_gp2; u8 low_latency_agg_frame_limit; + + /* TDLS channel switch data */ + struct { + struct delayed_work dwork; + enum iwl_mvm_tdls_cs_state state; + + /* + * Current cs sta - might be different from periodic cs peer + * station. Value is meaningless when the cs-state is idle. + */ + u8 cur_sta_id; + + /* TDLS periodic channel-switch peer */ + struct { + u8 sta_id; + u8 op_class; + bool initiator; /* are we the link initiator */ + struct cfg80211_chan_def chandef; + struct sk_buff *skb; /* ch sw template */ + u32 ch_sw_tm_ie; + } peer; + } tdls_cs; }; /* Extract MVM priv from op_mode and _hw */ @@ -748,6 +800,7 @@ enum iwl_mvm_status { IWL_MVM_STATUS_IN_HW_RESTART, IWL_MVM_STATUS_IN_D0I3, IWL_MVM_STATUS_ROC_AUX_RUNNING, + IWL_MVM_STATUS_D3_RECONFIG, }; static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm) @@ -756,6 +809,26 @@ static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm) test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); } +/* Must be called with rcu_read_lock() held and it can only be + * released when mvmsta is not needed anymore. 
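+ * Put differently, the returned pointer is only valid inside the RCU + * read-side critical section; the station may be freed as soon as the + * lock is released.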
+ */ +static inline struct iwl_mvm_sta * +iwl_mvm_sta_from_staid_rcu(struct iwl_mvm *mvm, u8 sta_id) +{ + struct ieee80211_sta *sta; + + if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)) + return NULL; + + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + + /* This can happen if the station has been removed right now */ + if (IS_ERR_OR_NULL(sta)) + return NULL; + + return iwl_mvm_sta_from_mac80211(sta); +} + static inline struct iwl_mvm_sta * iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id) { @@ -829,6 +902,16 @@ int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id, int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta); int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb); +void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, + struct iwl_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, u8 sta_id); +void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, + struct iwl_tx_cmd *tx_cmd, + struct sk_buff *skb_frag); +void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, __le16 fc); #ifdef CONFIG_IWLWIFI_DEBUG const char *iwl_mvm_get_tx_fail_reason(u32 status); #else @@ -885,6 +968,8 @@ int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, struct iwl_device_cmd *cmd); int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd); +int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); /* MVM PHY */ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, @@ -898,6 +983,8 @@ void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt); int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm); +u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef); +u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef); /* MAC (virtual interface) programming */ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif); @@ -906,8 +993,7 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool force_assoc_off, const u8 *bssid_override); int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm, - struct ieee80211_vif *vif); +u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, @@ -918,6 +1004,8 @@ int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, struct iwl_device_cmd *cmd); void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, + struct ieee80211_vif *exclude_vif); /* Bindings */ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); @@ -928,6 +1016,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *disabled_vif); /* Scanning */ +int iwl_mvm_scan_size(struct iwl_mvm *mvm); int iwl_mvm_scan_request(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_scan_request *req); @@ -950,6 +1039,10 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, struct cfg80211_sched_scan_request *req); int iwl_mvm_sched_scan_start(struct iwl_mvm 
*mvm, struct cfg80211_sched_scan_request *req); +int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_scan_ies *ies); int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify); int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, @@ -964,6 +1057,17 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies); +/* UMAC scan */ +int iwl_mvm_config_scan(struct iwl_mvm *mvm); +int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_scan_request *req); +int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_scan_ies *ies); +int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); + /* MVM debugfs */ #ifdef CONFIG_IWLWIFI_DEBUGFS int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir); @@ -1043,7 +1147,7 @@ iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) } #endif void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta, - struct iwl_wowlan_config_cmd_v2 *cmd); + struct iwl_wowlan_config_cmd *cmd); int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool disable_offloading, @@ -1053,6 +1157,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); +bool iwl_mvm_ref_taken(struct iwl_mvm *mvm); void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq); int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm); @@ -1068,12 +1173,14 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm, struct ieee80211_sta *sta); bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, struct ieee80211_sta *sta); +bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant); bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm); bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, enum ieee80211_band band); u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_tx_info *info, u8 ac); +bool iwl_mvm_bt_coex_is_ant_avail_old(struct iwl_mvm *mvm, u8 ant); bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm); void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm); int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm); @@ -1189,6 +1296,10 @@ bool iwl_mvm_is_idle(struct iwl_mvm *mvm); /* Thermal management and CT-kill */ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); +void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp); +int iwl_mvm_temp_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); void iwl_mvm_tt_handler(struct iwl_mvm *mvm); void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff); void iwl_mvm_tt_exit(struct iwl_mvm *mvm); @@ -1200,18 +1311,37 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool added_vif); /* TDLS */ + +/* + * We use TID 4 (VI) as a FW-used-only TID when TDLS connections are present. + * This TID is marked as used vs the AP and all connected TDLS peers. 
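+ * (TID 4 falls in the video access category, AC_VI, under the standard
+ * UP-to-AC mapping, so the firmware's frames on this TID presumably get
+ * a suitably high priority.)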
+ */
+#define IWL_MVM_TDLS_FW_TID 4
+
 int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm);
 void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       bool sta_added);
 void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif);
+int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif,
+				struct ieee80211_sta *sta, u8 oper_class,
+				struct cfg80211_chan_def *chandef,
+				struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie);
+void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
+				      struct ieee80211_vif *vif,
+				      struct ieee80211_tdls_ch_sw_params *params);
+void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
+					struct ieee80211_vif *vif,
+					struct ieee80211_sta *sta);
+int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+			  struct iwl_device_cmd *cmd);
+void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
+
+struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
 
 void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
 void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
-#else
-static inline void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) {}
-#endif
 
 #endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index af074563e77076fafb88ae6c3d1c21a6f4da53e3..d55fd8e3654c830d047fbe9712686ef0dbff73b3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -339,11 +339,15 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
 	} *file_sec;
 	const u8 *eof, *temp;
 	int max_section_size;
+	const __le32 *dword_buff;
 
 #define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
 #define NVM_WORD2_ID(x) (x >> 12)
 #define NVM_WORD2_LEN_FAMILY_8000(x) (2 * ((x & 0xFF) << 8 | x >> 8))
 #define NVM_WORD1_ID_FAMILY_8000(x) (x >> 4)
+#define NVM_HEADER_0 (0x2A504C54)
+#define NVM_HEADER_1 (0x4E564D2A)
+#define NVM_HEADER_SIZE (4 * sizeof(u32))
 
 	IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
 
@@ -372,12 +376,6 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
 	IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n",
 		 mvm->nvm_file_name, fw_entry->size);
 
-	if (fw_entry->size < sizeof(*file_sec)) {
-		IWL_ERR(mvm, "NVM file too small\n");
-		ret = -EINVAL;
-		goto out;
-	}
-
 	if (fw_entry->size > MAX_NVM_FILE_LEN) {
 		IWL_ERR(mvm, "NVM file too large\n");
 		ret = -EINVAL;
@@ -385,8 +383,25 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
 	}
 
 	eof = fw_entry->data + fw_entry->size;
-
-	file_sec = (void *)fw_entry->data;
+	dword_buff = (__le32 *)fw_entry->data;
+
+	/* Some NVM files contain a header.
+	 * The header is identified by a two-dword magic, as follows:
+	 *	dword[0] = 0x2A504C54
+	 *	dword[1] = 0x4E564D2A
+	 *
+	 * This header must be skipped when providing the NVM data to the FW.
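+	 *
+	 * For illustration, the header layout then continues as below;
+	 * dwords 2 and 3 follow from the info prints further down, the
+	 * rest is an assumption:
+	 *	dword[2] = NVM version
+	 *	dword[3] = manufacturing date
+	 *	dword[4...] = the NVM sections themselves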
+ */ + if (fw_entry->size > NVM_HEADER_SIZE && + dword_buff[0] == cpu_to_le32(NVM_HEADER_0) && + dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) { + file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE); + IWL_INFO(mvm, "NVM Version %08X\n", le32_to_cpu(dword_buff[2])); + IWL_INFO(mvm, "NVM Manufacturing date %08X\n", + le32_to_cpu(dword_buff[3])); + } else { + file_sec = (void *)fw_entry->data; + } while (true) { if (file_sec->data > eof) { diff --git a/drivers/net/wireless/iwlwifi/mvm/offloading.c b/drivers/net/wireless/iwlwifi/mvm/offloading.c index adcbf4c8edd86c37400029db065dd1720b8cdfed..68b0169c8892c8a62105e0fcff817c14e1696780 100644 --- a/drivers/net/wireless/iwlwifi/mvm/offloading.c +++ b/drivers/net/wireless/iwlwifi/mvm/offloading.c @@ -67,7 +67,7 @@ #include "mvm.h" void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta, - struct iwl_wowlan_config_cmd_v2 *cmd) + struct iwl_wowlan_config_cmd *cmd) { int i; diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index 5b719ee8e789075e6e7517512ca311cb6f9b6121..97dfba50c6820b3432244551c2d235b3b2941fa7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -244,6 +244,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { iwl_mvm_rx_scan_offload_complete_notif, true), RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_offload_results, false), + RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif, + true), RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false), RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false), @@ -254,6 +256,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false), RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION, iwl_mvm_power_uapsd_misbehaving_ap_notif, false), + RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true), + + RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif, + true), + RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false), + }; #undef RX_HANDLER #define CMD(x) [x] = #x @@ -317,11 +325,9 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = { CMD(WOWLAN_KEK_KCK_MATERIAL), CMD(WOWLAN_GET_STATUSES), CMD(WOWLAN_TX_POWER_PER_DB), - CMD(NET_DETECT_CONFIG_CMD), - CMD(NET_DETECT_PROFILES_QUERY_CMD), - CMD(NET_DETECT_PROFILES_CMD), - CMD(NET_DETECT_HOTSPOTS_CMD), - CMD(NET_DETECT_HOTSPOTS_QUERY_CMD), + CMD(SCAN_OFFLOAD_PROFILES_QUERY_CMD), + CMD(SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD), + CMD(SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD), CMD(CARD_STATE_NOTIFICATION), CMD(MISSED_BEACONS_NOTIFICATION), CMD(BT_COEX_PRIO_TABLE), @@ -344,6 +350,13 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = { CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION), CMD(ANTENNA_COUPLING_NOTIFICATION), CMD(SCD_QUEUE_CFG), + CMD(SCAN_CFG_CMD), + CMD(SCAN_REQ_UMAC), + CMD(SCAN_ABORT_UMAC), + CMD(SCAN_COMPLETE_UMAC), + CMD(TDLS_CHANNEL_SWITCH_CMD), + CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION), + CMD(TDLS_CONFIG_CMD), }; #undef CMD @@ -403,6 +416,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (cfg->max_rx_agg_size) hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size; + if (cfg->max_tx_agg_size) + hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size; + op_mode = hw->priv; op_mode->ops = &iwl_mvm_ops; @@ -439,6 +455,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk); 
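 	/*
 	 * (A scheduling sketch for the TDLS channel-switch delayed work
 	 * initialized just below; delay_ms is an assumed caller-chosen
 	 * value, not something this patch defines:
 	 *
 	 *	schedule_delayed_work(&mvm->tdls_cs.dwork,
 	 *			      msecs_to_jiffies(delay_ms));
 	 * )
 	 */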
INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work); INIT_WORK(&mvm->fw_error_dump_wk, iwl_mvm_fw_error_dump_wk); + INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); spin_lock_init(&mvm->d0i3_tx_lock); spin_lock_init(&mvm->refs_lock); @@ -479,6 +496,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start); + trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv; + trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num; + memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv, + sizeof(trans->dbg_conf_tlv)); /* set up notification wait support */ iwl_notification_wait_init(&mvm->notif_wait); @@ -522,7 +543,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mutex_lock(&mvm->mutex); err = iwl_run_init_mvm_ucode(mvm, true); - iwl_trans_stop_device(trans); + if (!err || !iwlmvm_mod_params.init_dbg) + iwl_trans_stop_device(trans); mutex_unlock(&mvm->mutex); /* returns 0 if successful, 1 if success but in rfkill */ if (err < 0 && !iwlmvm_mod_params.init_dbg) { @@ -531,16 +553,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, } } - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) - scan_size = sizeof(struct iwl_scan_req_unified_lmac) + - sizeof(struct iwl_scan_channel_cfg_lmac) * - mvm->fw->ucode_capa.n_scan_channels + - sizeof(struct iwl_scan_probe_req); - else - scan_size = sizeof(struct iwl_scan_cmd) + - mvm->fw->ucode_capa.max_probe_length + - mvm->fw->ucode_capa.n_scan_channels * - sizeof(struct iwl_scan_channel); + scan_size = iwl_mvm_scan_size(mvm); mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL); if (!mvm->scan_cmd) @@ -585,16 +598,16 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) ieee80211_unregister_hw(mvm->hw); kfree(mvm->scan_cmd); - if (mvm->fw_error_dump) { - vfree(mvm->fw_error_dump->op_mode_ptr); - vfree(mvm->fw_error_dump->trans_ptr); - kfree(mvm->fw_error_dump); - } kfree(mvm->mcast_filter_cmd); mvm->mcast_filter_cmd = NULL; #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS) kfree(mvm->d3_resume_sram); + if (mvm->nd_config) { + kfree(mvm->nd_config->match_sets); + kfree(mvm->nd_config); + mvm->nd_config = NULL; + } #endif iwl_trans_op_mode_leave(mvm->trans); @@ -991,7 +1004,7 @@ static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac, } static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm, - struct iwl_wowlan_config_cmd_v3 *cmd, + struct iwl_wowlan_config_cmd *cmd, struct iwl_d0i3_iter_data *iter_data) { struct ieee80211_sta *ap_sta; @@ -1007,14 +1020,14 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm, goto out; mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta); - cmd->common.is_11n_connection = ap_sta->ht_cap.ht_supported; + cmd->is_11n_connection = ap_sta->ht_cap.ht_supported; cmd->offloading_tid = iter_data->offloading_tid; /* * The d0i3 uCode takes care of the nonqos counters, * so configure only the qos seq ones. 
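	 * (The qos counters are kept per TID; iwl_mvm_set_wowlan_qos_seq(),
	 * whose signature change appears in offloading.c above, is assumed
	 * here to copy them from the AP station's tid_data into the command.)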
*/ - iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, &cmd->common); + iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd); out: rcu_read_unlock(); } @@ -1026,14 +1039,11 @@ static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) struct iwl_d0i3_iter_data d0i3_iter_data = { .mvm = mvm, }; - struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = { - .common = { - .wakeup_filter = - cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME | - IWL_WOWLAN_WAKEUP_BEACON_MISS | - IWL_WOWLAN_WAKEUP_LINK_CHANGE | - IWL_WOWLAN_WAKEUP_BCN_FILTERING), - }, + struct iwl_wowlan_config_cmd wowlan_config_cmd = { + .wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME | + IWL_WOWLAN_WAKEUP_BEACON_MISS | + IWL_WOWLAN_WAKEUP_LINK_CHANGE | + IWL_WOWLAN_WAKEUP_BCN_FILTERING), }; struct iwl_d3_manager_config d3_cfg_cmd = { .min_sleep_time = cpu_to_le32(1000), @@ -1045,6 +1055,19 @@ static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); synchronize_net(); + /* + * iwl_mvm_ref_sync takes a reference before checking the flag. + * so by checking there is no held reference we prevent a state + * in which iwl_mvm_ref_sync continues successfully while we + * configure the firmware to enter d0i3 + */ + if (iwl_mvm_ref_taken(mvm)) { + IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n"); + clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); + wake_up(&mvm->d0i3_exit_waitq); + return 1; + } + ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_enter_d0i3_iterator, diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c index 12283b55ee84ff5aa49b629d3bd4b702b9826720..1c0d4a45c1a8e95064a4cfae49c225d419c6dd1e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c @@ -68,7 +68,7 @@ #include "mvm.h" /* Maps the driver specific channel width definition to the the fw values */ -static inline u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef) +u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef) { switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: @@ -90,7 +90,7 @@ static inline u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef) * Maps the driver specific control channel position (relative to the center * freq) definitions to the the fw values */ -static inline u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef) +u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef) { switch (chandef->chan->center_freq - chandef->center_freq1) { case -70: diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index 5b85b0cc7a2aa84675ed685a216cd6fe6f1d7f64..2620dd0c45f9638c949fd34b7482533b13c33126 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c @@ -286,6 +286,27 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, return true; } +static int iwl_mvm_power_get_skip_over_dtim(int dtimper, int bi) +{ + int numerator; + int dtim_interval = dtimper * bi; + + if (WARN_ON(!dtim_interval)) + return 0; + + if (dtimper == 1) { + if (bi > 100) + numerator = 408; + else + numerator = 510; + } else if (dtimper < 10) { + numerator = 612; + } else { + return 0; + } + return max(1, (numerator / dtim_interval)); +} + static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif) { struct ieee80211_chanctx_conf *chanctx_conf; @@ -308,7 +329,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_power_cmd 
*cmd) { - int dtimper, dtimper_msec; + int dtimper, bi; int keep_alive; bool radar_detect = false; struct iwl_mvm_vif *mvmvif __maybe_unused = @@ -317,6 +338,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); dtimper = vif->bss_conf.dtim_period; + bi = vif->bss_conf.beacon_int; /* * Regardless of power management state the driver must set @@ -324,10 +346,9 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, * immediately after association. Check that keep alive period * is at least 3 * DTIM */ - dtimper_msec = dtimper * vif->bss_conf.beacon_int; - keep_alive = max_t(int, 3 * dtimper_msec, - MSEC_PER_SEC * POWER_KEEP_ALIVE_PERIOD_SEC); - keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC); + keep_alive = DIV_ROUND_UP(ieee80211_tu_to_usec(3 * dtimper * bi), + USEC_PER_SEC); + keep_alive = max(keep_alive, POWER_KEEP_ALIVE_PERIOD_SEC); cmd->keep_alive_seconds = cpu_to_le16(keep_alive); if (mvm->ps_disabled) @@ -352,11 +373,14 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, radar_detect = iwl_mvm_power_is_radar(vif); /* Check skip over DTIM conditions */ - if (!radar_detect && (dtimper <= 10) && + if (!radar_detect && (dtimper < 10) && (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP || mvm->cur_ucode == IWL_UCODE_WOWLAN)) { - cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); - cmd->skip_dtim_periods = 3; + cmd->skip_dtim_periods = + iwl_mvm_power_get_skip_over_dtim(dtimper, bi); + if (cmd->skip_dtim_periods) + cmd->flags |= + cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); } if (mvm->cur_ucode != IWL_UCODE_WOWLAN) { diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index 18a539999580410dbb9caafdae6d975d15def1fe..30ceb67ed7a7a22a86c376f9713a4af9d8e84125 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -158,6 +158,12 @@ struct rs_tx_column { allow_column_func_t checks[MAX_COLUMN_CHECKS]; }; +static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + struct iwl_scale_tbl_info *tbl) +{ + return iwl_mvm_bt_coex_is_ant_avail(mvm, tbl->rate.ant); +} + static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_scale_tbl_info *tbl) { @@ -218,6 +224,9 @@ static const struct rs_tx_column rs_tx_columns[] = { RS_COLUMN_INVALID, RS_COLUMN_INVALID, }, + .checks = { + rs_ant_allow, + }, }, [RS_COLUMN_LEGACY_ANT_B] = { .mode = RS_LEGACY, @@ -231,6 +240,9 @@ static const struct rs_tx_column rs_tx_columns[] = { RS_COLUMN_INVALID, RS_COLUMN_INVALID, }, + .checks = { + rs_ant_allow, + }, }, [RS_COLUMN_SISO_ANT_A] = { .mode = RS_SISO, @@ -246,6 +258,7 @@ static const struct rs_tx_column rs_tx_columns[] = { }, .checks = { rs_siso_allow, + rs_ant_allow, }, }, [RS_COLUMN_SISO_ANT_B] = { @@ -262,6 +275,7 @@ static const struct rs_tx_column rs_tx_columns[] = { }, .checks = { rs_siso_allow, + rs_ant_allow, }, }, [RS_COLUMN_SISO_ANT_A_SGI] = { @@ -279,6 +293,7 @@ static const struct rs_tx_column rs_tx_columns[] = { }, .checks = { rs_siso_allow, + rs_ant_allow, rs_sgi_allow, }, }, @@ -297,6 +312,7 @@ static const struct rs_tx_column rs_tx_columns[] = { }, .checks = { rs_siso_allow, + rs_ant_allow, rs_sgi_allow, }, }, @@ -505,10 +521,11 @@ static const char *rs_pretty_lq_type(enum iwl_table_type type) static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate, const char *prefix) { - IWL_DEBUG_RATE(mvm, "%s: (%s: %d) ANT: %s BW: %d SGI: %d LDPC: %d\n", 
+ IWL_DEBUG_RATE(mvm, + "%s: (%s: %d) ANT: %s BW: %d SGI: %d LDPC: %d STBC: %d\n", prefix, rs_pretty_lq_type(rate->type), rate->index, rs_pretty_ant(rate->ant), - rate->bw, rate->sgi, rate->ldpc); + rate->bw, rate->sgi, rate->ldpc, rate->stbc); } static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window) @@ -741,6 +758,12 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm, IWL_ERR(mvm, "Invalid rate->type %d\n", rate->type); } + if (is_siso(rate) && rate->stbc) { + /* To enable STBC we need to set both a flag and ANT_AB */ + ucode_rate |= RATE_MCS_ANT_AB_MSK; + ucode_rate |= RATE_MCS_VHT_STBC_MSK; + } + ucode_rate |= rate->bw; if (rate->sgi) ucode_rate |= RATE_MCS_SGI_MSK; @@ -785,6 +808,8 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, rate->sgi = true; if (ucode_rate & RATE_MCS_LDPC_MSK) rate->ldpc = true; + if (ucode_rate & RATE_MCS_VHT_STBC_MSK) + rate->stbc = true; rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK; @@ -794,7 +819,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, if (nss == 1) { rate->type = LQ_HT_SISO; - WARN_ON_ONCE(num_of_ant != 1); + WARN_ON_ONCE(!rate->stbc && num_of_ant != 1); } else if (nss == 2) { rate->type = LQ_HT_MIMO2; WARN_ON_ONCE(num_of_ant != 2); @@ -807,7 +832,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, if (nss == 1) { rate->type = LQ_VHT_SISO; - WARN_ON_ONCE(num_of_ant != 1); + WARN_ON_ONCE(!rate->stbc && num_of_ant != 1); } else if (nss == 2) { rate->type = LQ_VHT_MIMO2; WARN_ON_ONCE(num_of_ant != 2); @@ -992,7 +1017,15 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta, static inline bool rs_rate_match(struct rs_rate *a, struct rs_rate *b) { - return (a->type == b->type) && (a->ant == b->ant) && (a->sgi == b->sgi); + bool ant_match; + + if (a->stbc) + ant_match = (b->ant == ANT_A || b->ant == ANT_B); + else + ant_match = (a->ant == b->ant); + + return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) + && ant_match; } static u32 rs_ch_width_from_mac_flags(enum mac80211_rate_control_flags flags) @@ -1093,10 +1126,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (time_after(jiffies, (unsigned long)(lq_sta->last_tx + RS_IDLE_TIMEOUT))) { - int tid; + int t; + IWL_DEBUG_RATE(mvm, "Tx idle for too long. 
reinit rs\n"); - for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) - ieee80211_stop_tx_ba_session(sta, tid); + for (t = 0; t < IWL_MAX_TID_COUNT; t++) + ieee80211_stop_tx_ba_session(sta, t); iwl_mvm_rs_rate_init(mvm, sta, info->band, false); return; @@ -1137,16 +1171,15 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, /* Rate did match, so reset the missed_rate_counter */ lq_sta->missed_rate_counter = 0; - /* Figure out if rate scale algorithm is in active or search table */ - if (rs_rate_match(&rate, - &(lq_sta->lq_info[lq_sta->active_tbl].rate))) { + if (!lq_sta->search_better_tbl) { curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); - } else if (rs_rate_match(&rate, - &lq_sta->lq_info[1 - lq_sta->active_tbl].rate)) { + } else { curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - } else { + } + + if (WARN_ON_ONCE(!rs_rate_match(&rate, &curr_tbl->rate))) { IWL_DEBUG_RATE(mvm, "Neither active nor search matches tx rate\n"); tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); @@ -1171,6 +1204,13 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, * first index into rate scale table. */ if (info->flags & IEEE80211_TX_STAT_AMPDU) { + /* ampdu_ack_len = 0 marks no BA was received. In this case + * treat it as a single frame loss as we don't want the success + * ratio to dip too quickly because a BA wasn't received + */ + if (info->status.ampdu_ack_len == 0) + info->status.ampdu_len = 1; + ucode_rate = le32_to_cpu(table->rs_table[0]); rs_rate_from_ucode_rate(ucode_rate, info->band, &rate); rs_collect_tx_data(lq_sta, curr_tbl, rate.index, @@ -1225,7 +1265,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp); done: /* See if there's a better rate or modulation mode to try. */ - if (sta && sta->supp_rates[info->band]) + if (sta->supp_rates[info->band]) rs_rate_scale_perform(mvm, sta, lq_sta, tid); } @@ -1623,6 +1663,8 @@ static int rs_switch_to_column(struct iwl_mvm *mvm, else rate->type = LQ_LEGACY_G; + rate->bw = RATE_MCS_CHAN_WIDTH_20; + rate->ldpc = false; rate_mask = lq_sta->active_legacy_rate; } else if (column->mode == RS_SISO) { rate->type = lq_sta->is_vht ? 
LQ_VHT_SISO : LQ_HT_SISO; @@ -1634,8 +1676,11 @@ static int rs_switch_to_column(struct iwl_mvm *mvm, WARN_ON_ONCE("Bad column mode"); } - rate->bw = rs_bw_from_sta_bw(sta); - rate->ldpc = lq_sta->ldpc; + if (column->mode != RS_LEGACY) { + rate->bw = rs_bw_from_sta_bw(sta); + rate->ldpc = lq_sta->ldpc; + } + search_tbl->column = col_id; rs_set_expected_tpt_table(lq_sta, search_tbl); @@ -1754,6 +1799,29 @@ out: return action; } +static bool rs_stbc_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_vif *vif = mvmsta->vif; + bool sta_ps_disabled = (vif->type == NL80211_IFTYPE_STATION && + !vif->bss_conf.ps); + + /* Our chip supports Tx STBC and the peer is an HT/VHT STA which + * supports STBC of at least 1*SS + */ + if (!lq_sta->stbc) + return false; + + if (!mvm->ps_disabled && !sta_ps_disabled) + return false; + + if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) + return false; + + return true; +} + static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index, int *weaker, int *stronger) { @@ -2675,6 +2743,11 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (mvm->cfg->ht_params->ldpc && (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) lq_sta->ldpc = true; + + if (mvm->cfg->ht_params->stbc && + (num_of_ant(mvm->fw->valid_tx_ant) > 1) && + (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) + lq_sta->stbc = true; } else { rs_vht_set_enabled_rates(sta, vht_cap, lq_sta); lq_sta->is_vht = true; @@ -2682,8 +2755,16 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (mvm->cfg->ht_params->ldpc && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)) lq_sta->ldpc = true; + + if (mvm->cfg->ht_params->stbc && + (num_of_ant(mvm->fw->valid_tx_ant) > 1) && + (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)) + lq_sta->stbc = true; } + if (IWL_MVM_RS_DISABLE_MIMO) + lq_sta->active_mimo2_rate = 0; + lq_sta->max_legacy_rate_idx = find_last_bit(&lq_sta->active_legacy_rate, BITS_PER_LONG); lq_sta->max_siso_rate_idx = find_last_bit(&lq_sta->active_siso_rate, @@ -2692,11 +2773,11 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, BITS_PER_LONG); IWL_DEBUG_RATE(mvm, - "RATE MASK: LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d\n", + "RATE MASK: LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d STBC%d\n", lq_sta->active_legacy_rate, lq_sta->active_siso_rate, lq_sta->active_mimo2_rate, - lq_sta->is_vht, lq_sta->ldpc); + lq_sta->is_vht, lq_sta->ldpc, lq_sta->stbc); IWL_DEBUG_RATE(mvm, "MAX RATE: LEGACY=%d SISO=%d MIMO2=%d\n", lq_sta->max_legacy_rate_idx, lq_sta->max_siso_rate_idx, @@ -2820,6 +2901,7 @@ static void rs_fill_rates_for_column(struct iwl_mvm *mvm, * rate[15] 0x800D Legacy | ANT: B Rate: 6 Mbps */ static void rs_build_rates_table(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, const struct rs_rate *initial_rate) { @@ -2832,6 +2914,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm, memcpy(&rate, initial_rate, sizeof(rate)); valid_tx_ant = mvm->fw->valid_tx_ant; + rate.stbc = rs_stbc_allow(mvm, sta, lq_sta); if (is_siso(&rate)) { num_rates = RS_INITIAL_SISO_NUM_RATES; @@ -2903,7 +2986,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm, if (WARN_ON_ONCE(!sta || !initial_rate)) return; - rs_build_rates_table(mvm, lq_sta, initial_rate); + rs_build_rates_table(mvm, sta, lq_sta, initial_rate); if (num_of_ant(initial_rate->ant) == 1) lq_cmd->single_stream_ant_msk = initial_rate->ant; diff --git 
a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h index eb34c1209acc2d6fdd9d35fd2b03d2ea3e565780..defd70a6d9e66da6f64cd39989a607f286821966 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/iwlwifi/mvm/rs.h @@ -208,6 +208,7 @@ struct rs_rate { u32 bw; bool sgi; bool ldpc; + bool stbc; }; @@ -331,6 +332,7 @@ struct iwl_lq_sta { u64 last_tx; bool is_vht; bool ldpc; /* LDPC Rx is supported by the STA */ + bool stbc; /* Tx STBC is supported by chip and Rx by STA */ enum ieee80211_band band; /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index 3cf40f3f58ec2214323b16aa2708dad1c3430163..94b6e7297a1eca297488ac8377496756c85a18e2 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c @@ -96,27 +96,27 @@ int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, * Adds the rxb to a new skb and give it to mac80211 */ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, + struct sk_buff *skb, struct ieee80211_hdr *hdr, u16 len, - u32 ampdu_status, - struct iwl_rx_cmd_buffer *rxb, - struct ieee80211_rx_status *stats) + u32 ampdu_status, u8 crypt_len, + struct iwl_rx_cmd_buffer *rxb) { - struct sk_buff *skb; unsigned int hdrlen, fraglen; - /* Dont use dev_alloc_skb(), we'll have enough headroom once - * ieee80211_hdr pulled. - */ - skb = alloc_skb(128, GFP_ATOMIC); - if (!skb) { - IWL_ERR(mvm, "alloc_skb failed\n"); - return; - } /* If frame is small enough to fit in skb->head, pull it completely. - * If not, only pull ieee80211_hdr so that splice() or TCP coalesce - * are more efficient. + * If not, only pull ieee80211_hdr (including crypto if present, and + * an additional 8 bytes for SNAP/ethertype, see below) so that + * splice() or TCP coalesce are more efficient. + * + * Since, in addition, ieee80211_data_to_8023() always pull in at + * least 8 bytes (possibly more for mesh) we can do the same here + * to save the cost of doing it later. That still doesn't pull in + * the actual IP header since the typical case has a SNAP header. + * If the latter changes (there are efforts in the standards group + * to do so) we should revisit this and ieee80211_data_to_8023(). */ - hdrlen = (len <= skb_tailroom(skb)) ? len : sizeof(*hdr); + hdrlen = (len <= skb_tailroom(skb)) ? 
len : + sizeof(*hdr) + crypt_len + 8; memcpy(skb_put(skb, hdrlen), hdr, hdrlen); fraglen = len - hdrlen; @@ -129,8 +129,6 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, fraglen, rxb->truesize); } - memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); - ieee80211_rx(mvm->hw, skb); } @@ -185,7 +183,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_rx_status *stats, - u32 rx_pkt_status) + u32 rx_pkt_status, + u8 *crypt_len) { if (!ieee80211_has_protected(hdr->frame_control) || (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == @@ -205,12 +204,14 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm, stats->flag |= RX_FLAG_DECRYPTED; IWL_DEBUG_WEP(mvm, "hw decrypted CCMP successfully\n"); + *crypt_len = IEEE80211_CCMP_HDR_LEN; return 0; case RX_MPDU_RES_STATUS_SEC_TKIP_ENC: /* Don't drop the frame and decrypt it in SW */ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK)) return 0; + *crypt_len = IEEE80211_TKIP_IV_LEN; /* fall through if TTAK OK */ case RX_MPDU_RES_STATUS_SEC_WEP_ENC: @@ -218,6 +219,9 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm, return -1; stats->flag |= RX_FLAG_DECRYPTED; + if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == + RX_MPDU_RES_STATUS_SEC_WEP_ENC) + *crypt_len = IEEE80211_WEP_IV_LEN; return 0; case RX_MPDU_RES_STATUS_SEC_EXT_ENC: @@ -242,15 +246,17 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd) { struct ieee80211_hdr *hdr; - struct ieee80211_rx_status rx_status = {}; + struct ieee80211_rx_status *rx_status; struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_phy_info *phy_info; struct iwl_rx_mpdu_res_start *rx_res; struct ieee80211_sta *sta; + struct sk_buff *skb; u32 len; u32 ampdu_status; u32 rate_n_flags; u32 rx_pkt_status; + u8 crypt_len = 0; phy_info = &mvm->last_phy_info; rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data; @@ -259,20 +265,32 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, rx_pkt_status = le32_to_cpup((__le32 *) (pkt->data + sizeof(*rx_res) + len)); - memset(&rx_status, 0, sizeof(rx_status)); + /* Dont use dev_alloc_skb(), we'll have enough headroom once + * ieee80211_hdr pulled. 
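+	 *
+	 * (The 128 bytes of headroom are assumed to be enough for the
+	 * 802.11 header, the crypto IV and the extra 8 bytes for
+	 * SNAP/ethertype that iwl_mvm_pass_packet_to_mac80211() above may
+	 * pull into the skb head.)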
+ */ + skb = alloc_skb(128, GFP_ATOMIC); + if (!skb) { + IWL_ERR(mvm, "alloc_skb failed\n"); + return 0; + } + + rx_status = IEEE80211_SKB_RXCB(skb); /* * drop the packet if it has failed being decrypted by HW */ - if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, &rx_status, rx_pkt_status)) { + if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status, + &crypt_len)) { IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n", rx_pkt_status); + kfree_skb(skb); return 0; } if ((unlikely(phy_info->cfg_phy_cnt > 20))) { IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n", phy_info->cfg_phy_cnt); + kfree_skb(skb); return 0; } @@ -283,31 +301,31 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) || !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) { IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status); - rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; + rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; } /* This will be used in several places later */ rate_n_flags = le32_to_cpu(phy_info->rate_n_flags); /* rx_status carries information about the packet to mac80211 */ - rx_status.mactime = le64_to_cpu(phy_info->timestamp); - rx_status.device_timestamp = le32_to_cpu(phy_info->system_timestamp); - rx_status.band = + rx_status->mactime = le64_to_cpu(phy_info->timestamp); + rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp); + rx_status->band = (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; - rx_status.freq = + rx_status->freq = ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel), - rx_status.band); + rx_status->band); /* * TSF as indicated by the fw is at INA time, but mac80211 expects the * TSF at the beginning of the MPDU. 
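	 * (Compensating in the driver would presumably require knowing how
	 * long the PHY spent on the frame before the INA indication, which
	 * depends on rate and length; hence the flag below stays commented
	 * out.)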
*/ - /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/ + /*rx_status->flag |= RX_FLAG_MACTIME_MPDU;*/ - iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status); + iwl_mvm_get_signal_strength(mvm, phy_info, rx_status); - IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal, - (unsigned long long)rx_status.mactime); + IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status->signal, + (unsigned long long)rx_status->mactime); rcu_read_lock(); /* @@ -326,15 +344,14 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, if (sta) { struct iwl_mvm_sta *mvmsta; mvmsta = iwl_mvm_sta_from_mac80211(sta); - rs_update_last_rssi(mvm, &mvmsta->lq_sta, - &rx_status); + rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status); } rcu_read_unlock(); /* set the preamble flag if appropriate */ if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE)) - rx_status.flag |= RX_FLAG_SHORTPRE; + rx_status->flag |= RX_FLAG_SHORTPRE; if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) { /* @@ -342,8 +359,8 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, * together since we get a single PHY response * from the firmware for all of them */ - rx_status.flag |= RX_FLAG_AMPDU_DETAILS; - rx_status.ampdu_reference = mvm->ampdu_ref; + rx_status->flag |= RX_FLAG_AMPDU_DETAILS; + rx_status->ampdu_reference = mvm->ampdu_ref; } /* Set up the HT phy flags */ @@ -351,50 +368,50 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, case RATE_MCS_CHAN_WIDTH_20: break; case RATE_MCS_CHAN_WIDTH_40: - rx_status.flag |= RX_FLAG_40MHZ; + rx_status->flag |= RX_FLAG_40MHZ; break; case RATE_MCS_CHAN_WIDTH_80: - rx_status.vht_flag |= RX_VHT_FLAG_80MHZ; + rx_status->vht_flag |= RX_VHT_FLAG_80MHZ; break; case RATE_MCS_CHAN_WIDTH_160: - rx_status.vht_flag |= RX_VHT_FLAG_160MHZ; + rx_status->vht_flag |= RX_VHT_FLAG_160MHZ; break; } if (rate_n_flags & RATE_MCS_SGI_MSK) - rx_status.flag |= RX_FLAG_SHORT_GI; + rx_status->flag |= RX_FLAG_SHORT_GI; if (rate_n_flags & RATE_HT_MCS_GF_MSK) - rx_status.flag |= RX_FLAG_HT_GF; + rx_status->flag |= RX_FLAG_HT_GF; if (rate_n_flags & RATE_MCS_LDPC_MSK) - rx_status.flag |= RX_FLAG_LDPC; + rx_status->flag |= RX_FLAG_LDPC; if (rate_n_flags & RATE_MCS_HT_MSK) { u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >> RATE_MCS_STBC_POS; - rx_status.flag |= RX_FLAG_HT; - rx_status.rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; - rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT; + rx_status->flag |= RX_FLAG_HT; + rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; + rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT; } else if (rate_n_flags & RATE_MCS_VHT_MSK) { u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >> RATE_MCS_STBC_POS; - rx_status.vht_nss = + rx_status->vht_nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1; - rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; - rx_status.flag |= RX_FLAG_VHT; - rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT; + rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; + rx_status->flag |= RX_FLAG_VHT; + rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) - rx_status.vht_flag |= RX_VHT_FLAG_BF; + rx_status->vht_flag |= RX_VHT_FLAG_BF; } else { - rx_status.rate_idx = + rx_status->rate_idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, - rx_status.band); + rx_status->band); } #ifdef CONFIG_IWLWIFI_DEBUGFS iwl_mvm_update_frame_stats(mvm, &mvm->drv_rx_stats, rate_n_flags, - rx_status.flag & 
RX_FLAG_AMPDU_DETAILS); + rx_status->flag & RX_FLAG_AMPDU_DETAILS); #endif - iwl_mvm_pass_packet_to_mac80211(mvm, hdr, len, ampdu_status, - rxb, &rx_status); + iwl_mvm_pass_packet_to_mac80211(mvm, skb, hdr, len, ampdu_status, + crypt_len, rxb); return 0; } @@ -500,29 +517,8 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm, .mvm = mvm, }; - /* - * set temperature debug enabled - ignore FW temperature updates - * and use the user set temperature. - */ - if (mvm->temperature_test) { - if (mvm->temperature < le32_to_cpu(common->temperature)) - IWL_DEBUG_TEMP(mvm, - "Ignoring FW temperature update that is greater than the debug set temperature (debug temp = %d, fw temp = %d)\n", - mvm->temperature, - le32_to_cpu(common->temperature)); - /* - * skip iwl_mvm_tt_handler since we are in - * temperature debug mode and we are ignoring - * the new temperature value - */ - goto update; - } + iwl_mvm_tt_temp_changed(mvm, le32_to_cpu(common->temperature)); - if (mvm->temperature != le32_to_cpu(common->temperature)) { - mvm->temperature = le32_to_cpu(common->temperature); - iwl_mvm_tt_handler(mvm); - } -update: iwl_mvm_update_rx_statistics(mvm, stats); ieee80211_iterate_active_interfaces(mvm->hw, diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 7554f705383063fa3c7c3610595e9e39f0cc9af8..e5294d01181e404c44d78190df5ae869d8c0cd21 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -83,15 +83,29 @@ struct iwl_mvm_scan_params { } dwell[IEEE80211_NUM_BANDS]; }; +enum iwl_umac_scan_uid_type { + IWL_UMAC_SCAN_UID_REG_SCAN = BIT(0), + IWL_UMAC_SCAN_UID_SCHED_SCAN = BIT(1), + IWL_UMAC_SCAN_UID_ALL = IWL_UMAC_SCAN_UID_REG_SCAN | + IWL_UMAC_SCAN_UID_SCHED_SCAN, +}; + +static int iwl_umac_scan_stop(struct iwl_mvm *mvm, + enum iwl_umac_scan_uid_type type, bool notify); + +static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm) +{ + if (mvm->scan_rx_ant != ANT_NONE) + return mvm->scan_rx_ant; + return mvm->fw->valid_rx_ant; +} + static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm) { u16 rx_chain; u8 rx_ant; - if (mvm->scan_rx_ant != ANT_NONE) - rx_ant = mvm->scan_rx_ant; - else - rx_ant = mvm->fw->valid_rx_ant; + rx_ant = iwl_mvm_scan_rx_ant(mvm); rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS; rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS; rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS; @@ -270,7 +284,8 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); bool *global_bound = data; - if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < MAX_PHYS) + if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt && + mvmvif->phy_ctxt->id < MAX_PHYS) *global_bound = true; } @@ -365,6 +380,10 @@ static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm, !is_sched_scan) max_probe_len -= 32; + /* DS parameter set element is added on 2.4GHZ band if required */ + if (iwl_mvm_rrm_scan_needed(mvm)) + max_probe_len -= 3; + return max_probe_len; } @@ -536,23 +555,17 @@ int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm, struct iwl_device_cmd *cmd) { struct iwl_rx_packet *pkt = rxb_addr(rxb); - u8 client_bitmap = 0; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) { + if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) && + !(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) { struct iwl_sched_scan_results *notif = (void *)pkt->data; - client_bitmap = notif->client_bitmap; + if (!(notif->client_bitmap 
& SCAN_CLIENT_SCHED_SCAN)) + return 0; } - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN || - client_bitmap & SCAN_CLIENT_SCHED_SCAN) { - if (mvm->scan_status == IWL_MVM_SCAN_SCHED) { - IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n"); - ieee80211_sched_scan_results(mvm->hw); - } else { - IWL_DEBUG_SCAN(mvm, "Scan results\n"); - } - } + IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n"); + ieee80211_sched_scan_results(mvm->hw); return 0; } @@ -662,6 +675,7 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm, mvm->scan_status = IWL_MVM_SCAN_NONE; ieee80211_scan_completed(mvm->hw, status == IWL_SCAN_OFFLOAD_ABORTED); + iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); } mvm->last_ebs_successful = !ebs_status; @@ -963,6 +977,20 @@ free_blacklist: return ret; } +static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm, + struct cfg80211_sched_scan_request *req) +{ + if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) { + IWL_DEBUG_SCAN(mvm, + "Sending scheduled scan with filtering, n_match_sets %d\n", + req->n_match_sets); + return false; + } + + IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n"); + return true; +} + int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, struct cfg80211_sched_scan_request *req) { @@ -978,15 +1006,8 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, .schedule_line[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER, }; - if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) { - IWL_DEBUG_SCAN(mvm, - "Sending scheduled scan with filtering, filter len %d\n", - req->n_match_sets); - } else { - IWL_DEBUG_SCAN(mvm, - "Sending Scheduled scan without filtering\n"); + if (iwl_mvm_scan_pass_all(mvm, req)) scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL); - } if (mvm->last_ebs_successful && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) @@ -997,6 +1018,38 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, sizeof(scan_req), &scan_req); } +int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_scan_ies *ies) +{ + int ret; + + if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) { + ret = iwl_mvm_config_sched_scan_profiles(mvm, req); + if (ret) + return ret; + ret = iwl_mvm_sched_scan_umac(mvm, vif, req, ies); + } else if ((mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) { + mvm->scan_status = IWL_MVM_SCAN_SCHED; + ret = iwl_mvm_config_sched_scan_profiles(mvm, req); + if (ret) + return ret; + ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies); + } else { + mvm->scan_status = IWL_MVM_SCAN_SCHED; + ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies); + if (ret) + return ret; + ret = iwl_mvm_config_sched_scan_profiles(mvm, req); + if (ret) + return ret; + ret = iwl_mvm_sched_scan_start(mvm, req); + } + + return ret; +} + static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm) { int ret; @@ -1041,6 +1094,10 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) lockdep_assert_held(&mvm->mutex); + if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) + return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN, + notify); + if (mvm->scan_status != IWL_MVM_SCAN_SCHED && (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || mvm->scan_status != IWL_MVM_SCAN_OS)) { @@ -1071,8 +1128,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) /* * Clear the scan status so the next scan requests will succeed. 
This * also ensures the Rx handler doesn't do anything, as the scan was - * stopped from above. + * stopped from above. Since the rx handler won't do anything now, + * we have to release the scan reference here. */ + if (mvm->scan_status == IWL_MVM_SCAN_OS) + iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + mvm->scan_status = IWL_MVM_SCAN_NONE; if (notify) { @@ -1124,20 +1185,64 @@ iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm, } } +static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies, + size_t len, u8 *const pos) +{ + static const u8 before_ds_params[] = { + WLAN_EID_SSID, + WLAN_EID_SUPP_RATES, + WLAN_EID_REQUEST, + WLAN_EID_EXT_SUPP_RATES, + }; + size_t offs; + u8 *newpos = pos; + + if (!iwl_mvm_rrm_scan_needed(mvm)) { + memcpy(newpos, ies, len); + return newpos + len; + } + + offs = ieee80211_ie_split(ies, len, + before_ds_params, + ARRAY_SIZE(before_ds_params), + 0); + + memcpy(newpos, ies, offs); + newpos += offs; + + /* Add a placeholder for DS Parameter Set element */ + *newpos++ = WLAN_EID_DS_PARAMS; + *newpos++ = 1; + *newpos++ = 0; + + memcpy(newpos, ies + offs, len - offs); + newpos += len - offs; + + return newpos; +} + static void iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_scan_ies *ies, - struct iwl_scan_req_unified_lmac *cmd) + struct iwl_scan_probe_req *preq, + const u8 *mac_addr, const u8 *mac_addr_mask) { - struct iwl_scan_probe_req *preq = (void *)(cmd->data + - sizeof(struct iwl_scan_channel_cfg_lmac) * - mvm->fw->ucode_capa.n_scan_channels); struct ieee80211_mgmt *frame = (struct ieee80211_mgmt *)preq->buf; - u8 *pos; + u8 *pos, *newpos; + + /* + * Unfortunately, right now the offload scan doesn't support randomising + * within the firmware, so until the firmware API is ready we implement + * it in the driver. This means that the scan iterations won't really be + * random, only when it's restarted, but at least that helps a bit. 
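+	 *
+	 * Example semantics (addresses assumed, not part of this change):
+	 * with mac_addr 02:00:00:00:00:00 and mac_addr_mask
+	 * ff:ff:ff:00:00:00, the first three bytes of frame->sa come from
+	 * mac_addr and the last three are random, i.e. per byte
+	 * sa = (mac_addr & mask) | (random & ~mask).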
+ */ + if (mac_addr) + get_random_mask_addr(frame->sa, mac_addr, mac_addr_mask); + else + memcpy(frame->sa, vif->addr, ETH_ALEN); frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); eth_broadcast_addr(frame->da); - memcpy(frame->sa, vif->addr, ETH_ALEN); eth_broadcast_addr(frame->bssid); frame->seq_ctrl = 0; @@ -1148,11 +1253,14 @@ iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif, preq->mac_header.offset = 0; preq->mac_header.len = cpu_to_le16(24 + 2); - memcpy(pos, ies->ies[IEEE80211_BAND_2GHZ], - ies->len[IEEE80211_BAND_2GHZ]); + /* Insert ds parameter set element on 2.4 GHz band */ + newpos = iwl_mvm_copy_and_insert_ds_elem(mvm, + ies->ies[IEEE80211_BAND_2GHZ], + ies->len[IEEE80211_BAND_2GHZ], + pos); preq->band_data[0].offset = cpu_to_le16(pos - preq->buf); - preq->band_data[0].len = cpu_to_le16(ies->len[IEEE80211_BAND_2GHZ]); - pos += ies->len[IEEE80211_BAND_2GHZ]; + preq->band_data[0].len = cpu_to_le16(newpos - pos); + pos = newpos; memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ], ies->len[IEEE80211_BAND_5GHZ]); @@ -1213,9 +1321,10 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm, .dataflags = { IWL_HCMD_DFL_NOCOPY, }, }; struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd; + struct iwl_scan_probe_req *preq; struct iwl_mvm_scan_params params = {}; u32 flags; - int ssid_bitmap = 0; + u32 ssid_bitmap = 0; int ret, i; lockdep_assert_held(&mvm->mutex); @@ -1274,7 +1383,13 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm, req->req.n_channels, ssid_bitmap, cmd); - iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, cmd); + preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) * + mvm->fw->ucode_capa.n_scan_channels); + + iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, preq, + req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ? + req->req.mac_addr : NULL, + req->req.mac_addr_mask); ret = iwl_mvm_send_cmd(mvm, &hcmd); if (!ret) { @@ -1307,6 +1422,7 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, .dataflags = { IWL_HCMD_DFL_NOCOPY, }, }; struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd; + struct iwl_scan_probe_req *preq; struct iwl_mvm_scan_params params = {}; int ret; u32 flags = 0, ssid_bitmap = 0; @@ -1330,15 +1446,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, cmd->n_channels = (u8)req->n_channels; - if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) { - IWL_DEBUG_SCAN(mvm, - "Sending scheduled scan with filtering, n_match_sets %d\n", - req->n_match_sets); - } else { - IWL_DEBUG_SCAN(mvm, - "Sending Scheduled scan without filtering\n"); + if (iwl_mvm_scan_pass_all(mvm, req)) flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL; - } if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0) flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION; @@ -1368,7 +1477,13 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels, ssid_bitmap, cmd); - iwl_mvm_build_unified_scan_probe(mvm, vif, ies, cmd); + preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) * + mvm->fw->ucode_capa.n_scan_channels); + + iwl_mvm_build_unified_scan_probe(mvm, vif, ies, preq, + req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ? 
+ req->mac_addr : NULL, + req->mac_addr_mask); ret = iwl_mvm_send_cmd(mvm, &hcmd); if (!ret) { @@ -1390,6 +1505,10 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, int iwl_mvm_cancel_scan(struct iwl_mvm *mvm) { + if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) + return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_REG_SCAN, + true); + if (mvm->scan_status == IWL_MVM_SCAN_NONE) return 0; @@ -1404,3 +1523,576 @@ int iwl_mvm_cancel_scan(struct iwl_mvm *mvm) return iwl_mvm_scan_offload_stop(mvm, true); return iwl_mvm_cancel_regular_scan(mvm); } + +/* UMAC scan API */ + +struct iwl_umac_scan_done { + struct iwl_mvm *mvm; + enum iwl_umac_scan_uid_type type; +}; + +static int rate_to_scan_rate_flag(unsigned int rate) +{ + static const int rate_to_scan_rate[IWL_RATE_COUNT] = { + [IWL_RATE_1M_INDEX] = SCAN_CONFIG_RATE_1M, + [IWL_RATE_2M_INDEX] = SCAN_CONFIG_RATE_2M, + [IWL_RATE_5M_INDEX] = SCAN_CONFIG_RATE_5M, + [IWL_RATE_11M_INDEX] = SCAN_CONFIG_RATE_11M, + [IWL_RATE_6M_INDEX] = SCAN_CONFIG_RATE_6M, + [IWL_RATE_9M_INDEX] = SCAN_CONFIG_RATE_9M, + [IWL_RATE_12M_INDEX] = SCAN_CONFIG_RATE_12M, + [IWL_RATE_18M_INDEX] = SCAN_CONFIG_RATE_18M, + [IWL_RATE_24M_INDEX] = SCAN_CONFIG_RATE_24M, + [IWL_RATE_36M_INDEX] = SCAN_CONFIG_RATE_36M, + [IWL_RATE_48M_INDEX] = SCAN_CONFIG_RATE_48M, + [IWL_RATE_54M_INDEX] = SCAN_CONFIG_RATE_54M, + }; + + return rate_to_scan_rate[rate]; +} + +static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm) +{ + struct ieee80211_supported_band *band; + unsigned int rates = 0; + int i; + + band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ]; + for (i = 0; i < band->n_bitrates; i++) + rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value); + band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; + for (i = 0; i < band->n_bitrates; i++) + rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value); + + /* Set both basic rates and supported rates */ + rates |= SCAN_CONFIG_SUPPORTED_RATE(rates); + + return cpu_to_le32(rates); +} + +int iwl_mvm_config_scan(struct iwl_mvm *mvm) +{ + + struct iwl_scan_config *scan_config; + struct ieee80211_supported_band *band; + int num_channels = + mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels + + mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; + int ret, i, j = 0, cmd_size, data_size; + struct iwl_host_cmd cmd = { + .id = SCAN_CFG_CMD, + }; + + if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels)) + return -ENOBUFS; + + cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels; + + scan_config = kzalloc(cmd_size, GFP_KERNEL); + if (!scan_config) + return -ENOMEM; + + data_size = cmd_size - sizeof(struct iwl_mvm_umac_cmd_hdr); + scan_config->hdr.size = cpu_to_le16(data_size); + scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE | + SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS | + SCAN_CONFIG_FLAG_SET_TX_CHAINS | + SCAN_CONFIG_FLAG_SET_RX_CHAINS | + SCAN_CONFIG_FLAG_SET_ALL_TIMES | + SCAN_CONFIG_FLAG_SET_LEGACY_RATES | + SCAN_CONFIG_FLAG_SET_MAC_ADDR | + SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS| + SCAN_CONFIG_N_CHANNELS(num_channels)); + scan_config->tx_chains = cpu_to_le32(mvm->fw->valid_tx_ant); + scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); + scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm); + scan_config->out_of_channel_time = cpu_to_le32(170); + scan_config->suspend_time = cpu_to_le32(30); + scan_config->dwell_active = 20; + scan_config->dwell_passive = 110; + scan_config->dwell_fragmented = 20; + + memcpy(&scan_config->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); + + 
scan_config->bcast_sta_id = mvm->aux_sta.sta_id; + scan_config->channel_flags = IWL_CHANNEL_FLAG_EBS | + IWL_CHANNEL_FLAG_ACCURATE_EBS | + IWL_CHANNEL_FLAG_EBS_ADD | + IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE; + + band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ]; + for (i = 0; i < band->n_channels; i++, j++) + scan_config->channel_array[j] = band->channels[i].center_freq; + band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; + for (i = 0; i < band->n_channels; i++, j++) + scan_config->channel_array[j] = band->channels[i].center_freq; + + cmd.data[0] = scan_config; + cmd.len[0] = cmd_size; + cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; + + IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n"); + + ret = iwl_mvm_send_cmd(mvm, &cmd); + + kfree(scan_config); + return ret; +} + +static int iwl_mvm_find_scan_uid(struct iwl_mvm *mvm, u32 uid) +{ + int i; + + for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) + if (mvm->scan_uid[i] == uid) + return i; + + return i; +} + +static int iwl_mvm_find_free_scan_uid(struct iwl_mvm *mvm) +{ + return iwl_mvm_find_scan_uid(mvm, 0); +} + +static bool iwl_mvm_find_scan_type(struct iwl_mvm *mvm, + enum iwl_umac_scan_uid_type type) +{ + int i; + + for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) + if (mvm->scan_uid[i] & type) + return true; + + return false; +} + +static u32 iwl_generate_scan_uid(struct iwl_mvm *mvm, + enum iwl_umac_scan_uid_type type) +{ + u32 uid; + + /* make sure exactly one bit is on in scan type */ + WARN_ON(hweight8(type) != 1); + + /* + * Make sure scan uids are unique. If one scan lasts long time while + * others are completing frequently, the seq number will wrap up and + * we may have more than one scan with the same uid. + */ + do { + uid = type | (mvm->scan_seq_num << + IWL_UMAC_SCAN_UID_SEQ_OFFSET); + mvm->scan_seq_num++; + } while (iwl_mvm_find_scan_uid(mvm, uid) < + IWL_MVM_MAX_SIMULTANEOUS_SCANS); + + IWL_DEBUG_SCAN(mvm, "Generated scan UID %u\n", uid); + + return uid; +} + +static void +iwl_mvm_build_generic_umac_scan_cmd(struct iwl_mvm *mvm, + struct iwl_scan_req_umac *cmd, + struct iwl_mvm_scan_params *params) +{ + memset(cmd, 0, ksize(cmd)); + cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) - + sizeof(struct iwl_mvm_umac_cmd_hdr)); + cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active; + cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive; + if (params->passive_fragmented) + cmd->fragmented_dwell = + params->dwell[IEEE80211_BAND_2GHZ].passive; + cmd->max_out_time = cpu_to_le32(params->max_out_time); + cmd->suspend_time = cpu_to_le32(params->suspend_time); + cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); +} + +static void +iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm, + struct ieee80211_channel **channels, + int n_channels, u32 ssid_bitmap, + struct iwl_scan_req_umac *cmd) +{ + struct iwl_scan_channel_cfg_umac *channel_cfg = (void *)&cmd->data; + int i; + + for (i = 0; i < n_channels; i++) { + channel_cfg[i].flags = cpu_to_le32(ssid_bitmap); + channel_cfg[i].channel_num = channels[i]->hw_value; + channel_cfg[i].iter_count = 1; + channel_cfg[i].iter_interval = 0; + } +} + +static u32 iwl_mvm_scan_umac_common_flags(struct iwl_mvm *mvm, int n_ssids, + struct cfg80211_ssid *ssids, + int fragmented) +{ + int flags = 0; + + if (n_ssids == 0) + flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE; + + if (n_ssids == 1 && ssids[0].ssid_len != 0) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT; + + if (fragmented) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED; + + if (iwl_mvm_rrm_scan_needed(mvm)) + flags |= 
IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED; + + return flags; +} + +int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_scan_request *req) +{ + struct iwl_host_cmd hcmd = { + .id = SCAN_REQ_UMAC, + .len = { iwl_mvm_scan_size(mvm), }, + .data = { mvm->scan_cmd, }, + .dataflags = { IWL_HCMD_DFL_NOCOPY, }, + }; + struct iwl_scan_req_umac *cmd = mvm->scan_cmd; + struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data + + sizeof(struct iwl_scan_channel_cfg_umac) * + mvm->fw->ucode_capa.n_scan_channels; + struct iwl_mvm_scan_params params = {}; + u32 uid, flags; + u32 ssid_bitmap = 0; + int ret, i, uid_idx; + + lockdep_assert_held(&mvm->mutex); + + uid_idx = iwl_mvm_find_free_scan_uid(mvm); + if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS) + return -EBUSY; + + /* we should have failed registration if scan_cmd was NULL */ + if (WARN_ON(mvm->scan_cmd == NULL)) + return -ENOMEM; + + if (WARN_ON(req->req.n_ssids > PROBE_OPTION_MAX || + req->ies.common_ie_len + + req->ies.len[NL80211_BAND_2GHZ] + + req->ies.len[NL80211_BAND_5GHZ] + 24 + 2 > + SCAN_OFFLOAD_PROBE_REQ_SIZE || req->req.n_channels > + mvm->fw->ucode_capa.n_scan_channels)) + return -ENOBUFS; + + iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags, + &params); + + iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, &params); + + uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_REG_SCAN); + mvm->scan_uid[uid_idx] = uid; + cmd->uid = cpu_to_le32(uid); + + cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); + + flags = iwl_mvm_scan_umac_common_flags(mvm, req->req.n_ssids, + req->req.ssids, + params.passive_fragmented); + + flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL; + + cmd->general_flags = cpu_to_le32(flags); + cmd->n_channels = req->req.n_channels; + + for (i = 0; i < req->req.n_ssids; i++) + ssid_bitmap |= BIT(i); + + iwl_mvm_umac_scan_cfg_channels(mvm, req->req.channels, + req->req.n_channels, ssid_bitmap, cmd); + + sec_part->schedule[0].iter_count = 1; + sec_part->delay = 0; + + iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, &sec_part->preq, + req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ? + req->req.mac_addr : NULL, + req->req.mac_addr_mask); + + iwl_mvm_scan_fill_ssids(sec_part->direct_scan, req->req.ssids, + req->req.n_ssids, 0); + + ret = iwl_mvm_send_cmd(mvm, &hcmd); + if (!ret) { + IWL_DEBUG_SCAN(mvm, + "Scan request was sent successfully\n"); + } else { + /* + * If the scan failed, it usually means that the FW was unable + * to allocate the time events. Warn on it, but maybe we + * should try to send the command again with different params. + */ + IWL_ERR(mvm, "Scan failed!
ret %d\n", ret); + } + return ret; +} + +int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_scan_ies *ies) +{ + + struct iwl_host_cmd hcmd = { + .id = SCAN_REQ_UMAC, + .len = { iwl_mvm_scan_size(mvm), }, + .data = { mvm->scan_cmd, }, + .dataflags = { IWL_HCMD_DFL_NOCOPY, }, + }; + struct iwl_scan_req_umac *cmd = mvm->scan_cmd; + struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data + + sizeof(struct iwl_scan_channel_cfg_umac) * + mvm->fw->ucode_capa.n_scan_channels; + struct iwl_mvm_scan_params params = {}; + u32 uid, flags; + u32 ssid_bitmap = 0; + int ret, uid_idx; + + lockdep_assert_held(&mvm->mutex); + + uid_idx = iwl_mvm_find_free_scan_uid(mvm); + if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS) + return -EBUSY; + + /* we should have failed registration if scan_cmd was NULL */ + if (WARN_ON(mvm->scan_cmd == NULL)) + return -ENOMEM; + + if (WARN_ON(req->n_ssids > PROBE_OPTION_MAX || + ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] + + ies->len[NL80211_BAND_5GHZ] + 24 + 2 > + SCAN_OFFLOAD_PROBE_REQ_SIZE || req->n_channels > + mvm->fw->ucode_capa.n_scan_channels)) + return -ENOBUFS; + + iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, req->flags, + &params); + + iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, &params); + + cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE); + + uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN); + mvm->scan_uid[uid_idx] = uid; + cmd->uid = cpu_to_le32(uid); + + cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_LOW); + + flags = iwl_mvm_scan_umac_common_flags(mvm, req->n_ssids, req->ssids, + params.passive_fragmented); + + flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC; + + if (iwl_mvm_scan_pass_all(mvm, req)) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL; + else + flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH; + + cmd->general_flags = cpu_to_le32(flags); + + if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT && + mvm->last_ebs_successful) + cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | + IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | + IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; + + cmd->n_channels = req->n_channels; + + iwl_scan_offload_build_ssid(req, sec_part->direct_scan, &ssid_bitmap, + false); + + /* This API uses bits 0-19 instead of 1-20. */ + ssid_bitmap = ssid_bitmap >> 1; + + iwl_mvm_umac_scan_cfg_channels(mvm, req->channels, req->n_channels, + ssid_bitmap, cmd); + + sec_part->schedule[0].interval = + cpu_to_le16(req->interval / MSEC_PER_SEC); + sec_part->schedule[0].iter_count = 0xff; + + sec_part->delay = 0; + + iwl_mvm_build_unified_scan_probe(mvm, vif, ies, &sec_part->preq, + req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ? + req->mac_addr : NULL, + req->mac_addr_mask); + + ret = iwl_mvm_send_cmd(mvm, &hcmd); + if (!ret) { + IWL_DEBUG_SCAN(mvm, + "Sched scan request was sent successfully\n"); + } else { + /* + * If the scan failed, it usually means that the FW was unable + * to allocate the time events. Warn on it, but maybe we + * should try to send the command again with different params. + */ + IWL_ERR(mvm, "Sched scan failed!
ret %d\n", ret); + } + return ret; +} + +int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_umac_scan_complete *notif = (void *)pkt->data; + u32 uid = __le32_to_cpu(notif->uid); + bool sched = !!(uid & IWL_UMAC_SCAN_UID_SCHED_SCAN); + int uid_idx = iwl_mvm_find_scan_uid(mvm, uid); + + /* + * Scan uid may be set to zero in case of a scan abort request from above. + */ + if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS) + return 0; + + IWL_DEBUG_SCAN(mvm, + "Scan completed, uid %u type %s, status %s, EBS status %s\n", + uid, sched ? "sched" : "regular", + notif->status == IWL_SCAN_OFFLOAD_COMPLETED ? + "completed" : "aborted", + notif->ebs_status == IWL_SCAN_EBS_SUCCESS ? + "success" : "failed"); + + mvm->last_ebs_successful = !notif->ebs_status; + mvm->scan_uid[uid_idx] = 0; + + if (!sched) { + ieee80211_scan_completed(mvm->hw, + notif->status == + IWL_SCAN_OFFLOAD_ABORTED); + iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + } else if (!iwl_mvm_find_scan_type(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN)) { + ieee80211_sched_scan_stopped(mvm->hw); + } else { + IWL_DEBUG_SCAN(mvm, "Another sched scan is running\n"); + } + + return 0; +} + +static bool iwl_scan_umac_done_check(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_umac_scan_done *scan_done = data; + struct iwl_umac_scan_complete *notif = (void *)pkt->data; + u32 uid = __le32_to_cpu(notif->uid); + int uid_idx = iwl_mvm_find_scan_uid(scan_done->mvm, uid); + + if (WARN_ON(pkt->hdr.cmd != SCAN_COMPLETE_UMAC)) + return false; + + if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS) + return false; + + /* + * Clear scan uid of scans that were aborted from above and completed + * in FW so the RX handler does nothing.
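+	 * (Illustrative aside, not lines from the original patch: a UID is
+	 *  built in iwl_generate_scan_uid() as roughly
+	 *	uid = type | (seq_num << IWL_UMAC_SCAN_UID_SEQ_OFFSET);
+	 *  so zeroing the scan_uid[] slot here is what makes a later
+	 *  iwl_mvm_find_scan_uid() miss it and return
+	 *  IWL_MVM_MAX_SIMULTANEOUS_SCANS.)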
+ */ + scan_done->mvm->scan_uid[uid_idx] = 0; + + return !iwl_mvm_find_scan_type(scan_done->mvm, scan_done->type); +} + +static int iwl_umac_scan_abort_one(struct iwl_mvm *mvm, u32 uid) +{ + struct iwl_umac_scan_abort cmd = { + .hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) - + sizeof(struct iwl_mvm_umac_cmd_hdr)), + .uid = cpu_to_le32(uid), + }; + + lockdep_assert_held(&mvm->mutex); + + IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid); + + return iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd); +} + +static int iwl_umac_scan_stop(struct iwl_mvm *mvm, + enum iwl_umac_scan_uid_type type, bool notify) +{ + struct iwl_notification_wait wait_scan_done; + static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, }; + struct iwl_umac_scan_done scan_done = { + .mvm = mvm, + .type = type, + }; + int i, ret = -EIO; + + iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done, + scan_done_notif, + ARRAY_SIZE(scan_done_notif), + iwl_scan_umac_done_check, &scan_done); + + IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type); + + for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) { + if (mvm->scan_uid[i] & type) { + int err; + + if (iwl_mvm_is_radio_killed(mvm) && + (type & IWL_UMAC_SCAN_UID_REG_SCAN)) { + ieee80211_scan_completed(mvm->hw, true); + iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + break; + } + + err = iwl_umac_scan_abort_one(mvm, mvm->scan_uid[i]); + if (!err) + ret = 0; + } + } + + if (ret) { + IWL_DEBUG_SCAN(mvm, "Couldn't stop scan\n"); + iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); + return ret; + } + + ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ); + if (ret) + return ret; + + if (notify) { + if (type & IWL_UMAC_SCAN_UID_SCHED_SCAN) + ieee80211_sched_scan_stopped(mvm->hw); + if (type & IWL_UMAC_SCAN_UID_REG_SCAN) { + ieee80211_scan_completed(mvm->hw, true); + iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + } + } + + return ret; +} + +int iwl_mvm_scan_size(struct iwl_mvm *mvm) +{ + if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) + return sizeof(struct iwl_scan_req_umac) + + sizeof(struct iwl_scan_channel_cfg_umac) * + mvm->fw->ucode_capa.n_scan_channels + + sizeof(struct iwl_scan_req_umac_tail); + + if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) + return sizeof(struct iwl_scan_req_unified_lmac) + + sizeof(struct iwl_scan_channel_cfg_lmac) * + mvm->fw->ucode_capa.n_scan_channels + + sizeof(struct iwl_scan_probe_req); + + return sizeof(struct iwl_scan_cmd) + + mvm->fw->ucode_capa.max_probe_length + + mvm->fw->ucode_capa.n_scan_channels * + sizeof(struct iwl_scan_channel); +} diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 1731c205c81db57929da67a4527b90190316fa71..d86fe432e51f17ce0d4f7ce8897fffd3fbace3f6 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -204,6 +204,56 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, return ret; } +static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm, + struct ieee80211_sta *sta) +{ + unsigned long used_hw_queues; + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + u32 ac; + + lockdep_assert_held(&mvm->mutex); + + used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL); + + /* Find available queues, and allocate them to the ACs */ + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + u8 queue = find_first_zero_bit(&used_hw_queues, + mvm->first_agg_queue); + + if (queue >= mvm->first_agg_queue) { + IWL_ERR(mvm, 
"Failed to allocate STA queue\n"); + return -EBUSY; + } + + __set_bit(queue, &used_hw_queues); + mvmsta->hw_queue[ac] = queue; + } + + /* Found a place for all queues - enable them */ + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac], + iwl_mvm_ac_to_tx_fifo[ac]); + mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]); + } + + return 0; +} + +static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + unsigned long sta_msk; + int i; + + lockdep_assert_held(&mvm->mutex); + + /* disable the TDLS STA-specific queues */ + sta_msk = mvmsta->tfd_queue_msk; + for_each_set_bit(i, &sta_msk, sizeof(sta_msk)) + iwl_mvm_disable_txq(mvm, i); +} + int iwl_mvm_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) @@ -237,9 +287,17 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, atomic_set(&mvm->pending_frames[sta_id], 0); mvm_sta->tid_disable_agg = 0; mvm_sta->tfd_queue_msk = 0; - for (i = 0; i < IEEE80211_NUM_ACS; i++) - if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) - mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]); + + /* allocate new queues for a TDLS station */ + if (sta->tdls) { + ret = iwl_mvm_tdls_sta_init(mvm, sta); + if (ret) + return ret; + } else { + for (i = 0; i < IEEE80211_NUM_ACS; i++) + if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) + mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]); + } /* for HW restart - reset everything but the sequence number */ for (i = 0; i < IWL_MAX_TID_COUNT; i++) { @@ -251,7 +309,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, ret = iwl_mvm_sta_send_to_fw(mvm, sta, false); if (ret) - return ret; + goto err; if (vif->type == NL80211_IFTYPE_STATION) { if (!sta->tdls) { @@ -265,6 +323,10 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta); return 0; + +err: + iwl_mvm_tdls_sta_deinit(mvm, sta); + return ret; } int iwl_mvm_update_sta(struct iwl_mvm *mvm, @@ -398,6 +460,17 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk) } RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); clear_bit(sta_id, mvm->sta_drained); + + if (mvm->tfd_drained[sta_id]) { + unsigned long i, msk = mvm->tfd_drained[sta_id]; + + for_each_set_bit(i, &msk, sizeof(msk)) + iwl_mvm_disable_txq(mvm, i); + + mvm->tfd_drained[sta_id] = 0; + IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n", + sta_id, msk); + } } mutex_unlock(&mvm->mutex); @@ -430,6 +503,15 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; } + /* + * This shouldn't happen - the TDLS channel switch should be canceled + * before the STA is removed. + */ + if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) { + mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT; + cancel_delayed_work(&mvm->tdls_cs.dwork); + } + /* * Make sure that the tx response code sees the station as -EBUSY and * calls the drain worker. 
@@ -443,9 +525,22 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], ERR_PTR(-EBUSY)); spin_unlock_bh(&mvm_sta->lock); + + /* disable TDLS sta queues on drain complete */ + if (sta->tdls) { + mvm->tfd_drained[mvm_sta->sta_id] = + mvm_sta->tfd_queue_msk; + IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", + mvm_sta->sta_id); + } + ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); } else { spin_unlock_bh(&mvm_sta->lock); + + if (sta->tdls) + iwl_mvm_tdls_sta_deinit(mvm, sta); + ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); } @@ -609,7 +704,7 @@ int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - qmask = iwl_mvm_mac_get_queues_mask(mvm, vif); + qmask = iwl_mvm_mac_get_queues_mask(vif); /* * The firmware defines the TFD queue mask to only be relevant @@ -1071,15 +1166,16 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif, static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta, - struct ieee80211_key_conf *keyconf, - u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k, - u32 cmd_flags) + struct ieee80211_key_conf *keyconf, bool mcast, + u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags) { struct iwl_mvm_add_sta_key_cmd cmd = {}; __le16 key_flags; - int ret, status; + int ret; + u32 status; u16 keyidx; int i; + u8 sta_id = mvm_sta->sta_id; keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & STA_KEY_FLG_KEYID_MSK; @@ -1098,12 +1194,18 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, key_flags |= cpu_to_le16(STA_KEY_FLG_CCM); memcpy(cmd.key, keyconf->key, keyconf->keylen); break; + case WLAN_CIPHER_SUITE_WEP104: + key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES); + case WLAN_CIPHER_SUITE_WEP40: + key_flags |= cpu_to_le16(STA_KEY_FLG_WEP); + memcpy(cmd.key + 3, keyconf->key, keyconf->keylen); + break; default: key_flags |= cpu_to_le16(STA_KEY_FLG_EXT); memcpy(cmd.key, keyconf->key, keyconf->keylen); } - if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + if (mcast) key_flags |= cpu_to_le16(STA_KEY_MULTICAST); cmd.key_offset = keyconf->hw_key_idx; @@ -1195,17 +1297,88 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, return NULL; } +static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf, + bool mcast) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + int ret; + const u8 *addr; + struct ieee80211_key_seq seq; + u16 p1k[5]; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + addr = iwl_mvm_get_mac_addr(mvm, vif, sta); + /* get phase 1 key from mac80211 */ + ieee80211_get_key_rx_seq(keyconf, 0, &seq); + ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); + ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, + seq.tkip.iv32, p1k, 0); + break; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, + 0, NULL, 0); + break; + default: + ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, + 0, NULL, 0); + } + + return ret; +} + +static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, + struct ieee80211_key_conf *keyconf, + bool mcast) +{ + struct iwl_mvm_add_sta_key_cmd cmd = {}; + __le16 key_flags; + int ret; + u32 status; + + key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & + STA_KEY_FLG_KEYID_MSK); + key_flags |= 
cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP); + key_flags |= cpu_to_le16(STA_KEY_NOT_VALID); + + if (mcast) + key_flags |= cpu_to_le16(STA_KEY_MULTICAST); + + cmd.key_flags = key_flags; + cmd.key_offset = keyconf->hw_key_idx; + cmd.sta_id = sta_id; + + status = ADD_STA_SUCCESS; + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd), + &cmd, &status); + + switch (status) { + case ADD_STA_SUCCESS: + IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n"); + break; + default: + ret = -EIO; + IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n"); + break; + } + + return ret; +} + int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf, bool have_key_offset) { - struct iwl_mvm_sta *mvm_sta; + bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); + u8 sta_id; int ret; - u8 *addr, sta_id; - struct ieee80211_key_seq seq; - u16 p1k[5]; lockdep_assert_held(&mvm->mutex); @@ -1234,8 +1407,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, } } - mvm_sta = (struct iwl_mvm_sta *)sta->drv_priv; - if (WARN_ON_ONCE(mvm_sta->vif != vif)) + if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) return -EINVAL; if (!have_key_offset) { @@ -1249,26 +1421,26 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, return -ENOSPC; } - switch (keyconf->cipher) { - case WLAN_CIPHER_SUITE_TKIP: - addr = iwl_mvm_get_mac_addr(mvm, vif, sta); - /* get phase 1 key from mac80211 */ - ieee80211_get_key_rx_seq(keyconf, 0, &seq); - ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); - ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id, - seq.tkip.iv32, p1k, 0); - break; - case WLAN_CIPHER_SUITE_CCMP: - ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id, - 0, NULL, 0); - break; - default: - ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, - sta_id, 0, NULL, 0); + ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, mcast); + if (ret) { + __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); + goto end; } - if (ret) - __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); + /* + * For WEP, the same key is used for multicast and unicast. Upload it + * again, using the same key offset, and now pointing the other one + * to the same key slot (offset). + * If this fails, remove the original as well. 
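+	 * (Sketch of the resulting call pairing, illustrative only; the
+	 *  real calls appear just below and both reuse
+	 *  keyconf->hw_key_idx:
+	 *
+	 *	__iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, mcast);
+	 *	__iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, !mcast);
+	 *
+	 *  with __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast)
+	 *  rolling the first upload back if the second one fails.)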
+ */ + if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || + keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) { + ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, !mcast); + if (ret) { + __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); + __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); + } + } end: IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", @@ -1282,11 +1454,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf) { - struct iwl_mvm_sta *mvm_sta; - struct iwl_mvm_add_sta_key_cmd cmd = {}; - __le16 key_flags; - int ret, status; + bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); u8 sta_id; + int ret; lockdep_assert_held(&mvm->mutex); @@ -1299,8 +1469,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); - ret = __test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); - if (!ret) { + if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { IWL_ERR(mvm, "offset %d not used in fw key table.\n", keyconf->hw_key_idx); return -ENOENT; @@ -1326,35 +1495,17 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, } } - mvm_sta = (struct iwl_mvm_sta *)sta->drv_priv; - if (WARN_ON_ONCE(mvm_sta->vif != vif)) + if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) return -EINVAL; - key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & - STA_KEY_FLG_KEYID_MSK); - key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP); - key_flags |= cpu_to_le16(STA_KEY_NOT_VALID); - - if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) - key_flags |= cpu_to_le16(STA_KEY_MULTICAST); - - cmd.key_flags = key_flags; - cmd.key_offset = keyconf->hw_key_idx; - cmd.sta_id = sta_id; - - status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd), - &cmd, &status); + ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); + if (ret) + return ret; - switch (status) { - case ADD_STA_SUCCESS: - IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n"); - break; - default: - ret = -EIO; - IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n"); - break; - } + /* delete WEP key twice to get rid of (now useless) offset */ + if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || + keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) + ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast); return ret; } @@ -1367,6 +1518,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, { struct iwl_mvm_sta *mvm_sta; u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta); + bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT)) return; @@ -1381,8 +1533,8 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, } } - mvm_sta = (void *)sta->drv_priv; - iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id, + mvm_sta = iwl_mvm_sta_from_mac80211(sta); + iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, iv32, phase1key, CMD_ASYNC); rcu_read_unlock(); } @@ -1580,3 +1732,18 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable); } } + +void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_sta *mvmsta; + + rcu_read_lock(); + + mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); + + if (!WARN_ON(!mvmsta)) + iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true); + + 
rcu_read_unlock(); +} diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h index d9c0d7b0e9d4159506ead71fcbe148aa1d335a81..d8f48975ad087db1fad57b9f764fcb137416e2d0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/iwlwifi/mvm/sta.h @@ -264,6 +264,7 @@ enum iwl_mvm_agg_state { * the first packet to be sent in legacy HW queue in Tx AGG stop flow. * Basically when next_reclaimed reaches ssn, we can tell mac80211 that * we are ready to finish the Tx AGG stop / start flow. + * @tx_time: medium time consumed by this A-MPDU */ struct iwl_mvm_tid_data { u16 seq_number; @@ -274,6 +275,7 @@ struct iwl_mvm_tid_data { enum iwl_mvm_agg_state state; u16 txq_id; u16 ssn; + u16 tx_time; }; static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data) @@ -286,6 +288,7 @@ static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data) * struct iwl_mvm_sta - representation of a station in the driver * @sta_id: the index of the station in the fw (will be replaced by id_n_color) * @tfd_queue_msk: the tfd queues used by the station + * @hw_queue: per-AC mapping of the TFD queues used by station * @mac_id_n_color: the MAC context this station is linked to * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for * tid. @@ -309,6 +312,7 @@ static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data) struct iwl_mvm_sta { u32 sta_id; u32 tfd_queue_msk; + u8 hw_queue[IEEE80211_NUM_ACS]; u32 mac_id_n_color; u16 tid_disable_agg; u8 max_agg_bufsize; @@ -418,5 +422,6 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, bool disable); +void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif); #endif /* __sta_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/tdls.c b/drivers/net/wireless/iwlwifi/mvm/tdls.c index 66c82df2d0a16eb948e011a263af0abd3d031db1..c0e00bae5bd0622090de58a17f2f9b4fcc35e22c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tdls.c +++ b/drivers/net/wireless/iwlwifi/mvm/tdls.c @@ -61,9 +61,13 @@ * *****************************************************************************/ +#include <linux/etherdevice.h> #include "mvm.h" #include "time-event.h" +#define TU_TO_US(x) (x * 1024) +#define TU_TO_MS(x) (TU_TO_US(x) / 1000) + void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm) { struct ieee80211_sta *sta; @@ -113,17 +117,85 @@ int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return count; } +static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_rx_packet *pkt; + struct iwl_tdls_config_res *resp; + struct iwl_tdls_config_cmd tdls_cfg_cmd = {}; + struct iwl_host_cmd cmd = { + .id = TDLS_CONFIG_CMD, + .flags = CMD_WANT_SKB, + .data = { &tdls_cfg_cmd, }, + .len = { sizeof(struct iwl_tdls_config_cmd), }, + }; + struct ieee80211_sta *sta; + int ret, i, cnt; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + lockdep_assert_held(&mvm->mutex); + + tdls_cfg_cmd.id_and_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); + tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID; + tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */ + + /* for now the Tx cmd is empty and unused */ + + /* populate TDLS peer data */ + cnt = 0; + for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], + lockdep_is_held(&mvm->mutex)); + if (IS_ERR_OR_NULL(sta) || !sta->tdls)
+ continue; + + tdls_cfg_cmd.sta_info[cnt].sta_id = i; + tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid = + IWL_MVM_TDLS_FW_TID; + tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0); + tdls_cfg_cmd.sta_info[cnt].is_initiator = + cpu_to_le32(sta->tdls_initiator ? 1 : 0); + + cnt++; + } + + tdls_cfg_cmd.tdls_peer_count = cnt; + IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt); + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (WARN_ON_ONCE(ret)) + return; + + pkt = cmd.resp_pkt; + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(mvm, "Bad return from TDLS_CONFIG_COMMAND (0x%08X)\n", + pkt->hdr.flags); + goto exit; + } + + if (WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp))) + goto exit; + + /* we don't really care about the response at this point */ + +exit: + iwl_free_resp(&cmd); +} + void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool sta_added) { int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif); - /* - * Disable ps when the first TDLS sta is added and re-enable it - * when the last TDLS sta is removed - */ - if ((tdls_sta_cnt == 1 && sta_added) || - (tdls_sta_cnt == 0 && !sta_added)) + /* when the first peer joins, send a power update first */ + if (tdls_sta_cnt == 1 && sta_added) + iwl_mvm_power_update_mac(mvm); + + /* configure the FW with TDLS peer info */ + iwl_mvm_tdls_config(mvm, vif); + + /* when the last peer leaves, send a power update last */ + if (tdls_sta_cnt == 0 && !sta_added) iwl_mvm_power_update_mac(mvm); } @@ -147,3 +219,488 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS); } + +static const char * +iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state) +{ + switch (state) { + case IWL_MVM_TDLS_SW_IDLE: + return "IDLE"; + case IWL_MVM_TDLS_SW_REQ_SENT: + return "REQ SENT"; + case IWL_MVM_TDLS_SW_REQ_RCVD: + return "REQ RECEIVED"; + case IWL_MVM_TDLS_SW_ACTIVE: + return "ACTIVE"; + } + + return NULL; +} + +static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm, + enum iwl_mvm_tdls_cs_state state) +{ + if (mvm->tdls_cs.state == state) + return; + + IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n", + iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state), + iwl_mvm_tdls_cs_state_str(state)); + mvm->tdls_cs.state = state; + + if (state == IWL_MVM_TDLS_SW_IDLE) + mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT; +} + +int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data; + struct ieee80211_sta *sta; + unsigned int delay; + struct iwl_mvm_sta *mvmsta; + struct ieee80211_vif *vif; + u32 sta_id = le32_to_cpu(notif->sta_id); + + lockdep_assert_held(&mvm->mutex); + + /* can fail sometimes */ + if (!le32_to_cpu(notif->status)) { + iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); + goto out; + } + + if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT)) + goto out; + + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + /* the station may not be here, but if it is, it must be a TDLS peer */ + if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls)) + goto out; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + vif = mvmsta->vif; + + /* + * Update state and possibly switch again after this is over (DTIM). + * Also convert TU to msec. 
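+	 * (Worked example with assumed values, not taken from the patch:
+	 *  a TU is 1024 usec, so dtim_period = 2 and beacon_int = 100 TU
+	 *  give
+	 *	delay = TU_TO_MS(2 * 100) = (200 * 1024) / 1000 = 204 msec
+	 *  before the mod_delayed_work() below fires.)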
+ */ + delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int); + mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, + msecs_to_jiffies(delay)); + + iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE); + +out: + return 0; +} + +static int +iwl_mvm_tdls_check_action(struct iwl_mvm *mvm, + enum iwl_tdls_channel_switch_type type, + const u8 *peer, bool peer_initiator) +{ + bool same_peer = false; + int ret = 0; + + /* get the existing peer if it's there */ + if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE && + mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) { + struct ieee80211_sta *sta = rcu_dereference_protected( + mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id], + lockdep_is_held(&mvm->mutex)); + if (!IS_ERR_OR_NULL(sta)) + same_peer = ether_addr_equal(peer, sta->addr); + } + + switch (mvm->tdls_cs.state) { + case IWL_MVM_TDLS_SW_IDLE: + /* + * might be spurious packet from the peer after the switch is + * already done + */ + if (type == TDLS_MOVE_CH) + ret = -EINVAL; + break; + case IWL_MVM_TDLS_SW_REQ_SENT: + /* + * We received a ch-switch request while an outgoing one is + * pending. Allow it to proceed if the other peer is the same + * one we sent to, and we are not the link initiator. + */ + if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH) { + if (!same_peer) + ret = -EBUSY; + else if (!peer_initiator) /* we are the initiator */ + ret = -EBUSY; + } + break; + case IWL_MVM_TDLS_SW_REQ_RCVD: + /* as above, allow the link initiator to proceed */ + if (type == TDLS_SEND_CHAN_SW_REQ) { + if (!same_peer) + ret = -EBUSY; + else if (peer_initiator) /* they are the initiator */ + ret = -EBUSY; + } else if (type == TDLS_MOVE_CH) { + ret = -EINVAL; + } + break; + case IWL_MVM_TDLS_SW_ACTIVE: + /* we don't allow initiations during active channel switch */ + if (type == TDLS_SEND_CHAN_SW_REQ) + ret = -EINVAL; + break; + } + + if (ret) + IWL_DEBUG_TDLS(mvm, + "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n", + type, mvm->tdls_cs.state, peer, same_peer, + peer_initiator); + + return ret; +} + +static int +iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + enum iwl_tdls_channel_switch_type type, + const u8 *peer, bool peer_initiator, + u8 oper_class, + struct cfg80211_chan_def *chandef, + u32 timestamp, u16 switch_time, + u16 switch_timeout, struct sk_buff *skb, + u32 ch_sw_tm_ie) +{ + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + struct ieee80211_tx_info *info; + struct ieee80211_hdr *hdr; + struct iwl_tdls_channel_switch_cmd cmd = {0}; + int ret; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator); + if (ret) + return ret; + + if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) { + ret = -EINVAL; + goto out; + } + + cmd.switch_type = type; + cmd.timing.frame_timestamp = cpu_to_le32(timestamp); + cmd.timing.switch_time = cpu_to_le32(switch_time); + cmd.timing.switch_timeout = cpu_to_le32(switch_timeout); + + rcu_read_lock(); + sta = ieee80211_find_sta(vif, peer); + if (!sta) { + rcu_read_unlock(); + ret = -ENOENT; + goto out; + } + mvmsta = iwl_mvm_sta_from_mac80211(sta); + cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id); + + if (!chandef) { + if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT && + mvm->tdls_cs.peer.chandef.chan) { + /* actually moving to the channel */ + chandef = &mvm->tdls_cs.peer.chandef; + } else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE && + type == TDLS_MOVE_CH) { + /* we need to return to base channel */ + struct 
ieee80211_chanctx_conf *chanctx = + rcu_dereference(vif->chanctx_conf); + + if (WARN_ON_ONCE(!chanctx)) { + rcu_read_unlock(); + goto out; + } + + chandef = &chanctx->def; + } + } + + if (chandef) { + cmd.ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ? + PHY_BAND_24 : PHY_BAND_5); + cmd.ci.channel = chandef->chan->hw_value; + cmd.ci.width = iwl_mvm_get_channel_width(chandef); + cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef); + } + + /* keep quota calculation simple for now - 50% of DTIM for TDLS */ + cmd.timing.max_offchan_duration = + cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period * + vif->bss_conf.beacon_int) / 2); + + /* Switch time is the first element in the switch-timing IE. */ + cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2); + + info = IEEE80211_SKB_CB(skb); + if (info->control.hw_key) + iwl_mvm_set_tx_cmd_crypto(mvm, info, &cmd.frame.tx_cmd, skb); + + iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info, + mvmsta->sta_id); + + hdr = (void *)skb->data; + iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta, + hdr->frame_control); + rcu_read_unlock(); + + memcpy(cmd.frame.data, skb->data, skb->len); + + ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0, + sizeof(cmd), &cmd); + if (ret) { + IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n", + ret); + goto out; + } + + /* channel switch has started, update state */ + if (type != TDLS_MOVE_CH) { + mvm->tdls_cs.cur_sta_id = mvmsta->sta_id; + iwl_mvm_tdls_update_cs_state(mvm, + type == TDLS_SEND_CHAN_SW_REQ ? + IWL_MVM_TDLS_SW_REQ_SENT : + IWL_MVM_TDLS_SW_REQ_RCVD); + } + +out: + + /* channel switch failed - we are idle */ + if (ret) + iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); + + return ret; +} + +void iwl_mvm_tdls_ch_switch_work(struct work_struct *work) +{ + struct iwl_mvm *mvm; + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + struct ieee80211_vif *vif; + unsigned int delay; + int ret; + + mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work); + mutex_lock(&mvm->mutex); + + /* called after an active channel switch has finished or timed-out */ + iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); + + /* station might be gone, in that case do nothing */ + if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) + goto out; + + sta = rcu_dereference_protected( + mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id], + lockdep_is_held(&mvm->mutex)); + /* the station may not be here, but if it is, it must be a TDLS peer */ + if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls)) + goto out; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + vif = mvmsta->vif; + ret = iwl_mvm_tdls_config_channel_switch(mvm, vif, + TDLS_SEND_CHAN_SW_REQ, + sta->addr, + mvm->tdls_cs.peer.initiator, + mvm->tdls_cs.peer.op_class, + &mvm->tdls_cs.peer.chandef, + 0, 0, 0, + mvm->tdls_cs.peer.skb, + mvm->tdls_cs.peer.ch_sw_tm_ie); + if (ret) + IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret); + + /* retry after a DTIM if we failed sending now */ + delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int); + queue_delayed_work(system_wq, &mvm->tdls_cs.dwork, + msecs_to_jiffies(delay)); +out: + mutex_unlock(&mvm->mutex); +} + +int +iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u8 oper_class, + struct cfg80211_chan_def *chandef, + struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_sta *mvmsta; + unsigned int delay; + int ret; + + mutex_lock(&mvm->mutex); + 
+ IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n", + sta->addr, chandef->chan->center_freq, chandef->width); + + /* we only support a single peer for channel switching */ + if (mvm->tdls_cs.peer.sta_id != IWL_MVM_STATION_COUNT) { + IWL_DEBUG_TDLS(mvm, + "Existing peer. Can't start switch with %pM\n", + sta->addr); + ret = -EBUSY; + goto out; + } + + ret = iwl_mvm_tdls_config_channel_switch(mvm, vif, + TDLS_SEND_CHAN_SW_REQ, + sta->addr, sta->tdls_initiator, + oper_class, chandef, 0, 0, 0, + tmpl_skb, ch_sw_tm_ie); + if (ret) + goto out; + + /* + * Mark the peer as "in tdls switch" for this vif. We only allow a + * single such peer per vif. + */ + mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL); + if (!mvm->tdls_cs.peer.skb) { + ret = -ENOMEM; + goto out; + } + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + mvm->tdls_cs.peer.sta_id = mvmsta->sta_id; + mvm->tdls_cs.peer.chandef = *chandef; + mvm->tdls_cs.peer.initiator = sta->tdls_initiator; + mvm->tdls_cs.peer.op_class = oper_class; + mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie; + + /* + * Wait for 2 DTIM periods before attempting the next switch. The next + * switch will be made sooner if the current one completes before that. + */ + delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period * + vif->bss_conf.beacon_int); + mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, + msecs_to_jiffies(delay)); + +out: + mutex_unlock(&mvm->mutex); + return ret; +} + +void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct ieee80211_sta *cur_sta; + bool wait_for_phy = false; + + mutex_lock(&mvm->mutex); + + IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr); + + /* we only support a single peer for channel switching */ + if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) { + IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr); + goto out; + } + + cur_sta = rcu_dereference_protected( + mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id], + lockdep_is_held(&mvm->mutex)); + /* make sure it's the same peer */ + if (cur_sta != sta) + goto out; + + /* + * If we're currently in a switch because of the now canceled peer, + * wait a DTIM here to make sure the phy is back on the base channel. + * We can't otherwise force it. 
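+	 * (Design note: the msleep(TU_TO_MS(...)) implementing this wait
+	 *  runs only after mvm->mutex is released below, so a DTIM-long
+	 *  sleep never blocks other mvm work while the phy settles.)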
+ */ + if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id && + mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE) + wait_for_phy = true; + + mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT; + dev_kfree_skb(mvm->tdls_cs.peer.skb); + mvm->tdls_cs.peer.skb = NULL; + +out: + mutex_unlock(&mvm->mutex); + + /* make sure the phy is on the base channel */ + if (wait_for_phy) + msleep(TU_TO_MS(vif->bss_conf.dtim_period * + vif->bss_conf.beacon_int)); + + /* flush the channel switch state */ + flush_delayed_work(&mvm->tdls_cs.dwork); + + IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr); +} + +void +iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_tdls_ch_sw_params *params) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + enum iwl_tdls_channel_switch_type type; + unsigned int delay; + + mutex_lock(&mvm->mutex); + + IWL_DEBUG_TDLS(mvm, + "Received TDLS ch switch action %d from %pM status %d\n", + params->action_code, params->sta->addr, params->status); + + /* + * we got a non-zero status from a peer we were switching to - move to + * the idle state and retry again later + */ + if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE && + params->status != 0 && + mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT && + mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) { + struct ieee80211_sta *cur_sta; + + /* make sure it's the same peer */ + cur_sta = rcu_dereference_protected( + mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id], + lockdep_is_held(&mvm->mutex)); + if (cur_sta == params->sta) { + iwl_mvm_tdls_update_cs_state(mvm, + IWL_MVM_TDLS_SW_IDLE); + goto retry; + } + } + + type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ? + TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH; + + iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr, + params->sta->tdls_initiator, 0, + params->chandef, params->timestamp, + params->switch_time, + params->switch_timeout, + params->tmpl_skb, + params->ch_sw_tm_ie); + +retry: + /* register a timeout in case we don't succeed in switching */ + delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int * + 1024 / 1000; + mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, + msecs_to_jiffies(delay)); + mutex_unlock(&mvm->mutex); +} diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index 6dfad230be5ed658f2cb3f295e71b43d724d293c..54fafbf9a711fb9bacb3149fcb9f8ce1826c4b90 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c @@ -191,6 +191,35 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, return true; } +static void +iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm, + struct iwl_mvm_time_event_data *te_data, + struct iwl_time_event_notif *notif) +{ + if (!le32_to_cpu(notif->status)) { + IWL_DEBUG_TE(mvm, "CSA time event failed to start\n"); + iwl_mvm_te_clear_data(mvm, te_data); + return; + } + + switch (te_data->vif->type) { + case NL80211_IFTYPE_AP: + iwl_mvm_csa_noa_start(mvm); + break; + case NL80211_IFTYPE_STATION: + iwl_mvm_csa_client_absent(mvm, te_data->vif); + ieee80211_chswitch_done(te_data->vif, true); + break; + default: + /* should never happen */ + WARN_ON_ONCE(1); + break; + } + + /* we don't need it anymore */ + iwl_mvm_te_clear_data(mvm, te_data); +} + /* * Handles a FW notification for an event that is known to the driver. 
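 * (Editorial note on the hunk below, not a line from the source: the
 *  new iwl_mvm_te_handle_notify_csa() above folds both CSA flavours
 *  into one path; AP interfaces start the NoA via
 *  iwl_mvm_csa_noa_start(), while client interfaces mark the AP
 *  station absent via iwl_mvm_csa_client_absent() and then call
 *  ieee80211_chswitch_done().)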
* @@ -252,14 +281,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); iwl_mvm_ref(mvm, IWL_MVM_REF_ROC); ieee80211_ready_on_channel(mvm->hw); - } else if (te_data->vif->type == NL80211_IFTYPE_AP) { - if (le32_to_cpu(notif->status)) - iwl_mvm_csa_noa_start(mvm); - else - IWL_DEBUG_TE(mvm, "CSA NOA failed to start\n"); - - /* we don't need it anymore */ - iwl_mvm_te_clear_data(mvm, te_data); + } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) { + iwl_mvm_te_handle_notify_csa(mvm, te_data, notif); } } else { IWL_WARN(mvm, "Got TE with unknown action\n"); @@ -549,18 +572,11 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm, } } -/* - * Explicit request to remove a time event. The removal of a time event needs to - * be synchronized with the flow of a time event's end notification, which also - * removes the time event from the op mode data structures. - */ -void iwl_mvm_remove_time_event(struct iwl_mvm *mvm, - struct iwl_mvm_vif *mvmvif, - struct iwl_mvm_time_event_data *te_data) +static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, + struct iwl_mvm_time_event_data *te_data, + u32 *uid) { - struct iwl_time_event_cmd time_cmd = {}; - u32 id, uid; - int ret; + u32 id; /* * It is possible that by the time we got to this point the time @@ -569,7 +585,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm, spin_lock_bh(&mvm->time_event_lock); /* Save time event uid before clearing its data */ - uid = te_data->uid; + *uid = te_data->uid; id = te_data->id; /* @@ -584,10 +600,59 @@ * send a removal command. */ if (id == TE_MAX) { - IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", uid); - return; + IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid); + return false; } + return true; +} + +/* + * Explicit request to remove an aux roc time event. The removal of a time + * event needs to be synchronized with the flow of a time event's end + * notification, which also removes the time event from the op mode + * data structures. + */ +static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif, + struct iwl_mvm_time_event_data *te_data) +{ + struct iwl_hs20_roc_req aux_cmd = {}; + u32 uid; + int ret; + + if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid)) + return; + + aux_cmd.event_unique_id = cpu_to_le32(uid); + aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); + aux_cmd.id_and_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); + IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n", + le32_to_cpu(aux_cmd.event_unique_id)); + ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, + sizeof(aux_cmd), &aux_cmd); + + if (WARN_ON(ret)) + return; +} + +/* + * Explicit request to remove a time event. The removal of a time event needs to + * be synchronized with the flow of a time event's end notification, which also + * removes the time event from the op mode data structures.
+ */ +void iwl_mvm_remove_time_event(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif, + struct iwl_mvm_time_event_data *te_data) +{ + struct iwl_time_event_cmd time_cmd = {}; + u32 uid; + int ret; + + if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid)) + return; + /* When we remove a TE, the UID is to be set in the id field */ time_cmd.id = cpu_to_le32(uid); time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); @@ -666,13 +731,17 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); } -void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm) +void iwl_mvm_stop_roc(struct iwl_mvm *mvm) { struct iwl_mvm_vif *mvmvif; struct iwl_mvm_time_event_data *te_data; + bool is_p2p = false; lockdep_assert_held(&mvm->mutex); + mvmvif = NULL; + spin_lock_bh(&mvm->time_event_lock); + /* * Iterate over the list of time events and find the time event that is * associated with a P2P_DEVICE interface. @@ -680,22 +749,41 @@ void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm) * event at any given time and this time event coresponds to a ROC * request */ - mvmvif = NULL; - spin_lock_bh(&mvm->time_event_lock); list_for_each_entry(te_data, &mvm->time_event_list, list) { - if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { + if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE && + te_data->running) { mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); - break; + is_p2p = true; + goto remove_te; } } + + /* + * Iterate over the list of aux roc time events and find the time + * event that is associated with a BSS interface. + * This assumes that a BSS interface can have only a single time + * event at any given time and this time event corresponds to a ROC + * request + */ + list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) { + if (te_data->running) { + mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); + goto remove_te; + } + } + +remove_te: spin_unlock_bh(&mvm->time_event_lock); if (!mvmvif) { - IWL_WARN(mvm, "P2P_DEVICE no remain on channel event\n"); + IWL_WARN(mvm, "No remain on channel event\n"); return; } - iwl_mvm_remove_time_event(mvm, mvmvif, te_data); + if (is_p2p) + iwl_mvm_remove_time_event(mvm, mvmvif, te_data); + else + iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data); iwl_mvm_roc_finished(mvm); } diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h index b350e47e19dab0606737a8950d7f2604e1f90016..6f6b35db3ab8eb9b71c8bf178d7227254340a41a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.h +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h @@ -182,14 +182,14 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int duration, enum ieee80211_roc_type type); /** - * iwl_mvm_stop_p2p_roc - stop remain on channel for p2p device functionlity + * iwl_mvm_stop_roc - stop remain on channel functionality * @mvm: the mvm component * * This function can be used to cancel an ongoing ROC session. * The function is async, it will instruct the FW to stop serving the ROC * session, but will not wait for the actual stopping of the session.
*/ -void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm); +void iwl_mvm_stop_roc(struct iwl_mvm *mvm); /** * iwl_mvm_remove_time_event - general function to clean up of time event diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c index acca44a450864cc446d3a0a653c20e85314465fd..2b1e61fac34a2b7bcd0456c06a4caf45f86e6e6e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/iwlwifi/mvm/tt.c @@ -64,10 +64,6 @@ *****************************************************************************/ #include "mvm.h" -#include "iwl-config.h" -#include "iwl-io.h" -#include "iwl-csr.h" -#include "iwl-prph.h" #define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ @@ -99,32 +95,81 @@ static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm) iwl_mvm_set_hw_ctkill_state(mvm, false); } -static bool iwl_mvm_temp_notif(struct iwl_notif_wait_data *notif_wait, - struct iwl_rx_packet *pkt, void *data) +void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp) +{ + /* ignore the notification if we are in test mode */ + if (mvm->temperature_test) + return; + + if (mvm->temperature == temp) + return; + + mvm->temperature = temp; + iwl_mvm_tt_handler(mvm); +} + +static int iwl_mvm_temp_notif_parse(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt) { - struct iwl_mvm *mvm = - container_of(notif_wait, struct iwl_mvm, notif_wait); - int *temp = data; struct iwl_dts_measurement_notif *notif; int len = iwl_rx_packet_payload_len(pkt); + int temp; if (WARN_ON_ONCE(len != sizeof(*notif))) { IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n"); - return true; + return -EINVAL; } notif = (void *)pkt->data; - *temp = le32_to_cpu(notif->temp); + temp = le32_to_cpu(notif->temp); /* shouldn't be negative, but since it's s32, make sure it isn't */ - if (WARN_ON_ONCE(*temp < 0)) - *temp = 0; + if (WARN_ON_ONCE(temp < 0)) + temp = 0; + + IWL_DEBUG_TEMP(mvm, "DTS_MEASUREMENT_NOTIFICATION - %d\n", temp); + + return temp; +} + +static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_mvm *mvm = + container_of(notif_wait, struct iwl_mvm, notif_wait); + int *temp = data; + int ret; + + ret = iwl_mvm_temp_notif_parse(mvm, pkt); + if (ret < 0) + return true; + + *temp = ret; - IWL_DEBUG_TEMP(mvm, "DTS_MEASUREMENT_NOTIFICATION - %d\n", *temp); return true; } +int iwl_mvm_temp_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + int temp; + + /* the notification is handled synchronously in ctkill, so skip here */ + if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) + return 0; + + temp = iwl_mvm_temp_notif_parse(mvm, pkt); + if (temp < 0) + return 0; + + iwl_mvm_tt_temp_changed(mvm, temp); + + return 0; +} + static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm) { struct iwl_dts_measurement_cmd cmd = { @@ -145,7 +190,7 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm) iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif, temp_notif, ARRAY_SIZE(temp_notif), - iwl_mvm_temp_notif, &temp); + iwl_mvm_temp_notif_wait, &temp); ret = iwl_mvm_get_temp_cmd(mvm); if (ret) { diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index c6a517c771df5045cd5d2cecc80b4b29848a6c9c..4f15d9decc81bd4fe80bd2ac716667e2c3b97aa1 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -73,9 +73,9 @@ /* * Sets most of the Tx cmd's fields */ -static void iwl_mvm_set_tx_cmd(struct iwl_mvm 
*mvm, struct sk_buff *skb, - struct iwl_tx_cmd *tx_cmd, - struct ieee80211_tx_info *info, u8 sta_id) +void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, + struct iwl_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, u8 sta_id) { struct ieee80211_hdr *hdr = (void *)skb->data; __le16 fc = hdr->frame_control; @@ -149,11 +149,9 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, /* * Sets the fields in the Tx cmd that are rate related */ -static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, - struct iwl_tx_cmd *tx_cmd, - struct ieee80211_tx_info *info, - struct ieee80211_sta *sta, - __le16 fc) +void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, __le16 fc) { u32 rate_flags; int rate_idx; @@ -189,8 +187,10 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, /* HT rate doesn't make sense for a non data frame */ WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS, - "Got an HT rate for a non data frame 0x%x\n", - info->control.rates[0].flags); + "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame (fc:0x%x)\n", + info->control.rates[0].flags, + info->control.rates[0].idx, + le16_to_cpu(fc)); rate_idx = info->control.rates[0].idx; /* if the rate isn't a well known legacy rate, take the lowest one */ @@ -230,10 +230,10 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, /* * Sets the fields in the Tx cmd that are crypto related */ -static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, - struct ieee80211_tx_info *info, - struct iwl_tx_cmd *tx_cmd, - struct sk_buff *skb_frag) +void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, + struct iwl_tx_cmd *tx_cmd, + struct sk_buff *skb_frag) { struct ieee80211_key_conf *keyconf = info->control.hw_key; @@ -424,6 +424,13 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); + if (sta->tdls) { + /* default to TID 0 for non-QoS packets */ + u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid; + + txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]]; + } + if (is_ampdu) { if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON)) goto drop_unlock_sta; @@ -658,6 +665,12 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, seq_ctl = le16_to_cpu(hdr->seq_ctrl); } + /* + * TODO: this is not accurate if we are freeing more than one + * packet. 
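+	 * (A reading of this TODO, inferred rather than stated in the
+	 *  patch: the firmware reports one wireless_media_time per TX
+	 *  response, so when several frames are reclaimed at once the
+	 *  whole airtime is attributed to a single frame's status.)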
+ */ + info->status.tx_time = + le16_to_cpu(tx_resp->wireless_media_time); BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); info->status.status_driver_data[0] = (void *)(uintptr_t)tx_resp->reduced_tpc; @@ -850,6 +863,8 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, mvmsta->tid_data[tid].rate_n_flags = le32_to_cpu(tx_resp->initial_rate); mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc; + mvmsta->tid_data[tid].tx_time = + le16_to_cpu(tx_resp->wireless_media_time); } rcu_read_unlock(); @@ -878,6 +893,8 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info, info->status.ampdu_len = ba_notif->txed; iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags, info); + /* TODO: not accounted if the whole A-MPDU failed */ + info->status.tx_time = tid_data->tx_time; info->status.status_driver_data[0] = (void *)(uintptr_t)tid_data->reduced_tpc; } diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c index 8021f6eec27f7d8a918b41404383d85cc317b7ca..e56e77ef5d2e3d28d9a30185ef257aa43912ed23 100644 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/iwlwifi/mvm/utils.c @@ -734,3 +734,40 @@ bool iwl_mvm_is_idle(struct iwl_mvm *mvm) return idle; } + +struct iwl_bss_iter_data { + struct ieee80211_vif *vif; + bool error; +}; + +static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_bss_iter_data *data = _data; + + if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) + return; + + if (data->vif) { + data->error = true; + return; + } + + data->vif = vif; +} + +struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm) +{ + struct iwl_bss_iter_data bss_iter_data = {}; + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_bss_iface_iterator, &bss_iter_data); + + if (bss_iter_data.error) { + IWL_ERR(mvm, "More than one managed interface active!\n"); + return ERR_PTR(-EINVAL); + } + + return bss_iter_data.vif; +} diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 6ced8549eb3a26d1e048f8d63cc8da26bad2caa1..3ee8e3848876f48cce72ce98dff2e2bfde875a23 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -499,6 +499,7 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) {} static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); + const struct iwl_cfg *cfg_7265d __maybe_unused = NULL; struct iwl_trans *iwl_trans; struct iwl_trans_pcie *trans_pcie; int ret; @@ -507,6 +508,25 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (IS_ERR(iwl_trans)) return PTR_ERR(iwl_trans); +#if IS_ENABLED(CONFIG_IWLMVM) + /* + * special-case 7265D, it has the same PCI IDs. + * + * Note that because we already pass the cfg to the transport above, + * all the parameters that the transport uses must, until that is + * changed, be identical to the ones in the 7265D configuration. 
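+	 * (Spelled out with an assumed example, not named in the patch:
+	 *  any cfg field the transport reads during allocation, e.g.
+	 *  base_params, must match between the iwl7265_*_cfg passed in
+	 *  and the iwl7265d_*_cfg swapped in below.)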
+ */ + if (cfg == &iwl7265_2ac_cfg) + cfg_7265d = &iwl7265d_2ac_cfg; + else if (cfg == &iwl7265_2n_cfg) + cfg_7265d = &iwl7265d_2n_cfg; + else if (cfg == &iwl7265_n_cfg) + cfg_7265d = &iwl7265d_n_cfg; + if (cfg_7265d && + (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) + cfg = cfg_7265d; +#endif + pci_set_drvdata(pdev, iwl_trans); trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans); diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index dd2f3f8baa9dbea639005b3b65dbbbda9109408f..5d79a1f44b8e43a723b9de98ca780a9ea5bc5812 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -78,6 +78,11 @@ #include "iwl-agn-hw.h" #include "iwl-fw-error-dump.h" #include "internal.h" +#include "iwl-fh.h" + +/* extended range in FW SRAM */ +#define IWL_FW_MEM_EXTENDED_START 0x40000 +#define IWL_FW_MEM_EXTENDED_END 0x57FFF static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans) { @@ -133,7 +138,7 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans) break; } - if (!page) + if (WARN_ON_ONCE(!page)) return; trans_pcie->fw_mon_page = page; @@ -512,6 +517,9 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans) CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, HW_READY_TIMEOUT); + if (ret >= 0) + iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE); + IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : ""); return ret; } @@ -624,14 +632,28 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, } for (offset = 0; offset < section->len; offset += chunk_sz) { - u32 copy_size; + u32 copy_size, dst_addr; + bool extended_addr = false; copy_size = min_t(u32, chunk_sz, section->len - offset); + dst_addr = section->offset + offset; + + if (dst_addr >= IWL_FW_MEM_EXTENDED_START && + dst_addr <= IWL_FW_MEM_EXTENDED_END) + extended_addr = true; + + if (extended_addr) + iwl_set_bits_prph(trans, LMPM_CHICK, + LMPM_CHICK_EXTENDED_ADDR_SPACE); memcpy(v_addr, (u8 *)section->data + offset, copy_size); - ret = iwl_pcie_load_firmware_chunk(trans, - section->offset + offset, - p_addr, copy_size); + ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr, + copy_size); + + if (extended_addr) + iwl_clear_bits_prph(trans, LMPM_CHICK, + LMPM_CHICK_EXTENDED_ADDR_SPACE); + if (ret) { IWL_ERR(trans, "Could not load the [%d] uCode section\n", @@ -644,14 +666,14 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, return ret; } -static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans, - const struct fw_img *image, - int cpu, - int *first_ucode_section) +static int iwl_pcie_load_cpu_sections_8000b(struct iwl_trans *trans, + const struct fw_img *image, + int cpu, + int *first_ucode_section) { int shift_param; - int i, ret = 0; - u32 last_read_idx = 0; + int i, ret = 0, sec_num = 0x1; + u32 val, last_read_idx = 0; if (cpu == 1) { shift_param = 0; @@ -672,21 +694,16 @@ static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans, break; } - if (i == (*first_ucode_section) + 1) - /* set CPU to started */ - iwl_set_bits_prph(trans, - CSR_UCODE_LOAD_STATUS_ADDR, - LMPM_CPU_HDRS_LOADING_COMPLETED - << shift_param); - ret = iwl_pcie_load_section(trans, i, &image->sec[i]); if (ret) return ret; + + /* Notify the ucode of the loaded section number and status */ + val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS); + val = val | (sec_num << shift_param); + iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val); + sec_num = (sec_num << 1) | 
0x1; } - /* image loading complete */ - iwl_set_bits_prph(trans, - CSR_UCODE_LOAD_STATUS_ADDR, - LMPM_CPU_UCODE_LOADING_COMPLETED << shift_param); *first_ucode_section = last_read_idx; @@ -739,49 +756,78 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, return 0; } -static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, - const struct fw_img *image) +static void iwl_pcie_apply_destination(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int ret = 0; - int first_ucode_section; + const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv; + int i; - IWL_DEBUG_FW(trans, - "working with %s image\n", - image->is_secure ? "Secured" : "Non Secured"); - IWL_DEBUG_FW(trans, - "working with %s CPU\n", - image->is_dual_cpus ? "Dual" : "Single"); + if (dest->version) + IWL_ERR(trans, + "DBG DEST version is %d - expect issues\n", + dest->version); - /* configure the ucode to be ready to get the secured image */ - if (image->is_secure) { - /* set secure boot inspector addresses */ - iwl_write_prph(trans, - LMPM_SECURE_INSPECTOR_CODE_ADDR, - LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE); + IWL_INFO(trans, "Applying debug destination %s\n", + get_fw_dbg_mode_string(dest->monitor_mode)); - iwl_write_prph(trans, - LMPM_SECURE_INSPECTOR_DATA_ADDR, - LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE); + if (dest->monitor_mode == EXTERNAL_MODE) + iwl_pcie_alloc_fw_monitor(trans); + else + IWL_WARN(trans, "PCI should have external buffer debug\n"); - /* set CPU1 header address */ - iwl_write_prph(trans, - LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR, - LMPM_SECURE_CPU1_HDR_MEM_SPACE); + for (i = 0; i < trans->dbg_dest_reg_num; i++) { + u32 addr = le32_to_cpu(dest->reg_ops[i].addr); + u32 val = le32_to_cpu(dest->reg_ops[i].val); - /* load to FW the binary Secured sections of CPU1 */ - ret = iwl_pcie_load_cpu_secured_sections(trans, image, 1, - &first_ucode_section); - if (ret) - return ret; + switch (dest->reg_ops[i].op) { + case CSR_ASSIGN: + iwl_write32(trans, addr, val); + break; + case CSR_SETBIT: + iwl_set_bit(trans, addr, BIT(val)); + break; + case CSR_CLEARBIT: + iwl_clear_bit(trans, addr, BIT(val)); + break; + case PRPH_ASSIGN: + iwl_write_prph(trans, addr, val); + break; + case PRPH_SETBIT: + iwl_set_bits_prph(trans, addr, BIT(val)); + break; + case PRPH_CLEARBIT: + iwl_clear_bits_prph(trans, addr, BIT(val)); + break; + default: + IWL_ERR(trans, "FW debug - unknown OP %d\n", + dest->reg_ops[i].op); + break; + } + } - } else { - /* load to FW the binary Non secured sections of CPU1 */ - ret = iwl_pcie_load_cpu_sections(trans, image, 1, - &first_ucode_section); - if (ret) - return ret; + if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) { + iwl_write_prph(trans, le32_to_cpu(dest->base_reg), + trans_pcie->fw_mon_phys >> dest->base_shift); + iwl_write_prph(trans, le32_to_cpu(dest->end_reg), + (trans_pcie->fw_mon_phys + + trans_pcie->fw_mon_size) >> dest->end_shift); } +} + +static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, + const struct fw_img *image) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret = 0; + int first_ucode_section; + + IWL_DEBUG_FW(trans, "working with %s CPU\n", + image->is_dual_cpus ? 
"Dual" : "Single"); + + /* load to FW the binary non secured sections of CPU1 */ + ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section); + if (ret) + return ret; if (image->is_dual_cpus) { /* set CPU2 header address */ @@ -790,13 +836,8 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, LMPM_SECURE_CPU2_HDR_MEM_SPACE); /* load to FW the binary sections of CPU2 */ - if (image->is_secure) - ret = iwl_pcie_load_cpu_secured_sections( - trans, image, 2, - &first_ucode_section); - else - ret = iwl_pcie_load_cpu_sections(trans, image, 2, - &first_ucode_section); + ret = iwl_pcie_load_cpu_sections(trans, image, 2, + &first_ucode_section); if (ret) return ret; } @@ -813,6 +854,8 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, (trans_pcie->fw_mon_phys + trans_pcie->fw_mon_size) >> 4); } + } else if (trans->dbg_dest_tlv) { + iwl_pcie_apply_destination(trans); } /* release CPU reset */ @@ -821,18 +864,50 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, else iwl_write32(trans, CSR_RESET, 0); - if (image->is_secure) { - /* wait for image verification to complete */ - ret = iwl_poll_prph_bit(trans, - LMPM_SECURE_BOOT_CPU1_STATUS_ADDR, - LMPM_SECURE_BOOT_STATUS_SUCCESS, - LMPM_SECURE_BOOT_STATUS_SUCCESS, - LMPM_SECURE_TIME_OUT); + return 0; +} - if (ret < 0) { - IWL_ERR(trans, "Time out on secure boot process\n"); - return ret; - } +static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans, + const struct fw_img *image) +{ + int ret = 0; + int first_ucode_section; + u32 reg; + + IWL_DEBUG_FW(trans, "working with %s CPU\n", + image->is_dual_cpus ? "Dual" : "Single"); + + /* configure the ucode to be ready to get the secured image */ + /* release CPU reset */ + iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT); + + /* load to FW the binary Secured sections of CPU1 */ + ret = iwl_pcie_load_cpu_sections_8000b(trans, image, 1, + &first_ucode_section); + if (ret) + return ret; + + /* load to FW the binary sections of CPU2 */ + ret = iwl_pcie_load_cpu_sections_8000b(trans, image, 2, + &first_ucode_section); + if (ret) + return ret; + + /* Notify FW loading is done */ + iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF); + + /* wait for image verification to complete */ + ret = iwl_poll_prph_bit(trans, LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0, + LMPM_SECURE_BOOT_STATUS_SUCCESS, + LMPM_SECURE_BOOT_STATUS_SUCCESS, + LMPM_SECURE_TIME_OUT); + if (ret < 0) { + reg = iwl_read_prph(trans, + LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0); + + IWL_ERR(trans, "Timeout on secure boot process, reg = %x\n", + reg); + return ret; } return 0; @@ -884,7 +959,11 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); /* Load the given image to the HW */ - return iwl_pcie_load_given_ucode(trans, fw); + if ((trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) && + (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_B_STEP)) + return iwl_pcie_load_given_ucode_8000b(trans, fw); + else + return iwl_pcie_load_given_ucode(trans, fw); } static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) @@ -941,7 +1020,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) spin_unlock(&trans_pcie->irq_lock); /* stop and reset the on-board processor */ - iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + udelay(20); /* clear all status bits */ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 
@@ -974,6 +1054,9 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) clear_bit(STATUS_RFKILL, &trans->status); if (hw_rfkill != was_hw_rfkill) iwl_trans_pcie_rf_kill(trans, hw_rfkill); + + /* re-take ownership to prevent other users from stealing the device */ + iwl_pcie_prepare_card_hw(trans); } void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) @@ -1023,14 +1106,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, return 0; } - iwl_pcie_set_pwr(trans, false); - - val = iwl_read32(trans, CSR_RESET); - if (val & CSR_RESET_REG_FLAG_NEVO_RESET) { - *status = IWL_D3_STATUS_RESET; - return 0; - } - /* * Also enables interrupts - none will happen as the device doesn't * know we're waking it up, only when the opmode actually tells it @@ -1041,6 +1116,9 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) + udelay(2); + ret = iwl_poll_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, @@ -1050,6 +1128,8 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, return ret; } + iwl_pcie_set_pwr(trans, false); + iwl_trans_pcie_tx_reset(trans); ret = iwl_pcie_rx_init(trans); @@ -1058,7 +1138,12 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, return ret; } - *status = IWL_D3_STATUS_ALIVE; + val = iwl_read32(trans, CSR_RESET); + if (val & CSR_RESET_REG_FLAG_NEVO_RESET) + *status = IWL_D3_STATUS_RESET; + else + *status = IWL_D3_STATUS_ALIVE; + return 0; } @@ -1236,6 +1321,8 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent, /* this bit wakes up the NIC */ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) + udelay(2); /* * These bits say the device is running, and should keep running for @@ -1767,6 +1854,13 @@ err: IWL_ERR(trans, "failed to create the trans debugfs entry\n"); return -ENOMEM; } +#else +static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, + struct dentry *dir) +{ + return 0; +} +#endif /*CONFIG_IWLWIFI_DEBUGFS */ static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd) { @@ -1937,6 +2031,31 @@ static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans, return csr_len; } +static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data) +{ + u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND; + unsigned long flags; + __le32 *val; + int i; + + if (!iwl_trans_grab_nic_access(trans, false, &flags)) + return 0; + + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS); + (*data)->len = cpu_to_le32(fh_regs_len); + val = (void *)(*data)->data; + + for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32)) + *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); + + iwl_trans_release_nic_access(trans, &flags); + + *data = iwl_fw_error_next_data(*data); + + return sizeof(**data) + fh_regs_len; +} + static struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans) { @@ -1946,6 +2065,7 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans) struct iwl_fw_error_dump_txcmd *txcmd; struct iwl_trans_dump_data *dump_data; u32 len; + u32 monitor_len; int i, ptr; /* transport dump header */ @@ -1968,10 +2088,34 @@ struct iwl_trans_dump_data 
*iwl_trans_pcie_dump_data(struct iwl_trans *trans) num_bytes_in_chunk; } + /* FH registers */ + len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); + /* FW monitor */ - if (trans_pcie->fw_mon_page) + if (trans_pcie->fw_mon_page) { + len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) + + trans_pcie->fw_mon_size; + monitor_len = trans_pcie->fw_mon_size; + } else if (trans->dbg_dest_tlv) { + u32 base, end; + + base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); + end = le32_to_cpu(trans->dbg_dest_tlv->end_reg); + + base = iwl_read_prph(trans, base) << + trans->dbg_dest_tlv->base_shift; + end = iwl_read_prph(trans, end) << + trans->dbg_dest_tlv->end_shift; + + /* Make "end" point to the actual end */ + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) + end += (1 << trans->dbg_dest_tlv->end_shift); + monitor_len = end - base; len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) + - trans_pcie->fw_mon_size; + monitor_len; + } else { + monitor_len = 0; + } dump_data = vzalloc(len); if (!dump_data) @@ -2008,49 +2152,77 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans) len += iwl_trans_pcie_dump_prph(trans, &data); len += iwl_trans_pcie_dump_csr(trans, &data); + len += iwl_trans_pcie_fh_regs_dump(trans, &data); /* data is already pointing to the next section */ - if (trans_pcie->fw_mon_page) { + if ((trans_pcie->fw_mon_page && + trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) || + trans->dbg_dest_tlv) { struct iwl_fw_error_dump_fw_mon *fw_mon_data; + u32 base, write_ptr, wrap_cnt; + + /* If there was a dest TLV - use the values from there */ + if (trans->dbg_dest_tlv) { + write_ptr = + le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg); + wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count); + base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); + } else { + base = MON_BUFF_BASE_ADDR; + write_ptr = MON_BUFF_WRPTR; + wrap_cnt = MON_BUFF_CYCLE_CNT; + } data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); - data->len = cpu_to_le32(trans_pcie->fw_mon_size + - sizeof(*fw_mon_data)); fw_mon_data = (void *)data->data; fw_mon_data->fw_mon_wr_ptr = - cpu_to_le32(iwl_read_prph(trans, MON_BUFF_WRPTR)); + cpu_to_le32(iwl_read_prph(trans, write_ptr)); fw_mon_data->fw_mon_cycle_cnt = - cpu_to_le32(iwl_read_prph(trans, MON_BUFF_CYCLE_CNT)); + cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); fw_mon_data->fw_mon_base_ptr = - cpu_to_le32(iwl_read_prph(trans, MON_BUFF_BASE_ADDR)); - - /* - * The firmware is now asserted, it won't write anything to - * the buffer. CPU can take ownership to fetch the data. - * The buffer will be handed back to the device before the - * firmware will be restarted. - */ - dma_sync_single_for_cpu(trans->dev, trans_pcie->fw_mon_phys, - trans_pcie->fw_mon_size, - DMA_FROM_DEVICE); - memcpy(fw_mon_data->data, page_address(trans_pcie->fw_mon_page), - trans_pcie->fw_mon_size); - - len += sizeof(*data) + sizeof(*fw_mon_data) + - trans_pcie->fw_mon_size; + cpu_to_le32(iwl_read_prph(trans, base)); + + len += sizeof(*data) + sizeof(*fw_mon_data); + if (trans_pcie->fw_mon_page) { + data->len = cpu_to_le32(trans_pcie->fw_mon_size + + sizeof(*fw_mon_data)); + + /* + * The firmware is now asserted, it won't write anything + * to the buffer. CPU can take ownership to fetch the + * data. The buffer will be handed back to the device + * before the firmware will be restarted. 
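
When a debug-destination TLV is present, the dump code above sizes the firmware monitor from two shifted register reads, and on the 8000 family treats the end register as naming the last block rather than one past it. The arithmetic as a standalone sketch, with the register reads stubbed out:

#include <stdint.h>
#include <stdio.h>

/* base_reg_val and end_reg_val stand in for iwl_read_prph() results;
 * the shifts come from the TLV, as in the driver. */
static uint32_t monitor_len(uint32_t base_reg_val, uint8_t base_shift,
                            uint32_t end_reg_val, uint8_t end_shift,
                            int is_family_8000)
{
        uint32_t base = base_reg_val << base_shift;
        uint32_t end = end_reg_val << end_shift;

        /* on the 8000 family "end" names the last block, not one past it */
        if (is_family_8000)
                end += 1 << end_shift;

        return end - base;
}

int main(void)
{
        printf("0x%x\n", (unsigned)monitor_len(0x100, 8, 0x1ff, 8, 1));
        return 0; /* prints 0x10000 */
}
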
+ */ + dma_sync_single_for_cpu(trans->dev, + trans_pcie->fw_mon_phys, + trans_pcie->fw_mon_size, + DMA_FROM_DEVICE); + memcpy(fw_mon_data->data, + page_address(trans_pcie->fw_mon_page), + trans_pcie->fw_mon_size); + + len += trans_pcie->fw_mon_size; + } else { + /* If we are here then the buffer is internal */ + + /* + * Update pointers to reflect actual values after + * shifting + */ + base = iwl_read_prph(trans, base) << + trans->dbg_dest_tlv->base_shift; + iwl_trans_read_mem(trans, base, fw_mon_data->data, + monitor_len / sizeof(u32)); + data->len = cpu_to_le32(sizeof(*fw_mon_data) + + monitor_len); + len += monitor_len; + } } dump_data->len = len; return dump_data; } -#else -static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, - struct dentry *dir) -{ - return 0; -} -#endif /*CONFIG_IWLWIFI_DEBUGFS */ static const struct iwl_trans_ops trans_ops_pcie = { .start_hw = iwl_trans_pcie_start_hw, @@ -2087,9 +2259,7 @@ static const struct iwl_trans_ops trans_ops_pcie = { .release_nic_access = iwl_trans_pcie_release_nic_access, .set_bits_mask = iwl_trans_pcie_set_bits_mask, -#ifdef CONFIG_IWLWIFI_DEBUGFS .dump_data = iwl_trans_pcie_dump_data, -#endif }; struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index eb8e2984c5e90f6d8f498e69b1cee4420dffb85d..8a6c7a084aa1271da349ae82a9077b9c07284cdb 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -989,6 +989,65 @@ out: spin_unlock_bh(&txq->lock); } +static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + + lockdep_assert_held(&trans_pcie->reg_lock); + + if (trans_pcie->cmd_in_flight) + return 0; + + trans_pcie->cmd_in_flight = true; + + /* + * wake up the NIC to make sure that the firmware will see the host + * command - we will let the NIC sleep once all the host commands + * returned. This needs to be done only on NICs that have + * apmg_wake_up_wa set. 
+ */ + if (trans->cfg->base_params->apmg_wake_up_wa) { + __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) + udelay(2); + + ret = iwl_poll_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, + (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), + 15000); + if (ret < 0) { + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + trans_pcie->cmd_in_flight = false; + IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); + return -EIO; + } + } + + return 0; +} + +static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + lockdep_assert_held(&trans_pcie->reg_lock); + + if (WARN_ON(!trans_pcie->cmd_in_flight)) + return 0; + + trans_pcie->cmd_in_flight = false; + + if (trans->cfg->base_params->apmg_wake_up_wa) + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + return 0; +} + /* * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd * @@ -1024,14 +1083,9 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) } } - if (trans->cfg->base_params->apmg_wake_up_wa && - q->read_ptr == q->write_ptr) { + if (q->read_ptr == q->write_ptr) { spin_lock_irqsave(&trans_pcie->reg_lock, flags); - WARN_ON(!trans_pcie->cmd_in_flight); - trans_pcie->cmd_in_flight = false; - __iwl_trans_pcie_clear_bit(trans, - CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + iwl_pcie_clear_cmd_in_flight(trans); spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); } @@ -1419,32 +1473,11 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); spin_lock_irqsave(&trans_pcie->reg_lock, flags); - - /* - * wake up the NIC to make sure that the firmware will see the host - * command - we will let the NIC sleep once all the host commands - * returned. This needs to be done only on NICs that have - * apmg_wake_up_wa set. 
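
The iwl_pcie_set_cmd_in_flight()/iwl_pcie_clear_cmd_in_flight() helpers introduced above pair the wake-for-host-command workaround with its release, so the enqueue and reclaim paths stay balanced. A standalone model of that pairing; locking is elided here, while the driver runs both sides under reg_lock:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct cmdq { unsigned read, write; bool in_flight; };

static void enqueue(struct cmdq *q)
{
        if (!q->in_flight)
                q->in_flight = true;    /* grab MAC access once */
        q->write++;
}

static void reclaim(struct cmdq *q)
{
        q->read++;
        if (q->read == q->write)
                q->in_flight = false;   /* release only when drained */
}

int main(void)
{
        struct cmdq q = { 0, 0, false };

        enqueue(&q); enqueue(&q);
        reclaim(&q);
        assert(q.in_flight);            /* one command still outstanding */
        reclaim(&q);
        assert(!q.in_flight);
        puts("balanced");
        return 0;
}
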
- */ - if (trans->cfg->base_params->apmg_wake_up_wa && - !trans_pcie->cmd_in_flight) { - trans_pcie->cmd_in_flight = true; - __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, - (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | - CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), - 15000); - if (ret < 0) { - __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); - trans_pcie->cmd_in_flight = false; - IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); - idx = -EIO; - goto out; - } + ret = iwl_pcie_set_cmd_in_flight(trans); + if (ret < 0) { + idx = ret; + spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); + goto out; } /* Increment and update queue's write index */ diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index c9ad4cf1adfb4b1d7a30068c2546ac7354345139..a71b9d5e353d6d114c10d712b72f553f6edfe8ec 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -412,6 +412,11 @@ struct mac80211_hwsim_data { struct mac_address addresses[2]; int channels, idx; bool use_chanctx; + bool destroy_on_close; + struct work_struct destroy_work; + u32 portid; + char alpha2[2]; + const struct ieee80211_regdomain *regd; struct ieee80211_channel *tmp_chan; struct delayed_work roc_done; @@ -419,6 +424,7 @@ struct mac80211_hwsim_data { struct cfg80211_scan_request *hw_scan_request; struct ieee80211_vif *hw_scan_vif; int scan_chan_idx; + u8 scan_addr[ETH_ALEN]; struct ieee80211_channel *channel; u64 beacon_int /* beacon interval in us */; @@ -436,7 +442,7 @@ struct mac80211_hwsim_data { /* * Only radios in the same group can communicate together (the * channel has to match too). Each bit represents a group. A - * radio can be in more then one group. + * radio can be in more than one group. */ u64 group; @@ -447,6 +453,14 @@ struct mac80211_hwsim_data { s64 bcn_delta; /* absolute beacon transmission time. Used to cover up "tx" delay. 
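
The scan_addr field added to the struct above carries the address used by the randomized-MAC scan support further down. A standalone sketch of mask-based address randomization in the style cfg80211 uses (bits set in the mask come from the template address, the rest are random); get_random_mask_addr() is the kernel helper the patch calls:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void random_mask_addr(uint8_t out[6], const uint8_t addr[6],
                             const uint8_t mask[6])
{
        for (int i = 0; i < 6; i++) {
                uint8_t r = (uint8_t)rand();

                out[i] = (addr[i] & mask[i]) | (r & ~mask[i]);
        }
}

int main(void)
{
        const uint8_t addr[6] = { 0x02, 0x11, 0x22, 0, 0, 0 };
        const uint8_t mask[6] = { 0xff, 0xff, 0xff, 0, 0, 0 };
        uint8_t out[6];

        random_mask_addr(out, addr, mask);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               out[0], out[1], out[2], out[3], out[4], out[5]);
        return 0; /* OUI fixed by the mask, lower three octets random */
}
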
*/ u64 abs_bcn_ts; + + /* Stats */ + u64 tx_pkts; + u64 rx_pkts; + u64 tx_bytes; + u64 rx_bytes; + u64 tx_dropped; + u64 tx_failed; }; @@ -476,6 +490,14 @@ static struct genl_family hwsim_genl_family = { .maxattr = HWSIM_ATTR_MAX, }; +enum hwsim_multicast_groups { + HWSIM_MCGRP_CONFIG, +}; + +static const struct genl_multicast_group hwsim_mcgrps[] = { + [HWSIM_MCGRP_CONFIG] = { .name = "config", }, +}; + /* MAC80211_HWSIM netlink policy */ static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = { @@ -496,6 +518,10 @@ static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = { [HWSIM_ATTR_REG_CUSTOM_REG] = { .type = NLA_U32 }, [HWSIM_ATTR_REG_STRICT_REG] = { .type = NLA_FLAG }, [HWSIM_ATTR_SUPPORT_P2P_DEVICE] = { .type = NLA_FLAG }, + [HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE] = { .type = NLA_FLAG }, + [HWSIM_ATTR_RADIO_NAME] = { .type = NLA_STRING }, + [HWSIM_ATTR_NO_VIF] = { .type = NLA_FLAG }, + [HWSIM_ATTR_FREQ] = { .type = NLA_U32 }, }; static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, @@ -807,6 +833,9 @@ static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data, .ret = false, }; + if (data->scanning && memcmp(addr, data->scan_addr, ETH_ALEN) == 0) + return true; + memcpy(md.addr, addr, ETH_ALEN); ieee80211_iterate_active_interfaces_atomic(data->hw, @@ -861,8 +890,10 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, /* If the queue contains MAX_QUEUE skb's drop some */ if (skb_queue_len(&data->pending) >= MAX_QUEUE) { /* Droping until WARN_QUEUE level */ - while (skb_queue_len(&data->pending) >= WARN_QUEUE) - skb_dequeue(&data->pending); + while (skb_queue_len(&data->pending) >= WARN_QUEUE) { + ieee80211_free_txskb(hw, skb_dequeue(&data->pending)); + data->tx_dropped++; + } } skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC); @@ -896,6 +927,9 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags)) goto nla_put_failure; + if (nla_put_u32(skb, HWSIM_ATTR_FREQ, data->channel->center_freq)) + goto nla_put_failure; + /* We get the tx control (rate and retries) info*/ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { @@ -917,10 +951,14 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, /* Enqueue the packet */ skb_queue_tail(&data->pending, my_skb); + data->tx_pkts++; + data->tx_bytes += my_skb->len; return; nla_put_failure: printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__); + ieee80211_free_txskb(hw, my_skb); + data->tx_failed++; } static bool hwsim_chans_compat(struct ieee80211_channel *c1, @@ -952,6 +990,53 @@ static void mac80211_hwsim_tx_iter(void *_data, u8 *addr, data->receive = true; } +static void mac80211_hwsim_add_vendor_rtap(struct sk_buff *skb) +{ + /* + * To enable this code, #define the HWSIM_RADIOTAP_OUI, + * e.g. like this: + * #define HWSIM_RADIOTAP_OUI "\x02\x00\x00" + * (but you should use a valid OUI, not that) + * + * If anyone wants to 'donate' a radiotap OUI/subns code + * please send a patch removing this #ifdef and changing + * the values accordingly. + */ +#ifdef HWSIM_RADIOTAP_OUI + struct ieee80211_vendor_radiotap *rtap; + + /* + * Note that this code requires the headroom in the SKB + * that was allocated earlier. 
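
The overflow handling above now frees the skbs it sheds (and counts them in tx_dropped) instead of just unlinking them. A standalone model of the two-watermark policy; the MAX_QUEUE and WARN_QUEUE values here are placeholders for the module's own constants:

#include <stdio.h>

#define MAX_QUEUE  32 /* placeholder */
#define WARN_QUEUE 16 /* placeholder */

static unsigned queue_len, tx_dropped;

static void enqueue_pending(void)
{
        if (queue_len >= MAX_QUEUE) {
                /* shed down to the warning level before queuing more */
                while (queue_len >= WARN_QUEUE) {
                        queue_len--;    /* ieee80211_free_txskb() in driver */
                        tx_dropped++;
                }
        }
        queue_len++;
}

int main(void)
{
        for (int i = 0; i < 40; i++)
                enqueue_pending();
        printf("len=%u dropped=%u\n", queue_len, tx_dropped);
        return 0;
}
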
+ */ + rtap = (void *)skb_push(skb, sizeof(*rtap) + 8 + 4); + rtap->oui[0] = HWSIM_RADIOTAP_OUI[0]; + rtap->oui[1] = HWSIM_RADIOTAP_OUI[1]; + rtap->oui[2] = HWSIM_RADIOTAP_OUI[2]; + rtap->subns = 127; + + /* + * Radiotap vendor namespaces can (and should) also be + * split into fields by using the standard radiotap + * presence bitmap mechanism. Use just BIT(0) here for + * the presence bitmap. + */ + rtap->present = BIT(0); + /* We have 8 bytes of (dummy) data */ + rtap->len = 8; + /* For testing, also require it to be aligned */ + rtap->align = 8; + /* And also test that padding works, 4 bytes */ + rtap->pad = 4; + /* push the data */ + memcpy(rtap->data, "ABCDEFGH", 8); + /* make sure to clear padding, mac80211 doesn't */ + memset(rtap->data + 8, 0, 4); + + IEEE80211_SKB_RXCB(skb)->flag |= RX_FLAG_RADIOTAP_VENDOR_DATA; +#endif +} + static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_channel *chan) @@ -1066,6 +1151,11 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, rx_status.mactime = now + data2->tsf_offset; memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status)); + + mac80211_hwsim_add_vendor_rtap(nskb); + + data2->rx_pkts++; + data2->rx_bytes += nskb->len; ieee80211_rx_irqsafe(data2->hw, nskb); } spin_unlock(&hwsim_radio_lock); @@ -1133,6 +1223,8 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, return mac80211_hwsim_tx_frame_nl(hw, skb, _portid); /* NO wmediumd detected, perfect medium simulation */ + data->tx_pkts++; + data->tx_bytes += skb->len; ack = mac80211_hwsim_tx_frame_no_nl(hw, skb, channel); if (ack && skb->len >= 16) { @@ -1716,7 +1808,7 @@ static void hw_scan_work(struct work_struct *work) struct sk_buff *probe; probe = ieee80211_probereq_get(hwsim->hw, - hwsim->hw_scan_vif, + hwsim->scan_addr, req->ssids[i].ssid, req->ssids[i].ssid_len, req->ie_len); @@ -1754,6 +1846,12 @@ static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw, hwsim->hw_scan_request = req; hwsim->hw_scan_vif = vif; hwsim->scan_chan_idx = 0; + if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) + get_random_mask_addr(hwsim->scan_addr, + hw_req->req.mac_addr, + hw_req->req.mac_addr_mask); + else + memcpy(hwsim->scan_addr, vif->addr, ETH_ALEN); mutex_unlock(&hwsim->mutex); wiphy_debug(hw->wiphy, "hwsim hw_scan request\n"); @@ -1780,7 +1878,9 @@ static void mac80211_hwsim_cancel_hw_scan(struct ieee80211_hw *hw, mutex_unlock(&hwsim->mutex); } -static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw) +static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *mac_addr) { struct mac80211_hwsim_data *hwsim = hw->priv; @@ -1792,13 +1892,16 @@ static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw) } printk(KERN_DEBUG "hwsim sw_scan request, prepping stuff\n"); + + memcpy(hwsim->scan_addr, mac_addr, ETH_ALEN); hwsim->scanning = true; out: mutex_unlock(&hwsim->mutex); } -static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw) +static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct mac80211_hwsim_data *hwsim = hw->priv; @@ -1806,6 +1909,7 @@ static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw) printk(KERN_DEBUG "hwsim sw_scan_complete\n"); hwsim->scanning = false; + memset(hwsim->scan_addr, 0, ETH_ALEN); mutex_unlock(&hwsim->mutex); } @@ -1916,6 +2020,57 @@ static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw, hwsim_check_chanctx_magic(ctx); } +static const char 
mac80211_hwsim_gstrings_stats[][ETH_GSTRING_LEN] = { + "tx_pkts_nic", + "tx_bytes_nic", + "rx_pkts_nic", + "rx_bytes_nic", + "d_tx_dropped", + "d_tx_failed", + "d_ps_mode", + "d_group", + "d_tx_power", +}; + +#define MAC80211_HWSIM_SSTATS_LEN ARRAY_SIZE(mac80211_hwsim_gstrings_stats) + +static void mac80211_hwsim_get_et_strings(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u32 sset, u8 *data) +{ + if (sset == ETH_SS_STATS) + memcpy(data, *mac80211_hwsim_gstrings_stats, + sizeof(mac80211_hwsim_gstrings_stats)); +} + +static int mac80211_hwsim_get_et_sset_count(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int sset) +{ + if (sset == ETH_SS_STATS) + return MAC80211_HWSIM_SSTATS_LEN; + return 0; +} + +static void mac80211_hwsim_get_et_stats(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ethtool_stats *stats, u64 *data) +{ + struct mac80211_hwsim_data *ar = hw->priv; + int i = 0; + + data[i++] = ar->tx_pkts; + data[i++] = ar->tx_bytes; + data[i++] = ar->rx_pkts; + data[i++] = ar->rx_bytes; + data[i++] = ar->tx_dropped; + data[i++] = ar->tx_failed; + data[i++] = ar->ps; + data[i++] = ar->group; + data[i++] = ar->power_level; + + WARN_ON(i != MAC80211_HWSIM_SSTATS_LEN); +} + static const struct ieee80211_ops mac80211_hwsim_ops = { .tx = mac80211_hwsim_tx, .start = mac80211_hwsim_start, @@ -1939,14 +2094,131 @@ static const struct ieee80211_ops mac80211_hwsim_ops = { .flush = mac80211_hwsim_flush, .get_tsf = mac80211_hwsim_get_tsf, .set_tsf = mac80211_hwsim_set_tsf, + .get_et_sset_count = mac80211_hwsim_get_et_sset_count, + .get_et_stats = mac80211_hwsim_get_et_stats, + .get_et_strings = mac80211_hwsim_get_et_strings, }; static struct ieee80211_ops mac80211_hwsim_mchan_ops; -static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2, - const struct ieee80211_regdomain *regd, - bool reg_strict, bool p2p_device, - bool use_chanctx) +struct hwsim_new_radio_params { + unsigned int channels; + const char *reg_alpha2; + const struct ieee80211_regdomain *regd; + bool reg_strict; + bool p2p_device; + bool use_chanctx; + bool destroy_on_close; + const char *hwname; + bool no_vif; +}; + +static void hwsim_mcast_config_msg(struct sk_buff *mcast_skb, + struct genl_info *info) +{ + if (info) + genl_notify(&hwsim_genl_family, mcast_skb, + genl_info_net(info), info->snd_portid, + HWSIM_MCGRP_CONFIG, info->nlhdr, GFP_KERNEL); + else + genlmsg_multicast(&hwsim_genl_family, mcast_skb, 0, + HWSIM_MCGRP_CONFIG, GFP_KERNEL); +} + +static int append_radio_msg(struct sk_buff *skb, int id, + struct hwsim_new_radio_params *param) +{ + int ret; + + ret = nla_put_u32(skb, HWSIM_ATTR_RADIO_ID, id); + if (ret < 0) + return ret; + + if (param->channels) { + ret = nla_put_u32(skb, HWSIM_ATTR_CHANNELS, param->channels); + if (ret < 0) + return ret; + } + + if (param->reg_alpha2) { + ret = nla_put(skb, HWSIM_ATTR_REG_HINT_ALPHA2, 2, + param->reg_alpha2); + if (ret < 0) + return ret; + } + + if (param->regd) { + int i; + + for (i = 0; hwsim_world_regdom_custom[i] != param->regd && + i < ARRAY_SIZE(hwsim_world_regdom_custom); i++) + ; + + if (i < ARRAY_SIZE(hwsim_world_regdom_custom)) { + ret = nla_put_u32(skb, HWSIM_ATTR_REG_CUSTOM_REG, i); + if (ret < 0) + return ret; + } + } + + if (param->reg_strict) { + ret = nla_put_flag(skb, HWSIM_ATTR_REG_STRICT_REG); + if (ret < 0) + return ret; + } + + if (param->p2p_device) { + ret = nla_put_flag(skb, HWSIM_ATTR_SUPPORT_P2P_DEVICE); + if (ret < 0) + return ret; + } + + if (param->use_chanctx) { + ret = nla_put_flag(skb, 
HWSIM_ATTR_USE_CHANCTX); + if (ret < 0) + return ret; + } + + if (param->hwname) { + ret = nla_put(skb, HWSIM_ATTR_RADIO_NAME, + strlen(param->hwname), param->hwname); + if (ret < 0) + return ret; + } + + return 0; +} + +static void hwsim_mcast_new_radio(int id, struct genl_info *info, + struct hwsim_new_radio_params *param) +{ + struct sk_buff *mcast_skb; + void *data; + + mcast_skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!mcast_skb) + return; + + data = genlmsg_put(mcast_skb, 0, 0, &hwsim_genl_family, 0, + HWSIM_CMD_NEW_RADIO); + if (!data) + goto out_err; + + if (append_radio_msg(mcast_skb, id, param) < 0) + goto out_err; + + genlmsg_end(mcast_skb, data); + + hwsim_mcast_config_msg(mcast_skb, info); + return; + +out_err: + genlmsg_cancel(mcast_skb, data); + nlmsg_free(mcast_skb); +} + +static int mac80211_hwsim_new_radio(struct genl_info *info, + struct hwsim_new_radio_params *param) { int err; u8 addr[ETH_ALEN]; @@ -1956,16 +2228,16 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2, const struct ieee80211_ops *ops = &mac80211_hwsim_ops; int idx; - if (WARN_ON(channels > 1 && !use_chanctx)) + if (WARN_ON(param->channels > 1 && !param->use_chanctx)) return -EINVAL; spin_lock_bh(&hwsim_radio_lock); idx = hwsim_radio_idx++; spin_unlock_bh(&hwsim_radio_lock); - if (use_chanctx) + if (param->use_chanctx) ops = &mac80211_hwsim_mchan_ops; - hw = ieee80211_alloc_hw(sizeof(*data), ops); + hw = ieee80211_alloc_hw_nm(sizeof(*data), ops, param->hwname); if (!hw) { printk(KERN_DEBUG "mac80211_hwsim: ieee80211_alloc_hw failed\n"); err = -ENOMEM; @@ -2003,9 +2275,12 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2, hw->wiphy->n_addresses = 2; hw->wiphy->addresses = data->addresses; - data->channels = channels; - data->use_chanctx = use_chanctx; + data->channels = param->channels; + data->use_chanctx = param->use_chanctx; data->idx = idx; + data->destroy_on_close = param->destroy_on_close; + if (info) + data->portid = info->snd_portid; if (data->use_chanctx) { hw->wiphy->max_scan_ssids = 255; @@ -2014,12 +2289,12 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2, /* For channels > 1 DFS is not allowed */ hw->wiphy->n_iface_combinations = 1; hw->wiphy->iface_combinations = &data->if_combination; - if (p2p_device) + if (param->p2p_device) data->if_combination = hwsim_if_comb_p2p_dev[0]; else data->if_combination = hwsim_if_comb[0]; data->if_combination.num_different_channels = data->channels; - } else if (p2p_device) { + } else if (param->p2p_device) { hw->wiphy->iface_combinations = hwsim_if_comb_p2p_dev; hw->wiphy->n_iface_combinations = ARRAY_SIZE(hwsim_if_comb_p2p_dev); @@ -2040,7 +2315,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2, BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_MESH_POINT); - if (p2p_device) + if (param->p2p_device) hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_DEVICE); hw->flags = IEEE80211_HW_MFP_CAPABLE | @@ -2060,7 +2335,8 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2, hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR | NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | NL80211_FEATURE_STATIC_SMPS | - NL80211_FEATURE_DYNAMIC_SMPS; + NL80211_FEATURE_DYNAMIC_SMPS | + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; /* ask mac80211 to reserve space for magic */ hw->vif_data_size = sizeof(struct hwsim_vif_priv); @@ -2095,6 +2371,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2, 
sband->ht_cap.ht_supported = true; sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_GRN_FLD | + IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_DSSSCCK40; sband->ht_cap.ampdu_factor = 0x3; @@ -2111,7 +2388,6 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2, sband->vht_cap.cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ | - IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ | IEEE80211_VHT_CAP_RXLDPC | IEEE80211_VHT_CAP_SHORT_GI_80 | IEEE80211_VHT_CAP_SHORT_GI_160 | @@ -2142,15 +2418,19 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2, hw->max_rates = 4; hw->max_rate_tries = 11; - if (reg_strict) + if (param->reg_strict) hw->wiphy->regulatory_flags |= REGULATORY_STRICT_REG; - if (regd) { + if (param->regd) { + data->regd = param->regd; hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG; - wiphy_apply_custom_regulatory(hw->wiphy, regd); + wiphy_apply_custom_regulatory(hw->wiphy, param->regd); /* give the regulatory workqueue a chance to run */ schedule_timeout_interruptible(1); } + if (param->no_vif) + hw->flags |= IEEE80211_HW_NO_AUTO_VIF; + err = ieee80211_register_hw(hw); if (err < 0) { printk(KERN_DEBUG "mac80211_hwsim: ieee80211_register_hw failed (%d)\n", @@ -2160,8 +2440,11 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2, wiphy_debug(hw->wiphy, "hwaddr %pM registered\n", hw->wiphy->perm_addr); - if (reg_alpha2) - regulatory_hint(hw->wiphy, reg_alpha2); + if (param->reg_alpha2) { + data->alpha2[0] = param->reg_alpha2[0]; + data->alpha2[1] = param->reg_alpha2[1]; + regulatory_hint(hw->wiphy, param->reg_alpha2); + } data->debugfs = debugfs_create_dir("hwsim", hw->wiphy->debugfsdir); debugfs_create_file("ps", 0666, data->debugfs, data, &hwsim_fops_ps); @@ -2180,6 +2463,9 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2, list_add_tail(&data->list, &hwsim_radios); spin_unlock_bh(&hwsim_radio_lock); + if (idx > 0) + hwsim_mcast_new_radio(idx, info, param); + return idx; failed_hw: @@ -2192,8 +2478,46 @@ failed: return err; } -static void mac80211_hwsim_destroy_radio(struct mac80211_hwsim_data *data) +static void hwsim_mcast_del_radio(int id, const char *hwname, + struct genl_info *info) { + struct sk_buff *skb; + void *data; + int ret; + + skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!skb) + return; + + data = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0, + HWSIM_CMD_DEL_RADIO); + if (!data) + goto error; + + ret = nla_put_u32(skb, HWSIM_ATTR_RADIO_ID, id); + if (ret < 0) + goto error; + + ret = nla_put(skb, HWSIM_ATTR_RADIO_NAME, strlen(hwname), + hwname); + if (ret < 0) + goto error; + + genlmsg_end(skb, data); + + hwsim_mcast_config_msg(skb, info); + + return; + +error: + nlmsg_free(skb); +} + +static void mac80211_hwsim_del_radio(struct mac80211_hwsim_data *data, + const char *hwname, + struct genl_info *info) +{ + hwsim_mcast_del_radio(data->idx, hwname, info); debugfs_remove_recursive(data->debugfs); ieee80211_unregister_hw(data->hw); device_release_driver(data->dev); @@ -2201,6 +2525,46 @@ static void mac80211_hwsim_destroy_radio(struct mac80211_hwsim_data *data) ieee80211_free_hw(data->hw); } +static int mac80211_hwsim_get_radio(struct sk_buff *skb, + struct mac80211_hwsim_data *data, + u32 portid, u32 seq, + struct netlink_callback *cb, int flags) +{ + void *hdr; + struct hwsim_new_radio_params param = { }; + int res = -EMSGSIZE; + + hdr = genlmsg_put(skb, portid, 
seq, &hwsim_genl_family, flags, + HWSIM_CMD_GET_RADIO); + if (!hdr) + return -EMSGSIZE; + + if (cb) + genl_dump_check_consistent(cb, hdr, &hwsim_genl_family); + + if (data->alpha2[0] && data->alpha2[1]) + param.reg_alpha2 = data->alpha2; + + param.reg_strict = !!(data->hw->wiphy->regulatory_flags & + REGULATORY_STRICT_REG); + param.p2p_device = !!(data->hw->wiphy->interface_modes & + BIT(NL80211_IFTYPE_P2P_DEVICE)); + param.use_chanctx = data->use_chanctx; + param.regd = data->regd; + param.channels = data->channels; + param.hwname = wiphy_name(data->hw->wiphy); + + res = append_radio_msg(skb, data->idx, &param); + if (res < 0) + goto out_err; + + return genlmsg_end(skb, hdr); + +out_err: + genlmsg_cancel(skb, hdr); + return res; +} + static void mac80211_hwsim_free(void) { struct mac80211_hwsim_data *data; @@ -2211,7 +2575,8 @@ static void mac80211_hwsim_free(void) list))) { list_del(&data->list); spin_unlock_bh(&hwsim_radio_lock); - mac80211_hwsim_destroy_radio(data); + mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), + NULL); spin_lock_bh(&hwsim_radio_lock); } spin_unlock_bh(&hwsim_radio_lock); @@ -2339,7 +2704,6 @@ out: static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2, struct genl_info *info) { - struct mac80211_hwsim_data *data2; struct ieee80211_rx_status rx_status; const u8 *dst; @@ -2382,18 +2746,22 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2, /* A frame is received from user space */ memset(&rx_status, 0, sizeof(rx_status)); + /* TODO: Check ATTR_FREQ if it exists, and maybe throw away off-channel + * packets? + */ rx_status.freq = data2->channel->center_freq; rx_status.band = data2->channel->band; rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]); rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]); memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); + data2->rx_pkts++; + data2->rx_bytes += skb->len; ieee80211_rx_irqsafe(data2->hw, skb); return 0; err: printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__); - goto out; out: dev_kfree_skb(skb); return -EINVAL; @@ -2429,54 +2797,72 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2, return 0; } -static int hwsim_create_radio_nl(struct sk_buff *msg, struct genl_info *info) +static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) { - unsigned int chans = channels; - const char *alpha2 = NULL; - const struct ieee80211_regdomain *regd = NULL; - bool reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG]; - bool p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE]; - bool use_chanctx; + struct hwsim_new_radio_params param = { 0 }; + + param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG]; + param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE]; + param.channels = channels; + param.destroy_on_close = + info->attrs[HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE]; if (info->attrs[HWSIM_ATTR_CHANNELS]) - chans = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); + param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); + + if (info->attrs[HWSIM_ATTR_NO_VIF]) + param.no_vif = true; + + if (info->attrs[HWSIM_ATTR_RADIO_NAME]) + param.hwname = nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]); if (info->attrs[HWSIM_ATTR_USE_CHANCTX]) - use_chanctx = true; + param.use_chanctx = true; else - use_chanctx = (chans > 1); + param.use_chanctx = (param.channels > 1); if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]) - alpha2 = nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]); + param.reg_alpha2 = + 
nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]); if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) { u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]); if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) return -EINVAL; - regd = hwsim_world_regdom_custom[idx]; + param.regd = hwsim_world_regdom_custom[idx]; } - return mac80211_hwsim_create_radio(chans, alpha2, regd, reg_strict, - p2p_device, use_chanctx); + return mac80211_hwsim_new_radio(info, &param); } -static int hwsim_destroy_radio_nl(struct sk_buff *msg, struct genl_info *info) +static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info) { struct mac80211_hwsim_data *data; - int idx; + s64 idx = -1; + const char *hwname = NULL; - if (!info->attrs[HWSIM_ATTR_RADIO_ID]) + if (info->attrs[HWSIM_ATTR_RADIO_ID]) + idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]); + else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) + hwname = (void *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]); + else return -EINVAL; - idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]); spin_lock_bh(&hwsim_radio_lock); list_for_each_entry(data, &hwsim_radios, list) { - if (data->idx != idx) - continue; + if (idx >= 0) { + if (data->idx != idx) + continue; + } else { + if (strcmp(hwname, wiphy_name(data->hw->wiphy))) + continue; + } + list_del(&data->list); spin_unlock_bh(&hwsim_radio_lock); - mac80211_hwsim_destroy_radio(data); + mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), + info); return 0; } spin_unlock_bh(&hwsim_radio_lock); @@ -2484,6 +2870,77 @@ static int hwsim_destroy_radio_nl(struct sk_buff *msg, struct genl_info *info) return -ENODEV; } +static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info) +{ + struct mac80211_hwsim_data *data; + struct sk_buff *skb; + int idx, res = -ENODEV; + + if (!info->attrs[HWSIM_ATTR_RADIO_ID]) + return -EINVAL; + idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]); + + spin_lock_bh(&hwsim_radio_lock); + list_for_each_entry(data, &hwsim_radios, list) { + if (data->idx != idx) + continue; + + skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!skb) { + res = -ENOMEM; + goto out_err; + } + + res = mac80211_hwsim_get_radio(skb, data, info->snd_portid, + info->snd_seq, NULL, 0); + if (res < 0) { + nlmsg_free(skb); + goto out_err; + } + + genlmsg_reply(skb, info); + break; + } + +out_err: + spin_unlock_bh(&hwsim_radio_lock); + + return res; +} + +static int hwsim_dump_radio_nl(struct sk_buff *skb, + struct netlink_callback *cb) +{ + int idx = cb->args[0]; + struct mac80211_hwsim_data *data = NULL; + int res; + + spin_lock_bh(&hwsim_radio_lock); + + if (idx == hwsim_radio_idx) + goto done; + + list_for_each_entry(data, &hwsim_radios, list) { + if (data->idx < idx) + continue; + + res = mac80211_hwsim_get_radio(skb, data, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, cb, + NLM_F_MULTI); + if (res < 0) + break; + + idx = data->idx + 1; + } + + cb->args[0] = idx; + +done: + spin_unlock_bh(&hwsim_radio_lock); + return skb->len; +} + /* Generic Netlink operations array */ static const struct genl_ops hwsim_ops[] = { { @@ -2503,19 +2960,48 @@ static const struct genl_ops hwsim_ops[] = { .doit = hwsim_tx_info_frame_received_nl, }, { - .cmd = HWSIM_CMD_CREATE_RADIO, + .cmd = HWSIM_CMD_NEW_RADIO, .policy = hwsim_genl_policy, - .doit = hwsim_create_radio_nl, + .doit = hwsim_new_radio_nl, .flags = GENL_ADMIN_PERM, }, { - .cmd = HWSIM_CMD_DESTROY_RADIO, + .cmd = HWSIM_CMD_DEL_RADIO, .policy = hwsim_genl_policy, - .doit = hwsim_destroy_radio_nl, + .doit = hwsim_del_radio_nl, .flags = 
GENL_ADMIN_PERM, }, + { + .cmd = HWSIM_CMD_GET_RADIO, + .policy = hwsim_genl_policy, + .doit = hwsim_get_radio_nl, + .dumpit = hwsim_dump_radio_nl, + }, }; +static void destroy_radio(struct work_struct *work) +{ + struct mac80211_hwsim_data *data = + container_of(work, struct mac80211_hwsim_data, destroy_work); + + mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL); +} + +static void remove_user_radios(u32 portid) +{ + struct mac80211_hwsim_data *entry, *tmp; + + spin_lock_bh(&hwsim_radio_lock); + list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) { + if (entry->destroy_on_close && entry->portid == portid) { + list_del(&entry->list); + INIT_WORK(&entry->destroy_work, destroy_radio); + schedule_work(&entry->destroy_work); + } + } + spin_unlock_bh(&hwsim_radio_lock); +} + static int mac80211_hwsim_netlink_notify(struct notifier_block *nb, unsigned long state, void *_notify) @@ -2525,6 +3011,8 @@ static int mac80211_hwsim_netlink_notify(struct notifier_block *nb, if (state != NETLINK_URELEASE) return NOTIFY_DONE; + remove_user_radios(notify->portid); + if (notify->portid == wmediumd_portid) { printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink" " socket, switching to perfect channel medium\n"); @@ -2544,7 +3032,9 @@ static int hwsim_init_netlink(void) printk(KERN_INFO "mac80211_hwsim: initializing netlink\n"); - rc = genl_register_family_with_ops(&hwsim_genl_family, hwsim_ops); + rc = genl_register_family_with_ops_groups(&hwsim_genl_family, + hwsim_ops, + hwsim_mcgrps); if (rc) goto failure; @@ -2605,69 +3095,73 @@ static int __init init_mac80211_hwsim(void) goto out_unregister_driver; } + err = hwsim_init_netlink(); + if (err < 0) + goto out_unregister_driver; + for (i = 0; i < radios; i++) { - const char *reg_alpha2 = NULL; - const struct ieee80211_regdomain *regd = NULL; - bool reg_strict = false; + struct hwsim_new_radio_params param = { 0 }; + + param.channels = channels; switch (regtest) { case HWSIM_REGTEST_DIFF_COUNTRY: if (i < ARRAY_SIZE(hwsim_alpha2s)) - reg_alpha2 = hwsim_alpha2s[i]; + param.reg_alpha2 = hwsim_alpha2s[i]; break; case HWSIM_REGTEST_DRIVER_REG_FOLLOW: if (!i) - reg_alpha2 = hwsim_alpha2s[0]; + param.reg_alpha2 = hwsim_alpha2s[0]; break; case HWSIM_REGTEST_STRICT_ALL: - reg_strict = true; + param.reg_strict = true; case HWSIM_REGTEST_DRIVER_REG_ALL: - reg_alpha2 = hwsim_alpha2s[0]; + param.reg_alpha2 = hwsim_alpha2s[0]; break; case HWSIM_REGTEST_WORLD_ROAM: if (i == 0) - regd = &hwsim_world_regdom_custom_01; + param.regd = &hwsim_world_regdom_custom_01; break; case HWSIM_REGTEST_CUSTOM_WORLD: - regd = &hwsim_world_regdom_custom_01; + param.regd = &hwsim_world_regdom_custom_01; break; case HWSIM_REGTEST_CUSTOM_WORLD_2: if (i == 0) - regd = &hwsim_world_regdom_custom_01; + param.regd = &hwsim_world_regdom_custom_01; else if (i == 1) - regd = &hwsim_world_regdom_custom_02; + param.regd = &hwsim_world_regdom_custom_02; break; case HWSIM_REGTEST_STRICT_FOLLOW: if (i == 0) { - reg_strict = true; - reg_alpha2 = hwsim_alpha2s[0]; + param.reg_strict = true; + param.reg_alpha2 = hwsim_alpha2s[0]; } break; case HWSIM_REGTEST_STRICT_AND_DRIVER_REG: if (i == 0) { - reg_strict = true; - reg_alpha2 = hwsim_alpha2s[0]; + param.reg_strict = true; + param.reg_alpha2 = hwsim_alpha2s[0]; } else if (i == 1) { - reg_alpha2 = hwsim_alpha2s[1]; + param.reg_alpha2 = hwsim_alpha2s[1]; } break; case HWSIM_REGTEST_ALL: switch (i) { case 0: - regd = &hwsim_world_regdom_custom_01; + param.regd = &hwsim_world_regdom_custom_01; break; case 1: - regd = 
&hwsim_world_regdom_custom_02; + param.regd = &hwsim_world_regdom_custom_02; break; case 2: - reg_alpha2 = hwsim_alpha2s[0]; + param.reg_alpha2 = hwsim_alpha2s[0]; break; case 3: - reg_alpha2 = hwsim_alpha2s[1]; + param.reg_alpha2 = hwsim_alpha2s[1]; break; case 4: - reg_strict = true; - reg_alpha2 = hwsim_alpha2s[2]; + param.reg_strict = true; + param.reg_alpha2 = hwsim_alpha2s[2]; break; } break; @@ -2675,10 +3169,10 @@ static int __init init_mac80211_hwsim(void) break; } - err = mac80211_hwsim_create_radio(channels, reg_alpha2, - regd, reg_strict, - support_p2p_device, - channels > 1); + param.p2p_device = support_p2p_device; + param.use_chanctx = channels > 1; + + err = mac80211_hwsim_new_radio(NULL, &param); if (err < 0) goto out_free_radios; } @@ -2704,10 +3198,6 @@ static int __init init_mac80211_hwsim(void) } rtnl_unlock(); - err = hwsim_init_netlink(); - if (err < 0) - goto out_free_mon; - return 0; out_free_mon: diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h index c9d0315575bab27035378378d396a46db828bf4c..66e1c73bd50716af22d5f51db4f9fe179f81fadf 100644 --- a/drivers/net/wireless/mac80211_hwsim.h +++ b/drivers/net/wireless/mac80211_hwsim.h @@ -60,14 +60,17 @@ enum hwsim_tx_control_flags { * space, uses: * %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_ADDR_RECEIVER, * %HWSIM_ATTR_FRAME, %HWSIM_ATTR_FLAGS, %HWSIM_ATTR_RX_RATE, - * %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE + * %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE, %HWSIM_ATTR_FREQ (optional) * @HWSIM_CMD_TX_INFO_FRAME: Transmission info report from user space to * kernel, uses: * %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_FLAGS, * %HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE - * @HWSIM_CMD_CREATE_RADIO: create a new radio with the given parameters, - * returns the radio ID (>= 0) or negative on errors - * @HWSIM_CMD_DESTROY_RADIO: destroy a radio + * @HWSIM_CMD_NEW_RADIO: create a new radio with the given parameters, + * returns the radio ID (>= 0) or negative on errors, if successful + * then multicast the result + * @HWSIM_CMD_DEL_RADIO: destroy a radio, reply is multicasted + * @HWSIM_CMD_GET_RADIO: fetch information about existing radios, uses: + * %HWSIM_ATTR_RADIO_ID * @__HWSIM_CMD_MAX: enum limit */ enum { @@ -75,12 +78,16 @@ enum { HWSIM_CMD_REGISTER, HWSIM_CMD_FRAME, HWSIM_CMD_TX_INFO_FRAME, - HWSIM_CMD_CREATE_RADIO, - HWSIM_CMD_DESTROY_RADIO, + HWSIM_CMD_NEW_RADIO, + HWSIM_CMD_DEL_RADIO, + HWSIM_CMD_GET_RADIO, __HWSIM_CMD_MAX, }; #define HWSIM_CMD_MAX (_HWSIM_CMD_MAX - 1) +#define HWSIM_CMD_CREATE_RADIO HWSIM_CMD_NEW_RADIO +#define HWSIM_CMD_DESTROY_RADIO HWSIM_CMD_DEL_RADIO + /** * enum hwsim_attrs - hwsim netlink attributes * @@ -111,6 +118,11 @@ enum { * @HWSIM_ATTR_USE_CHANCTX: used with the %HWSIM_CMD_CREATE_RADIO * command to force use of channel contexts even when only a * single channel is supported + * @HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE: used with the %HWSIM_CMD_CREATE_RADIO + * command to force radio removal when the process that created the radio dies + * @HWSIM_ATTR_RADIO_NAME: Name of radio, e.g. phy666 + * @HWSIM_ATTR_NO_VIF: Do not create vif (wlanX) when creating radio. + * @HWSIM_ATTR_FREQ: Frequency at which packet is transmitted or received. 
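
With the commands renamed above, and compat defines kept so old binaries still build, a userspace tool can create a named radio that is torn down when its netlink socket closes. A minimal libnl-3 sketch, assuming a copy of mac80211_hwsim.h is on the include path and with error handling trimmed:

#include <unistd.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include "mac80211_hwsim.h" /* copied from the kernel tree */

int main(void)
{
        struct nl_sock *sk = nl_socket_alloc();
        struct nl_msg *msg;
        int family;

        genl_connect(sk);
        family = genl_ctrl_resolve(sk, "MAC80211_HWSIM");

        msg = nlmsg_alloc();
        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                    HWSIM_CMD_NEW_RADIO, 1);
        nla_put_string(msg, HWSIM_ATTR_RADIO_NAME, "hwsim-test");
        nla_put_u32(msg, HWSIM_ATTR_CHANNELS, 2);
        nla_put_flag(msg, HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE);
        nl_send_auto(sk, msg);
        nlmsg_free(msg);

        pause(); /* radio lives until this process (its socket) goes away */
        return 0;
}

Build against libnl-3 (pkg-config --cflags --libs libnl-genl-3.0); closing the socket, or exiting, triggers the destroy_on_close teardown handled by the NETLINK_URELEASE notifier above.
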
* @__HWSIM_ATTR_MAX: enum limit */ @@ -132,6 +144,10 @@ enum { HWSIM_ATTR_REG_STRICT_REG, HWSIM_ATTR_SUPPORT_P2P_DEVICE, HWSIM_ATTR_USE_CHANCTX, + HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE, + HWSIM_ATTR_RADIO_NAME, + HWSIM_ATTR_NO_VIF, + HWSIM_ATTR_FREQ, __HWSIM_ATTR_MAX, }; #define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1) diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c index 62f5dbe602d3de46f8daf6408f776670e024fe05..9d4786e7ddff461d982d90a48b9e85919f69615f 100644 --- a/drivers/net/wireless/mwifiex/11n.c +++ b/drivers/net/wireless/mwifiex/11n.c @@ -544,6 +544,7 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac) u32 tx_win_size = priv->add_ba_param.tx_win_size; static u8 dialog_tok; int ret; + unsigned long flags; u16 block_ack_param_set; dev_dbg(priv->adapter->dev, "cmd: %s: tid %d\n", __func__, tid); @@ -554,15 +555,18 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac) memcmp(priv->cfg_bssid, peer_mac, ETH_ALEN)) { struct mwifiex_sta_node *sta_ptr; + spin_lock_irqsave(&priv->sta_list_spinlock, flags); sta_ptr = mwifiex_get_sta_entry(priv, peer_mac); if (!sta_ptr) { dev_warn(priv->adapter->dev, "BA setup with unknown TDLS peer %pM!\n", peer_mac); + spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); return -1; } if (sta_ptr->is_11ac_enabled) tx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE; + spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); } block_ack_param_set = (u16)((tid << BLOCKACKPARAM_TID_POS) | diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h index 2ee268b632be56ca1aa994ed9b1de7baece31d3d..f275675cdbd3b331846c7f05528acf4d6909932d 100644 --- a/drivers/net/wireless/mwifiex/11n.h +++ b/drivers/net/wireless/mwifiex/11n.h @@ -84,6 +84,8 @@ mwifiex_is_amsdu_in_ampdu_allowed(struct mwifiex_private *priv, { struct mwifiex_tx_ba_stream_tbl *tx_tbl; + if (is_broadcast_ether_addr(ptr->ra)) + return false; tx_tbl = mwifiex_get_ba_tbl(priv, tid, ptr->ra); if (tx_tbl) return tx_tbl->amsdu; @@ -96,6 +98,8 @@ static inline u8 mwifiex_is_ampdu_allowed(struct mwifiex_private *priv, struct mwifiex_ra_list_tbl *ptr, int tid) { + if (is_broadcast_ether_addr(ptr->ra)) + return false; if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) { return mwifiex_is_station_ampdu_allowed(priv, ptr, tid); } else { diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c index 5ef5a0eeba50eb682d30e5879f565ac48ad217e4..d73fda312c87a53278b3fda1fc5c3fb521a2b805 100644 --- a/drivers/net/wireless/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c @@ -351,6 +351,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta, new_node->init_win = seq_num; new_node->flags = 0; + spin_lock_irqsave(&priv->sta_list_spinlock, flags); if (mwifiex_queuing_ra_based(priv)) { dev_dbg(priv->adapter->dev, "info: AP/ADHOC:last_seq=%d start_win=%d\n", @@ -367,6 +368,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta, else last_seq = priv->rx_seq[tid]; } + spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM && last_seq >= new_node->start_win) { @@ -455,22 +457,26 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv, u32 rx_win_size = priv->add_ba_param.rx_win_size; u8 tid; int win_size; + unsigned long flags; uint16_t block_ack_param_set; if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) && 
priv->adapter->is_hw_11ac_capable && memcmp(priv->cfg_bssid, cmd_addba_req->peer_mac_addr, ETH_ALEN)) { + spin_lock_irqsave(&priv->sta_list_spinlock, flags); sta_ptr = mwifiex_get_sta_entry(priv, cmd_addba_req->peer_mac_addr); if (!sta_ptr) { dev_warn(priv->adapter->dev, "BA setup with unknown TDLS peer %pM!\n", cmd_addba_req->peer_mac_addr); + spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); return -1; } if (sta_ptr->is_11ac_enabled) rx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE; + spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); } cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP); diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig index e70d0df9b0da4449c58efa03a8a047eeae9d173b..aa01c9bc77f916db68de7c51f8f848d45dc8fa26 100644 --- a/drivers/net/wireless/mwifiex/Kconfig +++ b/drivers/net/wireless/mwifiex/Kconfig @@ -31,7 +31,7 @@ config MWIFIEX_PCIE mwifiex_pcie. config MWIFIEX_USB - tristate "Marvell WiFi-Ex Driver for USB8797/8897" + tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897" depends on MWIFIEX && USB select FW_LOADER ---help--- diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index 0dd672954ad10316bec599a0f2eeb56b70e66c75..4a66a655536647ed0a72d5a1e41bcccc15fae15c 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -194,10 +194,17 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, tx_info->pkt_len = pkt_len; mwifiex_form_mgmt_frame(skb, buf, len); - mwifiex_queue_tx_pkt(priv, skb); - *cookie = prandom_u32() | 1; - cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true, GFP_ATOMIC); + + if (ieee80211_is_action(mgmt->frame_control)) + skb = mwifiex_clone_skb_for_tx_status(priv, + skb, + MWIFIEX_BUF_FLAG_ACTION_TX_STATUS, cookie); + else + cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true, + GFP_ATOMIC); + + mwifiex_queue_tx_pkt(priv, skb); wiphy_dbg(wiphy, "info: management frame transmitted\n"); return 0; @@ -992,6 +999,52 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, return mwifiex_dump_station_info(priv, sinfo); } +static int +mwifiex_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *dev, + int idx, struct survey_info *survey) +{ + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + struct mwifiex_chan_stats *pchan_stats = priv->adapter->chan_stats; + enum ieee80211_band band; + + dev_dbg(priv->adapter->dev, "dump_survey idx=%d\n", idx); + + memset(survey, 0, sizeof(struct survey_info)); + + if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && + priv->media_connected && idx == 0) { + u8 curr_bss_band = priv->curr_bss_params.band; + u32 chan = priv->curr_bss_params.bss_descriptor.channel; + + band = mwifiex_band_to_radio_type(curr_bss_band); + survey->channel = ieee80211_get_channel(wiphy, + ieee80211_channel_to_frequency(chan, band)); + + if (priv->bcn_nf_last) { + survey->filled = SURVEY_INFO_NOISE_DBM; + survey->noise = priv->bcn_nf_last; + } + return 0; + } + + if (idx >= priv->adapter->num_in_chan_stats) + return -ENOENT; + + if (!pchan_stats[idx].cca_scan_dur) + return 0; + + band = pchan_stats[idx].bandcfg; + survey->channel = ieee80211_get_channel(wiphy, + ieee80211_channel_to_frequency(pchan_stats[idx].chan_num, band)); + survey->filled = SURVEY_INFO_NOISE_DBM | + SURVEY_INFO_CHANNEL_TIME | SURVEY_INFO_CHANNEL_TIME_BUSY; + survey->noise = pchan_stats[idx].noise; + survey->channel_time = pchan_stats[idx].cca_scan_dur; + 
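/* cca_busy_dur: the part of that scan dwell during which CCA sensed the channel busy */ +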
survey->channel_time_busy = pchan_stats[idx].cca_busy_dur; + + return 0; +} + /* Supported rates to be advertised to the cfg80211 */ static struct ieee80211_rate mwifiex_rates[] = { {.bitrate = 10, .hw_value = 2, }, @@ -1239,36 +1292,34 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy, */ static int mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev, - const u8 *mac) + struct station_del_parameters *params) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); struct mwifiex_sta_node *sta_node; + u8 deauth_mac[ETH_ALEN]; unsigned long flags; if (list_empty(&priv->sta_list) || !priv->bss_started) return 0; - if (!mac || is_broadcast_ether_addr(mac)) { - wiphy_dbg(wiphy, "%s: NULL/broadcast mac address\n", __func__); - list_for_each_entry(sta_node, &priv->sta_list, list) { - if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_STA_DEAUTH, - HostCmd_ACT_GEN_SET, 0, - sta_node->mac_addr, true)) - return -1; - mwifiex_uap_del_sta_data(priv, sta_node); - } - } else { - wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, mac); - spin_lock_irqsave(&priv->sta_list_spinlock, flags); - sta_node = mwifiex_get_sta_entry(priv, mac); - spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); - if (sta_node) { - if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_STA_DEAUTH, - HostCmd_ACT_GEN_SET, 0, - sta_node->mac_addr, true)) - return -1; - mwifiex_uap_del_sta_data(priv, sta_node); - } + if (!params->mac || is_broadcast_ether_addr(params->mac)) + return 0; + + wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, params->mac); + + memset(deauth_mac, 0, ETH_ALEN); + + spin_lock_irqsave(&priv->sta_list_spinlock, flags); + sta_node = mwifiex_get_sta_entry(priv, params->mac); + if (sta_node) + ether_addr_copy(deauth_mac, params->mac); + spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); + + if (is_valid_ether_addr(deauth_mac)) { + if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_STA_DEAUTH, + HostCmd_ACT_GEN_SET, 0, + deauth_mac, true)) + return -1; } return 0; @@ -1759,6 +1810,10 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, dev_dbg(priv->adapter->dev, "info: associated to bssid %pM successfully\n", priv->cfg_bssid); + if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) && + priv->adapter->auto_tdls && + priv->bss_type == MWIFIEX_BSS_TYPE_STA) + mwifiex_setup_auto_tdls_timer(priv); } else { dev_dbg(priv->adapter->dev, "info: association to bssid %pM failed\n", @@ -2630,11 +2685,13 @@ mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, dev_dbg(priv->adapter->dev, "Send TDLS Setup Request to %pM status_code=%d\n", peer, status_code); + mwifiex_add_auto_tdls_peer(priv, peer); ret = mwifiex_send_tdls_data_frame(priv, peer, action_code, dialog_token, status_code, extra_ies, extra_ies_len); break; case WLAN_TDLS_SETUP_RESPONSE: + mwifiex_add_auto_tdls_peer(priv, peer); dev_dbg(priv->adapter->dev, "Send TDLS Setup Response to %pM status_code=%d\n", peer, status_code); @@ -2779,6 +2836,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { .disconnect = mwifiex_cfg80211_disconnect, .get_station = mwifiex_cfg80211_get_station, .dump_station = mwifiex_cfg80211_dump_station, + .dump_survey = mwifiex_cfg80211_dump_survey, .set_wiphy_params = mwifiex_cfg80211_set_wiphy_params, .join_ibss = mwifiex_cfg80211_join_ibss, .leave_ibss = mwifiex_cfg80211_leave_ibss, @@ -2840,6 +2898,25 @@ static const struct wiphy_coalesce_support mwifiex_coalesce_support = { .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN, }; +int mwifiex_init_channel_scan_gap(struct 
mwifiex_adapter *adapter) +{ + u32 n_channels_bg, n_channels_a = 0; + + n_channels_bg = mwifiex_band_2ghz.n_channels; + + if (adapter->config_bands & BAND_A) + n_channels_a = mwifiex_band_5ghz.n_channels; + + adapter->num_in_chan_stats = max_t(u32, n_channels_bg, n_channels_a); + adapter->chan_stats = vmalloc(sizeof(*adapter->chan_stats) * + adapter->num_in_chan_stats); + + if (!adapter->chan_stats) + return -ENOMEM; + + return 0; +} + /* * This function registers the device with CFG802.11 subsystem. * @@ -2915,6 +2992,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) NL80211_FEATURE_INACTIVITY_TIMER | NL80211_FEATURE_NEED_OBSS_SCAN; + if (adapter->fw_api_ver == MWIFIEX_FW_V15) + wiphy->features |= NL80211_FEATURE_SK_TX_STATUS; + /* Reserve space for mwifiex specific private data for BSS */ wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv); diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h index e0d00a7f0ec314f9dd03e288af38cbd5c6aa1731..2269acf41ad8e6c63498a1728d00d771e162e59b 100644 --- a/drivers/net/wireless/mwifiex/decl.h +++ b/drivers/net/wireless/mwifiex/decl.h @@ -76,6 +76,8 @@ #define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0) #define MWIFIEX_BUF_FLAG_BRIDGED_PKT BIT(1) #define MWIFIEX_BUF_FLAG_TDLS_PKT BIT(2) +#define MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS BIT(3) +#define MWIFIEX_BUF_FLAG_ACTION_TX_STATUS BIT(4) #define MWIFIEX_BRIDGED_PKTS_THR_HIGH 1024 #define MWIFIEX_BRIDGED_PKTS_THR_LOW 128 @@ -85,6 +87,11 @@ #define MWIFIEX_TDLS_CREATE_LINK 0x02 #define MWIFIEX_TDLS_CONFIG_LINK 0x03 +#define MWIFIEX_TDLS_RSSI_HIGH 50 +#define MWIFIEX_TDLS_RSSI_LOW 55 +#define MWIFIEX_TDLS_MAX_FAIL_COUNT 4 +#define MWIFIEX_AUTO_TDLS_IDLE_TIME 10 + enum mwifiex_bss_type { MWIFIEX_BSS_TYPE_STA = 0, MWIFIEX_BSS_TYPE_UAP = 1, @@ -154,6 +161,8 @@ struct mwifiex_txinfo { u8 bss_num; u8 bss_type; u32 pkt_len; + u8 ack_frame_id; + u64 cookie; }; enum mwifiex_wmm_ac_e { @@ -185,4 +194,14 @@ struct mwifiex_arp_eth_header { u8 ar_tha[ETH_ALEN]; u8 ar_tip[4]; } __packed; + +struct mwifiex_chan_stats { + u8 chan_num; + u8 bandcfg; + u8 flags; + s8 noise; + u16 total_bss; + u16 cca_scan_dur; + u16 cca_busy_dur; +} __packed; #endif /* !_MWIFIEX_DECL_H_ */ diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index 1eb61739071fd828bac55865a9df41709e1ccc08..fb5936eb82e3b65121b95b882e38719a1cbb96cb 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h @@ -172,6 +172,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define TLV_TYPE_TDLS_IDLE_TIMEOUT (PROPRIETARY_TLV_BASE_ID + 194) #define TLV_TYPE_SCAN_CHANNEL_GAP (PROPRIETARY_TLV_BASE_ID + 197) #define TLV_TYPE_API_REV (PROPRIETARY_TLV_BASE_ID + 199) +#define TLV_TYPE_CHANNEL_STATS (PROPRIETARY_TLV_BASE_ID + 198) #define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048 @@ -493,6 +494,7 @@ enum P2P_MODES { #define EVENT_TDLS_GENERIC_EVENT 0x00000052 #define EVENT_EXT_SCAN_REPORT 0x00000058 #define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f +#define EVENT_TX_STATUS_REPORT 0x00000074 #define EVENT_ID_MASK 0xffff #define BSS_NUM_MASK 0xf @@ -541,6 +543,7 @@ struct mwifiex_ie_types_data { #define MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET 0x08 #define MWIFIEX_TXPD_FLAGS_TDLS_PACKET 0x10 #define MWIFIEX_RXPD_FLAGS_TDLS_PACKET 0x01 +#define MWIFIEX_TXPD_FLAGS_REQ_TX_STATUS 0x20 struct txpd { u8 bss_type; @@ -552,7 +555,9 @@ struct txpd { u8 priority; u8 flags; u8 pkt_delay_2ms; - u8 reserved1; + u8 reserved1[2]; + u8 tx_token_id; + u8 reserved[2]; } __packed; struct rxpd { @@ -583,6 +588,7 @@ 
struct rxpd { * [Bit 7] Reserved */ u8 ht_info; + u8 reserved[3]; u8 flags; } __packed; @@ -596,8 +602,9 @@ struct uap_txpd { u8 priority; u8 flags; u8 pkt_delay_2ms; - u8 reserved1; - __le32 reserved2; + u8 reserved1[2]; + u8 tx_token_id; + u8 reserved[2]; }; struct uap_rxpd { @@ -611,6 +618,16 @@ struct uap_rxpd { u8 reserved1; }; +struct mwifiex_fw_chan_stats { + u8 chan_num; + u8 bandcfg; + u8 flags; + s8 noise; + __le16 total_bss; + __le16 cca_scan_dur; + __le16 cca_busy_dur; +} __packed; + enum mwifiex_chan_scan_mode_bitmasks { MWIFIEX_PASSIVE_SCAN = BIT(0), MWIFIEX_DISABLE_CHAN_FILT = BIT(1), @@ -660,6 +677,11 @@ struct mwifiex_ie_types_scan_chan_gap { __le16 chan_gap; } __packed; +struct mwifiex_ietypes_chanstats { + struct mwifiex_ie_types_header header; + struct mwifiex_fw_chan_stats chanstats[0]; +} __packed; + struct mwifiex_ie_types_wildcard_ssid_params { struct mwifiex_ie_types_header header; u8 max_ssid_length; @@ -1207,6 +1229,12 @@ struct mwifiex_event_scan_result { u8 num_of_set; } __packed; +struct tx_status_event { + u8 packet_type; + u8 tx_token_id; + u8 status; +} __packed; + #define MWIFIEX_USER_SCAN_CHAN_MAX 50 #define MWIFIEX_MAX_SSID_LIST_LENGTH 10 diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index 580aa45ec4bcfe9716a3271661e98d8fa1c7c82a..520ad4a3018bd1657b7edf267b3bd78af46f052c 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -137,6 +137,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv) priv->csa_expire_time = 0; priv->del_list_idx = 0; priv->hs2_enabled = false; + priv->check_tdls_tx = false; memcpy(priv->tos_to_tid_inv, tos_to_tid_inv, MAX_NUM_TID); return mwifiex_add_bss_prio_tbl(priv); @@ -366,6 +367,7 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter) list_del(&priv->tx_ba_stream_tbl_ptr); list_del(&priv->rx_reorder_tbl_ptr); list_del(&priv->sta_list); + list_del(&priv->auto_tdls_list); } } } @@ -434,6 +436,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter) spin_lock_init(&priv->wmm.ra_list_spinlock); spin_lock_init(&priv->curr_bcn_buf_lock); spin_lock_init(&priv->sta_list_spinlock); + spin_lock_init(&priv->auto_tdls_lock); } } @@ -449,7 +452,6 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter) spin_lock_init(&adapter->scan_pending_q_lock); spin_lock_init(&adapter->rx_proc_lock); - skb_queue_head_init(&adapter->usb_rx_data_q); skb_queue_head_init(&adapter->rx_data_q); for (i = 0; i < adapter->priv_num; ++i) { @@ -466,10 +468,14 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter) INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr); INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr); INIT_LIST_HEAD(&priv->sta_list); + INIT_LIST_HEAD(&priv->auto_tdls_list); skb_queue_head_init(&priv->tdls_txq); spin_lock_init(&priv->tx_ba_stream_tbl_lock); spin_lock_init(&priv->rx_reorder_tbl_lock); + + spin_lock_init(&priv->ack_status_lock); + idr_init(&priv->ack_status_frames); } return 0; @@ -646,6 +652,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) if (adapter->priv[i]) { priv = adapter->priv[i]; + mwifiex_clean_auto_tdls(priv); mwifiex_clean_txrx(priv); mwifiex_delete_bss_prio_tbl(priv); } @@ -668,19 +675,6 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) spin_lock(&adapter->mwifiex_lock); - if (adapter->if_ops.data_complete) { - while ((skb = skb_dequeue(&adapter->usb_rx_data_q))) { - struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb); - - priv = adapter->priv[rx_info->bss_num]; - if (priv) - 
priv->stats.rx_dropped++; - - dev_kfree_skb_any(skb); - adapter->if_ops.data_complete(adapter); - } - } - mwifiex_adapter_cleanup(adapter); spin_unlock(&adapter->mwifiex_lock); diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index 8d6c25908b6d012fc9f2ef3d9f9211fd5f09b862..411a6c2f4aca5da922409bf33c1aa7eae5a92f00 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c @@ -880,9 +880,7 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, /* Set Capability info */ bss_desc->cap_info_bitmap |= WLAN_CAPABILITY_IBSS; - tmp_cap = le16_to_cpu(adhoc_start->cap_info_bitmap); - tmp_cap &= ~WLAN_CAPABILITY_ESS; - tmp_cap |= WLAN_CAPABILITY_IBSS; + tmp_cap = WLAN_CAPABILITY_IBSS; /* Set up privacy in bss_desc */ if (priv->sec_info.encryption_mode) { diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index d5070c444fe1fcb53ff95a67024b12013ed547dc..d4d2223d1f317585c7248e368501613a3574d9f1 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -28,6 +28,11 @@ const char driver_version[] = "mwifiex " VERSION " (%s) "; static char *cal_data_cfg; module_param(cal_data_cfg, charp, 0); +static unsigned short driver_mode; +module_param(driver_mode, ushort, 0); +MODULE_PARM_DESC(driver_mode, + "station=0x1(default), ap-sta=0x3, station-p2p=0x5, ap-sta-p2p=0x7"); + /* * This function registers the device and performs all the necessary * initializations. @@ -122,6 +127,7 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter) } } + vfree(adapter->chan_stats); kfree(adapter); return 0; } @@ -143,8 +149,11 @@ static int mwifiex_process_rx(struct mwifiex_adapter *adapter) /* Check for Rx data */ while ((skb = skb_dequeue(&adapter->rx_data_q))) { atomic_dec(&adapter->rx_pending); - if (adapter->delay_main_work && + if ((adapter->delay_main_work || + adapter->iface_type == MWIFIEX_USB) && (atomic_read(&adapter->rx_pending) < LOW_RX_PENDING)) { + if (adapter->if_ops.submit_rem_rx_urbs) + adapter->if_ops.submit_rem_rx_urbs(adapter); adapter->delay_main_work = false; queue_work(adapter->workqueue, &adapter->main_work); } @@ -177,7 +186,6 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter) { int ret = 0; unsigned long flags; - struct sk_buff *skb; spin_lock_irqsave(&adapter->main_proc_lock, flags); @@ -195,12 +203,15 @@ process_start: (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY)) break; - /* If we process interrupts first, it would increase RX pending - * even further. Avoid this by checking if rx_pending has - * crossed high threshold and schedule rx work queue - * and then process interrupts + /* For non-USB interfaces, If we process interrupts first, it + * would increase RX pending even further. Avoid this by + * checking if rx_pending has crossed high threshold and + * schedule rx work queue and then process interrupts. + * For USB interface, there are no interrupts. 
We already have + * HIGH_RX_PENDING check in usb.c */ - if (atomic_read(&adapter->rx_pending) >= HIGH_RX_PENDING) { + if (atomic_read(&adapter->rx_pending) >= HIGH_RX_PENDING && + adapter->iface_type != MWIFIEX_USB) { adapter->delay_main_work = true; if (!adapter->rx_processing) queue_work(adapter->rx_workqueue, @@ -252,11 +263,6 @@ process_start: } } - /* Check Rx data for USB */ - if (adapter->iface_type == MWIFIEX_USB) - while ((skb = skb_dequeue(&adapter->usb_rx_data_q))) - mwifiex_handle_rx_packet(adapter, skb); - /* Check for event */ if (adapter->event_received) { adapter->event_received = false; @@ -447,6 +453,16 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) goto err_init_fw; } + if (mwifiex_init_channel_scan_gap(adapter)) { + dev_err(adapter->dev, "could not init channel stats table\n"); + goto err_init_fw; + } + + if (driver_mode) { + driver_mode &= MWIFIEX_DRIVER_MODE_BITMASK; + driver_mode |= MWIFIEX_DRIVER_MODE_STA; + } + rtnl_lock(); /* Create station interface by default */ wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d", @@ -456,6 +472,28 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) rtnl_unlock(); goto err_add_intf; } + + if (driver_mode & MWIFIEX_DRIVER_MODE_UAP) { + wdev = mwifiex_add_virtual_intf(adapter->wiphy, "uap%d", + NL80211_IFTYPE_AP, NULL, NULL); + if (IS_ERR(wdev)) { + dev_err(adapter->dev, "cannot create AP interface\n"); + rtnl_unlock(); + goto err_add_intf; + } + } + + if (driver_mode & MWIFIEX_DRIVER_MODE_P2P) { + wdev = mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d", + NL80211_IFTYPE_P2P_CLIENT, NULL, + NULL); + if (IS_ERR(wdev)) { + dev_err(adapter->dev, + "cannot create p2p client interface\n"); + rtnl_unlock(); + goto err_add_intf; + } + } rtnl_unlock(); mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1); @@ -570,6 +608,48 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb) return 0; } +struct sk_buff * +mwifiex_clone_skb_for_tx_status(struct mwifiex_private *priv, + struct sk_buff *skb, u8 flag, u64 *cookie) +{ + struct sk_buff *orig_skb = skb; + struct mwifiex_txinfo *tx_info, *orig_tx_info; + + skb = skb_clone(skb, GFP_ATOMIC); + if (skb) { + unsigned long flags; + int id; + + spin_lock_irqsave(&priv->ack_status_lock, flags); + id = idr_alloc(&priv->ack_status_frames, orig_skb, + 1, 0xff, GFP_ATOMIC); + spin_unlock_irqrestore(&priv->ack_status_lock, flags); + + if (id >= 0) { + tx_info = MWIFIEX_SKB_TXCB(skb); + tx_info->ack_frame_id = id; + tx_info->flags |= flag; + orig_tx_info = MWIFIEX_SKB_TXCB(orig_skb); + orig_tx_info->ack_frame_id = id; + orig_tx_info->flags |= flag; + + if (flag == MWIFIEX_BUF_FLAG_ACTION_TX_STATUS && cookie) + orig_tx_info->cookie = *cookie; + + } else if (skb_shared(skb)) { + kfree_skb(orig_skb); + } else { + kfree_skb(skb); + skb = orig_skb; + } + } else { + /* couldn't clone -- lose tx status ... */ + skb = orig_skb; + } + + return skb; +} + /* * CFG802.11 network device handler for data transmission. 
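* With V15 firmware, unicast skbs that carry SKBTX_WIFI_STATUS are cloned * via mwifiex_clone_skb_for_tx_status() so the firmware's TX status report * can complete the original skb.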
*/ @@ -579,6 +659,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); struct sk_buff *new_skb; struct mwifiex_txinfo *tx_info; + bool multicast; dev_dbg(priv->adapter->dev, "data: %lu BSS(%d-%d): Data <= kernel\n", jiffies, priv->bss_type, priv->bss_num); @@ -619,6 +700,15 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_info->bss_type = priv->bss_type; tx_info->pkt_len = skb->len; + multicast = is_multicast_ether_addr(skb->data); + + if (unlikely(!multicast && skb->sk && + skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS && + priv->adapter->fw_api_ver == MWIFIEX_FW_V15)) + skb = mwifiex_clone_skb_for_tx_status(priv, + skb, + MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS, NULL); + /* Record the current time the packet was queued; used to * determine the amount of time the packet was queued in * the driver before it was sent to the firmware. @@ -628,6 +718,13 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) */ __net_timestamp(skb); + if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) && + priv->bss_type == MWIFIEX_BSS_TYPE_STA && + !ether_addr_equal_unaligned(priv->cfg_bssid, skb->data)) { + if (priv->adapter->auto_tdls && priv->check_tdls_tx) + mwifiex_tdls_check_tx(priv, skb); + } + mwifiex_queue_tx_pkt(priv, skb); return 0; @@ -858,7 +955,7 @@ mwifiex_add_card(void *card, struct semaphore *sem, adapter->cmd_wait_q.status = 0; adapter->scan_wait_q_woken = false; - if (num_possible_cpus() > 1) { + if ((num_possible_cpus() > 1) || adapter->iface_type == MWIFIEX_USB) { adapter->rx_work_enabled = true; pr_notice("rx work enabled, cpus %d\n", num_possible_cpus()); } diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index f55658d15c60710d44603d12b9fbab58414050fc..e66993cb5daf185f32c9053ce30577678cd861a6 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -34,6 +34,7 @@ #include #include #include +#include #include "decl.h" #include "ioctl.h" @@ -48,6 +49,11 @@ enum { MWIFIEX_SYNC_CMD }; +#define MWIFIEX_DRIVER_MODE_STA BIT(0) +#define MWIFIEX_DRIVER_MODE_UAP BIT(1) +#define MWIFIEX_DRIVER_MODE_P2P BIT(2) +#define MWIFIEX_DRIVER_MODE_BITMASK (BIT(0) | BIT(1) | BIT(2)) + #define MWIFIEX_MAX_AP 64 #define MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT (5 * HZ) @@ -106,10 +112,7 @@ enum { */ #define IS_CARD_RX_RCVD(adapter) (adapter->cmd_resp_received || \ adapter->event_received || \ - ((adapter->iface_type != MWIFIEX_USB) && \ - adapter->data_received) || \ - ((adapter->iface_type == MWIFIEX_USB) && \ - !skb_queue_empty(&adapter->usb_rx_data_q))) + adapter->data_received) #define MWIFIEX_TYPE_CMD 1 #define MWIFIEX_TYPE_DATA 0 @@ -504,8 +507,11 @@ struct mwifiex_private { struct mwifiex_wmm_desc wmm; atomic_t wmm_tx_pending[IEEE80211_NUM_ACS]; struct list_head sta_list; - /* spin lock for associated station list */ + /* spin lock for associated station/TDLS peers list */ spinlock_t sta_list_spinlock; + struct list_head auto_tdls_list; + /* spin lock for auto TDLS peer list */ + spinlock_t auto_tdls_lock; struct list_head tx_ba_stream_tbl_ptr; /* spin lock for tx_ba_stream_tbl_ptr queue */ spinlock_t tx_ba_stream_tbl_lock; @@ -570,6 +576,12 @@ struct mwifiex_private { bool hs2_enabled; struct station_parameters *sta_params; struct sk_buff_head tdls_txq; + u8 check_tdls_tx; + struct timer_list auto_tdls_timer; + bool auto_tdls_timer_active; + struct idr ack_status_frames; + /* spin lock for ack status */ + spinlock_t 
ack_status_lock; }; enum mwifiex_ba_status { @@ -670,6 +682,17 @@ struct mwifiex_sta_node { struct mwifiex_tdls_capab tdls_cap; }; +struct mwifiex_auto_tdls_peer { + struct list_head list; + u8 mac_addr[ETH_ALEN]; + u8 tdls_status; + int rssi; + long rssi_jiffies; + u8 failure_count; + u8 do_discover; + u8 do_setup; +}; + struct mwifiex_if_ops { int (*init_if) (struct mwifiex_adapter *); void (*cleanup_if) (struct mwifiex_adapter *); @@ -690,13 +713,13 @@ struct mwifiex_if_ops { void (*cleanup_mpa_buf) (struct mwifiex_adapter *); int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *); int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *); - int (*data_complete) (struct mwifiex_adapter *); int (*init_fw_port) (struct mwifiex_adapter *); int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *); void (*card_reset) (struct mwifiex_adapter *); void (*fw_dump)(struct mwifiex_adapter *); int (*clean_pcie_ring) (struct mwifiex_adapter *adapter); void (*iface_work)(struct work_struct *work); + void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter); }; struct mwifiex_adapter { @@ -767,7 +790,6 @@ struct mwifiex_adapter { spinlock_t scan_pending_q_lock; /* spin lock for RX processing routine */ spinlock_t rx_proc_lock; - struct sk_buff_head usb_rx_data_q; u32 scan_processing; u16 region_code; struct mwifiex_802_11d_domain_reg domain_reg; @@ -845,6 +867,10 @@ struct mwifiex_adapter { u8 curr_mem_idx; bool scan_chan_gap_enabled; struct sk_buff_head rx_data_q; + struct mwifiex_chan_stats *chan_stats; + u32 num_in_chan_stats; + int survey_idx; + bool auto_tdls; }; int mwifiex_init_lock_list(struct mwifiex_adapter *adapter); @@ -949,6 +975,8 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv, int mwifiex_process_sta_event(struct mwifiex_private *); int mwifiex_process_uap_event(struct mwifiex_private *); void mwifiex_delete_all_station_list(struct mwifiex_private *priv); +void mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, + const u8 *ra_addr); void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb); void *mwifiex_process_uap_txpd(struct mwifiex_private *, struct sk_buff *skb); int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta); @@ -1031,7 +1059,8 @@ void mwifiex_set_11ac_ba_params(struct mwifiex_private *priv); int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, void *data_buf); -int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv); +int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv, + struct host_cmd_ds_command *resp); int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv, void *buf); @@ -1301,6 +1330,24 @@ u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band, u32 pri_chan, u8 chan_bw); int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter); +int mwifiex_tdls_check_tx(struct mwifiex_private *priv, struct sk_buff *skb); +void mwifiex_flush_auto_tdls_list(struct mwifiex_private *priv); +void mwifiex_auto_tdls_update_peer_status(struct mwifiex_private *priv, + const u8 *mac, u8 link_status); +void mwifiex_auto_tdls_update_peer_signal(struct mwifiex_private *priv, + u8 *mac, s8 snr, s8 nflr); +void mwifiex_check_auto_tdls(unsigned long context); +void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac); +void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv); +void mwifiex_clean_auto_tdls(struct mwifiex_private *priv); + +void mwifiex_parse_tx_status_event(struct 
mwifiex_private *priv, + void *event_body); + +struct sk_buff * +mwifiex_clone_skb_for_tx_status(struct mwifiex_private *priv, + struct sk_buff *skb, u8 flag, u64 *cookie); + #ifdef CONFIG_DEBUG_FS void mwifiex_debugfs_init(void); void mwifiex_debugfs_remove(void); diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index ca64d4c941127511bde59dd76d8b2c47299db47b..984a7a4fa93b4920600255ec79966b3ab5272e49 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -1623,7 +1623,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, if (*bytes_left >= sizeof(beacon_size)) { /* Extract & convert beacon size from command buffer */ - memcpy(&beacon_size, *bss_info, sizeof(beacon_size)); + beacon_size = le16_to_cpu(*(__le16 *)(*bss_info)); *bytes_left -= sizeof(beacon_size); *bss_info += sizeof(beacon_size); } @@ -1755,6 +1755,7 @@ static void mwifiex_complete_scan(struct mwifiex_private *priv) { struct mwifiex_adapter *adapter = priv->adapter; + adapter->survey_idx = 0; if (adapter->curr_cmd->wait_q_enabled) { adapter->cmd_wait_q.status = 0; if (!priv->scan_request) { @@ -1976,10 +1977,53 @@ int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv, return 0; } +static void +mwifiex_update_chan_statistics(struct mwifiex_private *priv, + struct mwifiex_ietypes_chanstats *tlv_stat) +{ + struct mwifiex_adapter *adapter = priv->adapter; + u8 i, num_chan; + struct mwifiex_fw_chan_stats *fw_chan_stats; + struct mwifiex_chan_stats chan_stats; + + fw_chan_stats = (void *)((u8 *)tlv_stat + + sizeof(struct mwifiex_ie_types_header)); + num_chan = le16_to_cpu(tlv_stat->header.len) / + sizeof(struct mwifiex_chan_stats); + + for (i = 0 ; i < num_chan; i++) { + chan_stats.chan_num = fw_chan_stats->chan_num; + chan_stats.bandcfg = fw_chan_stats->bandcfg; + chan_stats.flags = fw_chan_stats->flags; + chan_stats.noise = fw_chan_stats->noise; + chan_stats.total_bss = le16_to_cpu(fw_chan_stats->total_bss); + chan_stats.cca_scan_dur = + le16_to_cpu(fw_chan_stats->cca_scan_dur); + chan_stats.cca_busy_dur = + le16_to_cpu(fw_chan_stats->cca_busy_dur); + dev_dbg(adapter->dev, + "chan=%d, noise=%d, total_network=%d scan_duration=%d, busy_duration=%d\n", + chan_stats.chan_num, + chan_stats.noise, + chan_stats.total_bss, + chan_stats.cca_scan_dur, + chan_stats.cca_busy_dur); + memcpy(&adapter->chan_stats[adapter->survey_idx++], &chan_stats, + sizeof(struct mwifiex_chan_stats)); + fw_chan_stats++; + } +} + /* This function handles the command response of extended scan */ -int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv) +int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv, + struct host_cmd_ds_command *resp) { struct mwifiex_adapter *adapter = priv->adapter; + struct host_cmd_ds_802_11_scan_ext *ext_scan_resp; + struct mwifiex_ie_types_header *tlv; + struct mwifiex_ietypes_chanstats *tlv_stat; + u16 buf_left, type, len; + struct host_cmd_ds_command *cmd_ptr; struct cmd_ctrl_node *cmd_node; unsigned long cmd_flags, scan_flags; @@ -1987,6 +2031,36 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv) dev_dbg(priv->adapter->dev, "info: EXT scan returns successfully\n"); + ext_scan_resp = &resp->params.ext_scan; + + tlv = (void *)ext_scan_resp->tlv_buffer; + buf_left = le16_to_cpu(resp->size) - (sizeof(*ext_scan_resp) + S_DS_GEN + - 1); + + while (buf_left >= sizeof(struct mwifiex_ie_types_header)) { + type = le16_to_cpu(tlv->type); + len = le16_to_cpu(tlv->len); + + if (buf_left < (sizeof(struct 
mwifiex_ie_types_header) + len)) { + dev_err(adapter->dev, + "error processing scan response TLVs"); + break; + } + + switch (type) { + case TLV_TYPE_CHANNEL_STATS: + tlv_stat = (void *)tlv; + mwifiex_update_chan_statistics(priv, tlv_stat); + break; + default: + break; + } + + buf_left -= len + sizeof(struct mwifiex_ie_types_header); + tlv = (void *)((u8 *)tlv + len + + sizeof(struct mwifiex_ie_types_header)); + } + spin_lock_irqsave(&adapter->cmd_pending_q_lock, cmd_flags); spin_lock_irqsave(&adapter->scan_pending_q_lock, scan_flags); if (list_empty(&adapter->scan_pending_q)) { diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index b25766b43b9f958120c398a1ced188416508f42f..933dae137850c1d85e585264e14ea046b90f686e 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -106,6 +106,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size; card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size; card->supports_fw_dump = data->supports_fw_dump; + card->auto_tdls = data->auto_tdls; } sdio_claim_host(func); @@ -1880,6 +1881,7 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter) return -1; } + adapter->auto_tdls = card->auto_tdls; return ret; } diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h index 20cd9adc98d3c5c5c98edf5e873b1b65e104c6ee..54c07156dd78351ad5ff36f22f0c6cd062376468 100644 --- a/drivers/net/wireless/mwifiex/sdio.h +++ b/drivers/net/wireless/mwifiex/sdio.h @@ -246,6 +246,7 @@ struct sdio_mmc_card { u8 curr_wr_port; u8 *mp_regs; + u8 auto_tdls; struct mwifiex_sdio_mpa_tx mpa_tx; struct mwifiex_sdio_mpa_rx mpa_rx; @@ -262,6 +263,7 @@ struct mwifiex_sdio_device { u16 tx_buf_size; u32 mp_tx_agg_buf_size; u32 mp_rx_agg_buf_size; + u8 auto_tdls; }; static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = { @@ -387,6 +389,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = { .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K, .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K, .supports_fw_dump = false, + .auto_tdls = false, }; static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = { @@ -400,6 +403,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = { .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K, .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K, .supports_fw_dump = false, + .auto_tdls = false, }; static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = { @@ -413,6 +417,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = { .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K, .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K, .supports_fw_dump = false, + .auto_tdls = false, }; static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = { @@ -426,6 +431,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = { .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K, .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K, .supports_fw_dump = true, + .auto_tdls = false, }; static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = { @@ -439,6 +445,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = { .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K, .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K, .supports_fw_dump = false, + .auto_tdls = true, }; /* diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index 
4aad44685f8dff43f80e5ed79a729bf99dbe8d5c..b65e1014b0fccc1c2124b323416f062de3ba7db4 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -983,7 +983,7 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, adapter->curr_cmd->wait_q_enabled = false; break; case HostCmd_CMD_802_11_SCAN_EXT: - ret = mwifiex_ret_802_11_scan_ext(priv); + ret = mwifiex_ret_802_11_scan_ext(priv, resp); adapter->curr_cmd->wait_q_enabled = false; break; case HostCmd_CMD_802_11_BG_SCAN_QUERY: diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c index f1c240eca0cdae2e6364f67e0aab150497afed2b..b8c171df6223d827da184ec4617c6ac14791752e 100644 --- a/drivers/net/wireless/mwifiex/sta_event.c +++ b/drivers/net/wireless/mwifiex/sta_event.c @@ -55,9 +55,13 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) priv->scan_block = false; if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && - ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info)) + ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info)) { mwifiex_disable_all_tdls_links(priv); + if (priv->adapter->auto_tdls) + mwifiex_clean_auto_tdls(priv); + } + /* Free Tx and Rx packets, report disconnect to upper layer */ mwifiex_clean_txrx(priv); @@ -163,9 +167,6 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv, NL80211_TDLS_TEARDOWN, le16_to_cpu(tdls_evt->u.reason_code), GFP_KERNEL); - ret = mwifiex_tdls_oper(priv, tdls_evt->peer_mac, - MWIFIEX_TDLS_DISABLE_LINK); - queue_work(adapter->workqueue, &adapter->main_work); break; default: break; @@ -503,6 +504,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) ret = mwifiex_parse_tdls_event(priv, adapter->event_skb); break; + case EVENT_TX_STATUS_REPORT: + dev_dbg(adapter->dev, "event: TX_STATUS Report\n"); + mwifiex_parse_tx_status_event(priv, adapter->event_body); + break; + default: dev_dbg(adapter->dev, "event: unknown event id: %#x\n", eventcause); diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index 92f3eb8398662700298e6ba81eab855ecdbe554b..1626868a4b5ce7935af22b31c8e506f1db923d06 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -1026,12 +1026,12 @@ mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version, int max_len) { union { - u32 l; + __le32 l; u8 c[4]; } ver; char fw_ver[32]; - ver.l = adapter->fw_release_number; + ver.l = cpu_to_le32(adapter->fw_release_number); sprintf(fw_ver, "%u.%u.%u.p%u", ver.c[2], ver.c[1], ver.c[0], ver.c[3]); snprintf(version, max_len, driver_version, fw_ver); diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c index 9ceb1dbe34c532fdae40952a167675d76bdea9aa..c2ad3b63ae7000cd616c42b44f315b053b64606c 100644 --- a/drivers/net/wireless/mwifiex/sta_rx.c +++ b/drivers/net/wireless/mwifiex/sta_rx.c @@ -232,6 +232,9 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv, if (sta_ptr) sta_ptr->rx_seq[local_rx_pd->priority] = le16_to_cpu(local_rx_pd->seq_num); + mwifiex_auto_tdls_update_peer_signal(priv, ta, + local_rx_pd->snr, + local_rx_pd->nf); } } else { if (rx_pkt_type != PKT_TYPE_BAR) diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c index dab7b33c54bed0d2849ba2b275b103bf0925ba1e..b896d7375b52607959535d2103986f82ec678955 100644 --- a/drivers/net/wireless/mwifiex/sta_tx.c +++ b/drivers/net/wireless/mwifiex/sta_tx.c @@ 
-77,6 +77,12 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv, local_tx_pd->pkt_delay_2ms = mwifiex_wmm_compute_drv_pkt_delay(priv, skb); + if (tx_info->flags & MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS || + tx_info->flags & MWIFIEX_BUF_FLAG_ACTION_TX_STATUS) { + local_tx_pd->tx_token_id = tx_info->ack_frame_id; + local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_REQ_TX_STATUS; + } + if (local_tx_pd->priority < ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl)) /* diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c index e2949077f5b5dee5e02cea4d16efbb2456232eea..22884b429be787cadefb8f588e99890f44dd5988 100644 --- a/drivers/net/wireless/mwifiex/tdls.c +++ b/drivers/net/wireless/mwifiex/tdls.c @@ -24,6 +24,7 @@ #define TDLS_REQ_FIX_LEN 6 #define TDLS_RESP_FIX_LEN 8 #define TDLS_CONFIRM_FIX_LEN 6 +#define MWIFIEX_TDLS_WMM_INFO_SIZE 7 static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv, const u8 *mac, u8 status) @@ -367,6 +368,55 @@ static void mwifiex_tdls_add_qos_capab(struct sk_buff *skb) *pos++ = MWIFIEX_TDLS_DEF_QOS_CAPAB; } +static void +mwifiex_tdls_add_wmm_param_ie(struct mwifiex_private *priv, struct sk_buff *skb) +{ + struct ieee80211_wmm_param_ie *wmm; + u8 ac_vi[] = {0x42, 0x43, 0x5e, 0x00}; + u8 ac_vo[] = {0x62, 0x32, 0x2f, 0x00}; + u8 ac_be[] = {0x03, 0xa4, 0x00, 0x00}; + u8 ac_bk[] = {0x27, 0xa4, 0x00, 0x00}; + + wmm = (void *)skb_put(skb, sizeof(*wmm)); + memset(wmm, 0, sizeof(*wmm)); + + wmm->element_id = WLAN_EID_VENDOR_SPECIFIC; + wmm->len = sizeof(*wmm) - 2; + wmm->oui[0] = 0x00; /* Microsoft OUI 00:50:F2 */ + wmm->oui[1] = 0x50; + wmm->oui[2] = 0xf2; + wmm->oui_type = 2; /* WME */ + wmm->oui_subtype = 1; /* WME param */ + wmm->version = 1; /* WME ver */ + wmm->qos_info = 0; /* U-APSD not in use */ + + /* use default WMM AC parameters for TDLS link */ + memcpy(&wmm->ac[0], ac_be, sizeof(ac_be)); + memcpy(&wmm->ac[1], ac_bk, sizeof(ac_bk)); + memcpy(&wmm->ac[2], ac_vi, sizeof(ac_vi)); + memcpy(&wmm->ac[3], ac_vo, sizeof(ac_vo)); +} + +static void +mwifiex_add_wmm_info_ie(struct mwifiex_private *priv, struct sk_buff *skb, + u8 qosinfo) +{ + u8 *buf; + + buf = (void *)skb_put(skb, MWIFIEX_TDLS_WMM_INFO_SIZE + + sizeof(struct ieee_types_header)); + + *buf++ = WLAN_EID_VENDOR_SPECIFIC; + *buf++ = 7; /* len */ + *buf++ = 0x00; /* Microsoft OUI 00:50:F2 */ + *buf++ = 0x50; + *buf++ = 0xf2; + *buf++ = 2; /* WME */ + *buf++ = 0; /* WME info */ + *buf++ = 1; /* WME ver */ + *buf++ = qosinfo; /* U-APSD not in use */ +} + static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv, const u8 *peer, u8 action_code, u8 dialog_token, @@ -421,6 +471,7 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv, mwifiex_tdls_add_ext_capab(priv, skb); mwifiex_tdls_add_qos_capab(skb); + mwifiex_add_wmm_info_ie(priv, skb, 0); break; case WLAN_TDLS_SETUP_RESPONSE: @@ -458,6 +509,7 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv, mwifiex_tdls_add_ext_capab(priv, skb); mwifiex_tdls_add_qos_capab(skb); + mwifiex_add_wmm_info_ie(priv, skb, 0); break; case WLAN_TDLS_SETUP_CONFIRM: @@ -466,6 +518,8 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv, skb_put(skb, sizeof(tf->u.setup_cfm)); tf->u.setup_cfm.status_code = cpu_to_le16(status_code); tf->u.setup_cfm.dialog_token = dialog_token; + + mwifiex_tdls_add_wmm_param_ie(priv, skb); if (priv->adapter->is_hw_11ac_capable) { ret = mwifiex_tdls_add_vht_oper(priv, peer, skb); if (ret) { @@ -544,6 +598,7 @@ int mwifiex_send_tdls_data_frame(struct 
mwifiex_private *priv, const u8 *peer, sizeof(struct ieee_types_bss_co_2040) + sizeof(struct ieee80211_ht_operation) + sizeof(struct ieee80211_tdls_lnkie) + + sizeof(struct ieee80211_wmm_param_ie) + extra_ies_len; if (priv->adapter->is_hw_11ac_capable) @@ -973,6 +1028,7 @@ mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, const u8 *peer) } mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN); + mwifiex_auto_tdls_update_peer_status(priv, peer, TDLS_NOT_SETUP); memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN); tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK; return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER, @@ -1017,6 +1073,8 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer) memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq)); mwifiex_restore_tdls_packets(priv, peer, TDLS_SETUP_COMPLETE); + mwifiex_auto_tdls_update_peer_status(priv, peer, + TDLS_SETUP_COMPLETE); } else { dev_dbg(priv->adapter->dev, "tdls: enable link %pM failed\n", peer); @@ -1030,6 +1088,8 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer) mwifiex_del_sta_entry(priv, peer); } mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN); + mwifiex_auto_tdls_update_peer_status(priv, peer, + TDLS_NOT_SETUP); return -1; } @@ -1097,3 +1157,231 @@ void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv) mwifiex_del_all_sta_list(priv); } + +int mwifiex_tdls_check_tx(struct mwifiex_private *priv, struct sk_buff *skb) +{ + struct mwifiex_auto_tdls_peer *peer; + unsigned long flags; + u8 mac[ETH_ALEN]; + + ether_addr_copy(mac, skb->data); + + spin_lock_irqsave(&priv->auto_tdls_lock, flags); + list_for_each_entry(peer, &priv->auto_tdls_list, list) { + if (!memcmp(mac, peer->mac_addr, ETH_ALEN)) { + if (peer->rssi <= MWIFIEX_TDLS_RSSI_HIGH && + peer->tdls_status == TDLS_NOT_SETUP && + (peer->failure_count < + MWIFIEX_TDLS_MAX_FAIL_COUNT)) { + peer->tdls_status = TDLS_SETUP_INPROGRESS; + dev_dbg(priv->adapter->dev, + "setup TDLS link, peer=%pM rssi=%d\n", + peer->mac_addr, peer->rssi); + + cfg80211_tdls_oper_request(priv->netdev, + peer->mac_addr, + NL80211_TDLS_SETUP, + 0, GFP_ATOMIC); + peer->do_setup = false; + priv->check_tdls_tx = false; + } else if (peer->failure_count < + MWIFIEX_TDLS_MAX_FAIL_COUNT && + peer->do_discover) { + mwifiex_send_tdls_data_frame(priv, + peer->mac_addr, + WLAN_TDLS_DISCOVERY_REQUEST, + 1, 0, NULL, 0); + peer->do_discover = false; + } + } + } + spin_unlock_irqrestore(&priv->auto_tdls_lock, flags); + + return 0; +} + +void mwifiex_flush_auto_tdls_list(struct mwifiex_private *priv) +{ + struct mwifiex_auto_tdls_peer *peer, *tmp_node; + unsigned long flags; + + spin_lock_irqsave(&priv->auto_tdls_lock, flags); + list_for_each_entry_safe(peer, tmp_node, &priv->auto_tdls_list, list) { + list_del(&peer->list); + kfree(peer); + } + + INIT_LIST_HEAD(&priv->auto_tdls_list); + spin_unlock_irqrestore(&priv->auto_tdls_lock, flags); + priv->check_tdls_tx = false; +} + +void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac) +{ + struct mwifiex_auto_tdls_peer *tdls_peer; + unsigned long flags; + + if (!priv->adapter->auto_tdls) + return; + + spin_lock_irqsave(&priv->auto_tdls_lock, flags); + list_for_each_entry(tdls_peer, &priv->auto_tdls_list, list) { + if (!memcmp(tdls_peer->mac_addr, mac, ETH_ALEN)) { + tdls_peer->tdls_status = TDLS_SETUP_INPROGRESS; + tdls_peer->rssi_jiffies = jiffies; + spin_unlock_irqrestore(&priv->auto_tdls_lock, flags); + return; + } + } + + /* create new TDLS peer */ + tdls_peer 
= kzalloc(sizeof(*tdls_peer), GFP_ATOMIC); + if (tdls_peer) { + ether_addr_copy(tdls_peer->mac_addr, mac); + tdls_peer->tdls_status = TDLS_SETUP_INPROGRESS; + tdls_peer->rssi_jiffies = jiffies; + INIT_LIST_HEAD(&tdls_peer->list); + list_add_tail(&tdls_peer->list, &priv->auto_tdls_list); + dev_dbg(priv->adapter->dev, "Add auto TDLS peer= %pM to list\n", + mac); + } + + spin_unlock_irqrestore(&priv->auto_tdls_lock, flags); +} + +void mwifiex_auto_tdls_update_peer_status(struct mwifiex_private *priv, + const u8 *mac, u8 link_status) +{ + struct mwifiex_auto_tdls_peer *peer; + unsigned long flags; + + if (!priv->adapter->auto_tdls) + return; + + spin_lock_irqsave(&priv->auto_tdls_lock, flags); + list_for_each_entry(peer, &priv->auto_tdls_list, list) { + if (!memcmp(peer->mac_addr, mac, ETH_ALEN)) { + if ((link_status == TDLS_NOT_SETUP) && + (peer->tdls_status == TDLS_SETUP_INPROGRESS)) + peer->failure_count++; + else if (link_status == TDLS_SETUP_COMPLETE) + peer->failure_count = 0; + + peer->tdls_status = link_status; + break; + } + } + spin_unlock_irqrestore(&priv->auto_tdls_lock, flags); +} + +void mwifiex_auto_tdls_update_peer_signal(struct mwifiex_private *priv, + u8 *mac, s8 snr, s8 nflr) +{ + struct mwifiex_auto_tdls_peer *peer; + unsigned long flags; + + if (!priv->adapter->auto_tdls) + return; + + spin_lock_irqsave(&priv->auto_tdls_lock, flags); + list_for_each_entry(peer, &priv->auto_tdls_list, list) { + if (!memcmp(peer->mac_addr, mac, ETH_ALEN)) { + peer->rssi = nflr - snr; + peer->rssi_jiffies = jiffies; + break; + } + } + spin_unlock_irqrestore(&priv->auto_tdls_lock, flags); +} + +void mwifiex_check_auto_tdls(unsigned long context) +{ + struct mwifiex_private *priv = (struct mwifiex_private *)context; + struct mwifiex_auto_tdls_peer *tdls_peer; + unsigned long flags; + u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED; + + if (WARN_ON_ONCE(!priv || !priv->adapter)) { + pr_err("mwifiex: %s: adapter or private structure is NULL\n", + __func__); + return; + } + + if (unlikely(!priv->adapter->auto_tdls)) + return; + + if (!priv->auto_tdls_timer_active) { + dev_dbg(priv->adapter->dev, + "auto TDLS timer inactive; return"); + return; + } + + priv->check_tdls_tx = false; + + if (list_empty(&priv->auto_tdls_list)) { + mod_timer(&priv->auto_tdls_timer, + jiffies + + msecs_to_jiffies(MWIFIEX_TIMER_10S)); + return; + } + + spin_lock_irqsave(&priv->auto_tdls_lock, flags); + list_for_each_entry(tdls_peer, &priv->auto_tdls_list, list) { + if ((jiffies - tdls_peer->rssi_jiffies) > + (MWIFIEX_AUTO_TDLS_IDLE_TIME * HZ)) { + tdls_peer->rssi = 0; + tdls_peer->do_discover = true; + priv->check_tdls_tx = true; + } + + if (((tdls_peer->rssi >= MWIFIEX_TDLS_RSSI_LOW) || + !tdls_peer->rssi) && + tdls_peer->tdls_status == TDLS_SETUP_COMPLETE) { + tdls_peer->tdls_status = TDLS_LINK_TEARDOWN; + dev_dbg(priv->adapter->dev, + "teardown TDLS link,peer=%pM rssi=%d\n", + tdls_peer->mac_addr, -tdls_peer->rssi); + tdls_peer->do_discover = true; + priv->check_tdls_tx = true; + cfg80211_tdls_oper_request(priv->netdev, + tdls_peer->mac_addr, + NL80211_TDLS_TEARDOWN, + reason, GFP_ATOMIC); + } else if (tdls_peer->rssi && + tdls_peer->rssi <= MWIFIEX_TDLS_RSSI_HIGH && + tdls_peer->tdls_status == TDLS_NOT_SETUP && + tdls_peer->failure_count < + MWIFIEX_TDLS_MAX_FAIL_COUNT) { + priv->check_tdls_tx = true; + tdls_peer->do_setup = true; + dev_dbg(priv->adapter->dev, + "check TDLS with peer=%pM rssi=%d\n", + tdls_peer->mac_addr, -tdls_peer->rssi); + } + } + spin_unlock_irqrestore(&priv->auto_tdls_lock, flags); + + 
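/* re-arm: the auto-TDLS peer list is re-evaluated every 10 seconds */ +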
mod_timer(&priv->auto_tdls_timer, + jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S)); +} + +void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv) +{ + init_timer(&priv->auto_tdls_timer); + priv->auto_tdls_timer.function = mwifiex_check_auto_tdls; + priv->auto_tdls_timer.data = (unsigned long)priv; + priv->auto_tdls_timer_active = true; + mod_timer(&priv->auto_tdls_timer, + jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S)); +} + +void mwifiex_clean_auto_tdls(struct mwifiex_private *priv) +{ + if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) && + priv->adapter->auto_tdls && + priv->bss_type == MWIFIEX_BSS_TYPE_STA) { + priv->auto_tdls_timer_active = false; + del_timer(&priv->auto_tdls_timer); + mwifiex_flush_auto_tdls_list(priv); + } +} diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c index 96a2126cc44bf81da15f8c17d6dc7895cad5abce..6ae133333363a75d76961517b6174561e6bd7aa2 100644 --- a/drivers/net/wireless/mwifiex/txrx.c +++ b/drivers/net/wireless/mwifiex/txrx.c @@ -64,10 +64,6 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter, else ret = mwifiex_process_sta_rx_packet(priv, skb); - /* Decrement RX pending counter for each packet */ - if (adapter->if_ops.data_complete) - adapter->if_ops.data_complete(adapter); - return ret; } EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet); @@ -207,3 +203,34 @@ done: } EXPORT_SYMBOL_GPL(mwifiex_write_data_complete); +void mwifiex_parse_tx_status_event(struct mwifiex_private *priv, + void *event_body) +{ + struct tx_status_event *tx_status = (void *)priv->adapter->event_body; + struct sk_buff *ack_skb; + unsigned long flags; + struct mwifiex_txinfo *tx_info; + + if (!tx_status->tx_token_id) + return; + + spin_lock_irqsave(&priv->ack_status_lock, flags); + ack_skb = idr_find(&priv->ack_status_frames, tx_status->tx_token_id); + if (ack_skb) + idr_remove(&priv->ack_status_frames, tx_status->tx_token_id); + spin_unlock_irqrestore(&priv->ack_status_lock, flags); + + if (ack_skb) { + tx_info = MWIFIEX_SKB_TXCB(ack_skb); + + if (tx_info->flags & MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS) { + /* consumes ack_skb */ + skb_complete_wifi_ack(ack_skb, !tx_status->status); + } else { + cfg80211_mgmt_tx_status(priv->wdev, tx_info->cookie, + ack_skb->data, ack_skb->len, + !tx_status->status, GFP_ATOMIC); + dev_kfree_skb_any(ack_skb); + } + } +} diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c index 300bab4380117076dadaf51b4eaa516b7f799044..0f347fdefa0a535c2359c2309ae7fb83fa506699 100644 --- a/drivers/net/wireless/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/mwifiex/uap_cmd.c @@ -167,7 +167,7 @@ mwifiex_set_ht_params(struct mwifiex_private *priv, ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, params->beacon.tail, params->beacon.tail_len); if (ht_ie) { - memcpy(&bss_cfg->ht_cap, ht_ie + 2, + memcpy(&bss_cfg->ht_cap, ht_ie, sizeof(struct ieee80211_ht_cap)); cap_info = le16_to_cpu(bss_cfg->ht_cap.cap_info); memset(&bss_cfg->ht_cap.mcs, 0, diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c index 7c2b97660a032ccdb424b5783e38c0680f5599c3..c54a537e31fbd2daa33da36e4d08a665778cf8be 100644 --- a/drivers/net/wireless/mwifiex/uap_event.c +++ b/drivers/net/wireless/mwifiex/uap_event.c @@ -110,6 +110,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, deauth_mac); mwifiex_del_tx_ba_stream_tbl_by_ra(priv, deauth_mac); } + mwifiex_wmm_del_peer_ra_list(priv, deauth_mac); mwifiex_del_sta_entry(priv, 
deauth_mac); break; case EVENT_UAP_BSS_IDLE: @@ -172,6 +173,10 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) return mwifiex_handle_event_ext_scan_report(priv, adapter->event_skb->data); break; + case EVENT_TX_STATUS_REPORT: + dev_dbg(adapter->dev, "event: TX_STATUS Report\n"); + mwifiex_parse_tx_status_event(priv, adapter->event_body); + break; default: dev_dbg(adapter->dev, "event: unknown event id: %#x\n", eventcause); diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c index ec7309d096abaf5a11845eea72659374e318fce1..be3a203a529bcbad36610fd21adda816bc39bc5b 100644 --- a/drivers/net/wireless/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/mwifiex/uap_txrx.c @@ -266,6 +266,7 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv, struct rx_packet_hdr *rx_pkt_hdr; u16 rx_pkt_type; u8 ta[ETH_ALEN], pkt_type; + unsigned long flags; struct mwifiex_sta_node *node; uap_rx_pd = (struct uap_rxpd *)(skb->data); @@ -294,10 +295,12 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv, memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN); if (rx_pkt_type != PKT_TYPE_BAR && uap_rx_pd->priority < MAX_NUM_TID) { + spin_lock_irqsave(&priv->sta_list_spinlock, flags); node = mwifiex_get_sta_entry(priv, ta); if (node) node->rx_seq[uap_rx_pd->priority] = le16_to_cpu(uap_rx_pd->seq_num); + spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); } if (!priv->ap_11n_enabled || @@ -370,10 +373,16 @@ void *mwifiex_process_uap_txpd(struct mwifiex_private *priv, txpd->bss_num = priv->bss_num; txpd->bss_type = priv->bss_type; txpd->tx_pkt_length = cpu_to_le16((u16)(skb->len - len)); - txpd->priority = (u8)skb->priority; + txpd->pkt_delay_2ms = mwifiex_wmm_compute_drv_pkt_delay(priv, skb); + if (tx_info->flags & MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS || + tx_info->flags & MWIFIEX_BUF_FLAG_ACTION_TX_STATUS) { + txpd->tx_token_id = tx_info->ack_frame_id; + txpd->flags |= MWIFIEX_TXPD_FLAGS_REQ_TX_STATUS; + } + if (txpd->priority < ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl)) /* * Set the priority specific tx_control field, setting of 0 will diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c index 4371e12b36f38af6e0946969288ce918dc8487b1..1b56495ec8724e55b8c1161b8f5ff900fee0040f 100644 --- a/drivers/net/wireless/mwifiex/usb.c +++ b/drivers/net/wireless/mwifiex/usb.c @@ -27,6 +27,11 @@ static struct mwifiex_if_ops usb_ops; static struct semaphore add_remove_card_sem; static struct usb_device_id mwifiex_usb_table[] = { + /* 8766 */ + {USB_DEVICE(USB8XXX_VID, USB8766_PID_1)}, + {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8766_PID_2, + USB_CLASS_VENDOR_SPEC, + USB_SUBCLASS_VENDOR_SPEC, 0xff)}, /* 8797 */ {USB_DEVICE(USB8XXX_VID, USB8797_PID_1)}, {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8797_PID_2, @@ -125,8 +130,10 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter, dev_err(dev, "DATA: skb->len too large\n"); return -1; } - skb_queue_tail(&adapter->usb_rx_data_q, skb); + + skb_queue_tail(&adapter->rx_data_q, skb); adapter->data_received = true; + atomic_inc(&adapter->rx_pending); break; default: dev_err(dev, "%s: unknown endport %#x\n", __func__, ep); @@ -176,7 +183,6 @@ static void mwifiex_usb_rx_complete(struct urb *urb) else skb_put(skb, recv_length - skb->len); - atomic_inc(&adapter->rx_pending); status = mwifiex_usb_recv(adapter, skb, context->ep); dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n", @@ -191,7 +197,6 @@ static void mwifiex_usb_rx_complete(struct urb *urb) if 
(card->rx_cmd_ep == context->ep) return; } else { - atomic_dec(&adapter->rx_pending); if (status == -1) dev_err(adapter->dev, "received data processing failed!\n"); @@ -222,7 +227,13 @@ setup_for_next: else size = MWIFIEX_RX_DATA_BUF_SIZE; - mwifiex_usb_submit_rx_urb(context, size); + if (card->rx_cmd_ep == context->ep) { + mwifiex_usb_submit_rx_urb(context, size); + } else { + context->skb = NULL; + if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING) + mwifiex_usb_submit_rx_urb(context, size); + } return; } @@ -348,10 +359,12 @@ static int mwifiex_usb_probe(struct usb_interface *intf, /* PID_1 is used for firmware downloading only */ switch (id_product) { + case USB8766_PID_1: case USB8797_PID_1: case USB8897_PID_1: card->usb_boot_state = USB8XXX_FW_DNLD; break; + case USB8766_PID_2: case USB8797_PID_2: case USB8897_PID_2: card->usb_boot_state = USB8XXX_FW_READY; @@ -780,6 +793,11 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K; strcpy(adapter->fw_name, USB8897_DEFAULT_FW_NAME); break; + case USB8766_PID_1: + case USB8766_PID_2: + adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K; + strcpy(adapter->fw_name, USB8766_DEFAULT_FW_NAME); + break; case USB8797_PID_1: case USB8797_PID_2: default: @@ -962,19 +980,11 @@ static void mwifiex_submit_rx_urb(struct mwifiex_adapter *adapter, u8 ep) static int mwifiex_usb_cmd_event_complete(struct mwifiex_adapter *adapter, struct sk_buff *skb) { - atomic_dec(&adapter->rx_pending); mwifiex_submit_rx_urb(adapter, MWIFIEX_USB_EP_CMD_EVENT); return 0; } -static int mwifiex_usb_data_complete(struct mwifiex_adapter *adapter) -{ - atomic_dec(&adapter->rx_pending); - - return 0; -} - /* This function wakes up the card. */ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) { @@ -986,6 +996,20 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) return 0; } +static void mwifiex_usb_submit_rem_rx_urbs(struct mwifiex_adapter *adapter) +{ + struct usb_card_rec *card = (struct usb_card_rec *)adapter->card; + int i; + struct urb_context *ctx; + + for (i = 0; i < MWIFIEX_RX_DATA_URB; i++) { + if (card->rx_data_list[i].skb) + continue; + ctx = &card->rx_data_list[i]; + mwifiex_usb_submit_rx_urb(ctx, MWIFIEX_RX_DATA_BUF_SIZE); + } +} + static struct mwifiex_if_ops usb_ops = { .register_dev = mwifiex_register_dev, .unregister_dev = mwifiex_unregister_dev, @@ -996,8 +1020,8 @@ static struct mwifiex_if_ops usb_ops = { .dnld_fw = mwifiex_usb_dnld_fw, .cmdrsp_complete = mwifiex_usb_cmd_event_complete, .event_complete = mwifiex_usb_cmd_event_complete, - .data_complete = mwifiex_usb_data_complete, .host_to_card = mwifiex_usb_host_to_card, + .submit_rem_rx_urbs = mwifiex_usb_submit_rem_rx_urbs, }; /* This function initializes the USB driver module. 
@@ -1048,5 +1072,6 @@ MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell WiFi-Ex USB Driver version" USB_VERSION); MODULE_VERSION(USB_VERSION); MODULE_LICENSE("GPL v2"); +MODULE_FIRMWARE(USB8766_DEFAULT_FW_NAME); MODULE_FIRMWARE(USB8797_DEFAULT_FW_NAME); MODULE_FIRMWARE(USB8897_DEFAULT_FW_NAME); diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/mwifiex/usb.h index 4c41c2a193c553106f61010574a0e656c49cfc2f..a7cbba1355af7df25d9d3f899022884809aad3e6 100644 --- a/drivers/net/wireless/mwifiex/usb.h +++ b/drivers/net/wireless/mwifiex/usb.h @@ -24,6 +24,8 @@ #define USB8XXX_VID 0x1286 +#define USB8766_PID_1 0x2041 +#define USB8766_PID_2 0x2042 #define USB8797_PID_1 0x2043 #define USB8797_PID_2 0x2044 #define USB8897_PID_1 0x2045 @@ -37,6 +39,7 @@ #define MWIFIEX_RX_DATA_URB 6 #define MWIFIEX_USB_TIMEOUT 100 +#define USB8766_DEFAULT_FW_NAME "mrvl/usb8766_uapsta.bin" #define USB8797_DEFAULT_FW_NAME "mrvl/usb8797_uapsta.bin" #define USB8897_DEFAULT_FW_NAME "mrvl/usb8897_uapsta.bin" diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c index ec79c49de0975651a5e2cdebdd058fc312308537..b1768fbf98f2235a9c762b8af4a2fcc2fdf245a9 100644 --- a/drivers/net/wireless/mwifiex/util.c +++ b/drivers/net/wireless/mwifiex/util.c @@ -141,6 +141,38 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv, return 0; } +static int +mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len, + struct rxpd *rx_pd) +{ + u16 stype; + u8 category, action_code; + struct ieee80211_hdr *ieee_hdr = (void *)payload; + + stype = (le16_to_cpu(ieee_hdr->frame_control) & IEEE80211_FCTL_STYPE); + + switch (stype) { + case IEEE80211_STYPE_ACTION: + category = *(payload + sizeof(struct ieee80211_hdr)); + action_code = *(payload + sizeof(struct ieee80211_hdr) + 1); + if (category == WLAN_CATEGORY_PUBLIC && + action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) { + dev_dbg(priv->adapter->dev, + "TDLS discovery response %pM nf=%d, snr=%d\n", + ieee_hdr->addr2, rx_pd->nf, rx_pd->snr); + mwifiex_auto_tdls_update_peer_signal(priv, + ieee_hdr->addr2, + rx_pd->snr, + rx_pd->nf); + } + break; + default: + dev_dbg(priv->adapter->dev, + "unknown mgmt frame subtype %#x\n", stype); + } + + return 0; +} /* * This function processes the received management packet and sends it * to the kernel.
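/*
 * Standalone sketch of the classification done by the new
 * mwifiex_parse_mgmt_packet() above: mask the subtype bits out of the
 * little-endian frame-control field, then read the category and action
 * code that follow the 802.11 header.  The 4-address (30 byte) header
 * length is an assumption based on the memmove below that strips
 * address4; the constants mirror their mac80211 values.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define IEEE80211_FCTL_STYPE              0x00f0
#define IEEE80211_STYPE_ACTION            0x00d0
#define WLAN_CATEGORY_PUBLIC              4
#define WLAN_PUB_ACTION_TDLS_DISCOVER_RES 14
#define HDRLEN_4ADDR                      30 /* fc+dur+addr1..3+seq+addr4 */

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static void parse_mgmt(const uint8_t *payload, size_t len)
{
	uint16_t stype = get_le16(payload) & IEEE80211_FCTL_STYPE;

	if (stype != IEEE80211_STYPE_ACTION || len < HDRLEN_4ADDR + 2) {
		printf("ignored mgmt frame, subtype %#x\n", stype);
		return;
	}

	uint8_t category    = payload[HDRLEN_4ADDR];
	uint8_t action_code = payload[HDRLEN_4ADDR + 1];

	if (category == WLAN_CATEGORY_PUBLIC &&
	    action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)
		printf("TDLS discovery response\n");
	else
		printf("other action frame: category %u, action %u\n",
		       category, action_code);
}

int main(void)
{
	uint8_t frame[32] = { 0xd0, 0x00 }; /* frame control: mgmt, action */

	frame[HDRLEN_4ADDR]     = WLAN_CATEGORY_PUBLIC;
	frame[HDRLEN_4ADDR + 1] = WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
	parse_mgmt(frame, sizeof(frame));
	return 0;
}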
@@ -151,6 +183,7 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv, { struct rxpd *rx_pd; u16 pkt_len; + struct ieee80211_hdr *ieee_hdr; if (!skb) return -1; @@ -162,6 +195,11 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv, pkt_len = le16_to_cpu(rx_pd->rx_pkt_length); + ieee_hdr = (void *)skb->data; + if (ieee80211_is_mgmt(ieee_hdr->frame_control)) { + mwifiex_parse_mgmt_packet(priv, (u8 *)ieee_hdr, + pkt_len, rx_pd); + } /* Remove address4 */ memmove(skb->data + sizeof(struct ieee80211_hdr_3addr), skb->data + sizeof(struct ieee80211_hdr), diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 94c98a86ebbec84bc83fe8c6c9f3e879c09df627..ffffd2c5a76e3d523930ac6760ec9b4aca97ec99 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c @@ -147,9 +147,6 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra) struct mwifiex_sta_node *node; unsigned long flags; - spin_lock_irqsave(&priv->sta_list_spinlock, flags); - node = mwifiex_get_sta_entry(priv, ra); - spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); for (i = 0; i < MAX_NUM_TID; ++i) { ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra); @@ -170,10 +167,13 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra) ra_list->is_11n_enabled = IS_11N_ENABLED(priv); } } else { + spin_lock_irqsave(&priv->sta_list_spinlock, flags); + node = mwifiex_get_sta_entry(priv, ra); ra_list->is_11n_enabled = mwifiex_is_sta_11n_enabled(priv, node); if (ra_list->is_11n_enabled) ra_list->max_amsdu = node->max_amsdu; + spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); } dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n", @@ -523,6 +523,13 @@ static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv) } } +static int mwifiex_free_ack_frame(int id, void *p, void *data) +{ + pr_warn("Have pending ack frames!\n"); + kfree_skb(p); + return 0; +} + /* * This function cleans up the Tx and Rx queues. * @@ -558,6 +565,9 @@ mwifiex_clean_txrx(struct mwifiex_private *priv) skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) mwifiex_write_data_complete(priv->adapter, skb, 0, -1); + + idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL); + idr_destroy(&priv->ack_status_frames); } /* @@ -600,6 +610,32 @@ mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, return mwifiex_wmm_get_ralist_node(priv, tid, ra_addr); } +/* + * This function deletes RA list nodes for given mac for all TIDs. + * Function also decrements TX pending count accordingly. + */ +void +mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, const u8 *ra_addr) +{ + struct mwifiex_ra_list_tbl *ra_list; + unsigned long flags; + int i; + + spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags); + + for (i = 0; i < MAX_NUM_TID; ++i) { + ra_list = mwifiex_wmm_get_ralist_node(priv, i, ra_addr); + + if (!ra_list) + continue; + mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list); + atomic_sub(ra_list->total_pkt_count, &priv->wmm.tx_pkts_queued); + list_del(&ra_list->list); + kfree(ra_list); + } + spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); +} + /* * This function checks if a particular RA list node exists in a given TID * table index. 
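/*
 * Userspace analogue of the teardown added to mwifiex_clean_txrx() above:
 * walk every entry still sitting in the pending-ack ID map, warn and free
 * it (as mwifiex_free_ack_frame() does), then destroy the map itself.  A
 * fixed-size pointer array stands in for the kernel IDR and free() for
 * kfree_skb(); this illustrates the pattern and is not driver code.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_IDS 16

struct idmap {
	void *slot[MAX_IDS];
};

/* Store ptr under the first free id; the kernel uses idr_alloc(). */
static int idmap_alloc(struct idmap *m, void *ptr)
{
	for (int id = 0; id < MAX_IDS; id++) {
		if (!m->slot[id]) {
			m->slot[id] = ptr;
			return id;
		}
	}
	return -1;
}

/* Same shape as mwifiex_free_ack_frame(): called once per live entry. */
static int free_ack_frame(int id, void *p)
{
	fprintf(stderr, "warning: pending ack frame (id %d) at teardown\n", id);
	free(p);
	return 0;
}

static void idmap_for_each(struct idmap *m, int (*fn)(int, void *))
{
	for (int id = 0; id < MAX_IDS; id++)
		if (m->slot[id])
			fn(id, m->slot[id]);
}

static void idmap_destroy(struct idmap *m)
{
	for (int id = 0; id < MAX_IDS; id++)
		m->slot[id] = NULL; /* kernel: releases IDR bookkeeping */
}

int main(void)
{
	struct idmap pending = { { 0 } };

	idmap_alloc(&pending, malloc(64)); /* tx frames awaiting status */
	idmap_alloc(&pending, malloc(64));

	/* the clean_txrx() path: free leftovers, then destroy the map */
	idmap_for_each(&pending, free_ack_frame);
	idmap_destroy(&pending);
	return 0;
}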
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index ef1104476bd8e1c8ee356630fcf3b32f73b0d2e6..b8d1e04aa9b94bd7bf2d8dde1539ff99443a4654 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -5548,7 +5548,9 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return rc; } -static void mwl8k_sw_scan_start(struct ieee80211_hw *hw) +static void mwl8k_sw_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *mac_addr) { struct mwl8k_priv *priv = hw->priv; u8 tmp; @@ -5565,7 +5567,8 @@ static void mwl8k_sw_scan_start(struct ieee80211_hw *hw) priv->sw_scan_start = true; } -static void mwl8k_sw_scan_complete(struct ieee80211_hw *hw) +static void mwl8k_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct mwl8k_priv *priv = hw->priv; u8 tmp; diff --git a/drivers/net/wireless/p54/net2280.h b/drivers/net/wireless/p54/net2280.h deleted file mode 100644 index aedfaf24f38602954e498c70d81851927f3a4e01..0000000000000000000000000000000000000000 --- a/drivers/net/wireless/p54/net2280.h +++ /dev/null @@ -1,451 +0,0 @@ -#ifndef NET2280_H -#define NET2280_H -/* - * NetChip 2280 high/full speed USB device controller. - * Unlike many such controllers, this one talks PCI. - */ - -/* - * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com) - * Copyright (C) 2003 David Brownell - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - */ - -/*-------------------------------------------------------------------------*/ - -/* NET2280 MEMORY MAPPED REGISTERS - * - * The register layout came from the chip documentation, and the bit - * number definitions were extracted from chip specification. - * - * Use the shift operator ('<<') to build bit masks, with readl/writel - * to access the registers through PCI. - */ - -/* main registers, BAR0 + 0x0000 */ -struct net2280_regs { - /* offset 0x0000 */ - __le32 devinit; -#define LOCAL_CLOCK_FREQUENCY 8 -#define FORCE_PCI_RESET 7 -#define PCI_ID 6 -#define PCI_ENABLE 5 -#define FIFO_SOFT_RESET 4 -#define CFG_SOFT_RESET 3 -#define PCI_SOFT_RESET 2 -#define USB_SOFT_RESET 1 -#define M8051_RESET 0 - __le32 eectl; -#define EEPROM_ADDRESS_WIDTH 23 -#define EEPROM_CHIP_SELECT_ACTIVE 22 -#define EEPROM_PRESENT 21 -#define EEPROM_VALID 20 -#define EEPROM_BUSY 19 -#define EEPROM_CHIP_SELECT_ENABLE 18 -#define EEPROM_BYTE_READ_START 17 -#define EEPROM_BYTE_WRITE_START 16 -#define EEPROM_READ_DATA 8 -#define EEPROM_WRITE_DATA 0 - __le32 eeclkfreq; - u32 _unused0; - /* offset 0x0010 */ - - __le32 pciirqenb0; /* interrupt PCI master ...
*/ -#define SETUP_PACKET_INTERRUPT_ENABLE 7 -#define ENDPOINT_F_INTERRUPT_ENABLE 6 -#define ENDPOINT_E_INTERRUPT_ENABLE 5 -#define ENDPOINT_D_INTERRUPT_ENABLE 4 -#define ENDPOINT_C_INTERRUPT_ENABLE 3 -#define ENDPOINT_B_INTERRUPT_ENABLE 2 -#define ENDPOINT_A_INTERRUPT_ENABLE 1 -#define ENDPOINT_0_INTERRUPT_ENABLE 0 - __le32 pciirqenb1; -#define PCI_INTERRUPT_ENABLE 31 -#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27 -#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26 -#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25 -#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20 -#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19 -#define PCI_TARGET_ABORT_ASSERTED_INTERRUPT_ENABLE 18 -#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17 -#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16 -#define GPIO_INTERRUPT_ENABLE 13 -#define DMA_D_INTERRUPT_ENABLE 12 -#define DMA_C_INTERRUPT_ENABLE 11 -#define DMA_B_INTERRUPT_ENABLE 10 -#define DMA_A_INTERRUPT_ENABLE 9 -#define EEPROM_DONE_INTERRUPT_ENABLE 8 -#define VBUS_INTERRUPT_ENABLE 7 -#define CONTROL_STATUS_INTERRUPT_ENABLE 6 -#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4 -#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3 -#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2 -#define RESUME_INTERRUPT_ENABLE 1 -#define SOF_INTERRUPT_ENABLE 0 - __le32 cpu_irqenb0; /* ... or onboard 8051 */ -#define SETUP_PACKET_INTERRUPT_ENABLE 7 -#define ENDPOINT_F_INTERRUPT_ENABLE 6 -#define ENDPOINT_E_INTERRUPT_ENABLE 5 -#define ENDPOINT_D_INTERRUPT_ENABLE 4 -#define ENDPOINT_C_INTERRUPT_ENABLE 3 -#define ENDPOINT_B_INTERRUPT_ENABLE 2 -#define ENDPOINT_A_INTERRUPT_ENABLE 1 -#define ENDPOINT_0_INTERRUPT_ENABLE 0 - __le32 cpu_irqenb1; -#define CPU_INTERRUPT_ENABLE 31 -#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27 -#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26 -#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25 -#define PCI_INTA_INTERRUPT_ENABLE 24 -#define PCI_PME_INTERRUPT_ENABLE 23 -#define PCI_SERR_INTERRUPT_ENABLE 22 -#define PCI_PERR_INTERRUPT_ENABLE 21 -#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20 -#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19 -#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17 -#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16 -#define GPIO_INTERRUPT_ENABLE 13 -#define DMA_D_INTERRUPT_ENABLE 12 -#define DMA_C_INTERRUPT_ENABLE 11 -#define DMA_B_INTERRUPT_ENABLE 10 -#define DMA_A_INTERRUPT_ENABLE 9 -#define EEPROM_DONE_INTERRUPT_ENABLE 8 -#define VBUS_INTERRUPT_ENABLE 7 -#define CONTROL_STATUS_INTERRUPT_ENABLE 6 -#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4 -#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3 -#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2 -#define RESUME_INTERRUPT_ENABLE 1 -#define SOF_INTERRUPT_ENABLE 0 - - /* offset 0x0020 */ - u32 _unused1; - __le32 usbirqenb1; -#define USB_INTERRUPT_ENABLE 31 -#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27 -#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26 -#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25 -#define PCI_INTA_INTERRUPT_ENABLE 24 -#define PCI_PME_INTERRUPT_ENABLE 23 -#define PCI_SERR_INTERRUPT_ENABLE 22 -#define PCI_PERR_INTERRUPT_ENABLE 21 -#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20 -#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19 -#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17 -#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16 -#define GPIO_INTERRUPT_ENABLE 13 -#define DMA_D_INTERRUPT_ENABLE 12 -#define DMA_C_INTERRUPT_ENABLE 11 -#define DMA_B_INTERRUPT_ENABLE 10 -#define DMA_A_INTERRUPT_ENABLE 9 -#define EEPROM_DONE_INTERRUPT_ENABLE 8 -#define VBUS_INTERRUPT_ENABLE 7 -#define 
CONTROL_STATUS_INTERRUPT_ENABLE 6 -#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4 -#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3 -#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2 -#define RESUME_INTERRUPT_ENABLE 1 -#define SOF_INTERRUPT_ENABLE 0 - __le32 irqstat0; -#define INTA_ASSERTED 12 -#define SETUP_PACKET_INTERRUPT 7 -#define ENDPOINT_F_INTERRUPT 6 -#define ENDPOINT_E_INTERRUPT 5 -#define ENDPOINT_D_INTERRUPT 4 -#define ENDPOINT_C_INTERRUPT 3 -#define ENDPOINT_B_INTERRUPT 2 -#define ENDPOINT_A_INTERRUPT 1 -#define ENDPOINT_0_INTERRUPT 0 - __le32 irqstat1; -#define POWER_STATE_CHANGE_INTERRUPT 27 -#define PCI_ARBITER_TIMEOUT_INTERRUPT 26 -#define PCI_PARITY_ERROR_INTERRUPT 25 -#define PCI_INTA_INTERRUPT 24 -#define PCI_PME_INTERRUPT 23 -#define PCI_SERR_INTERRUPT 22 -#define PCI_PERR_INTERRUPT 21 -#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT 20 -#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT 19 -#define PCI_RETRY_ABORT_INTERRUPT 17 -#define PCI_MASTER_CYCLE_DONE_INTERRUPT 16 -#define GPIO_INTERRUPT 13 -#define DMA_D_INTERRUPT 12 -#define DMA_C_INTERRUPT 11 -#define DMA_B_INTERRUPT 10 -#define DMA_A_INTERRUPT 9 -#define EEPROM_DONE_INTERRUPT 8 -#define VBUS_INTERRUPT 7 -#define CONTROL_STATUS_INTERRUPT 6 -#define ROOT_PORT_RESET_INTERRUPT 4 -#define SUSPEND_REQUEST_INTERRUPT 3 -#define SUSPEND_REQUEST_CHANGE_INTERRUPT 2 -#define RESUME_INTERRUPT 1 -#define SOF_INTERRUPT 0 - /* offset 0x0030 */ - __le32 idxaddr; - __le32 idxdata; - __le32 fifoctl; -#define PCI_BASE2_RANGE 16 -#define IGNORE_FIFO_AVAILABILITY 3 -#define PCI_BASE2_SELECT 2 -#define FIFO_CONFIGURATION_SELECT 0 - u32 _unused2; - /* offset 0x0040 */ - __le32 memaddr; -#define START 28 -#define DIRECTION 27 -#define FIFO_DIAGNOSTIC_SELECT 24 -#define MEMORY_ADDRESS 0 - __le32 memdata0; - __le32 memdata1; - u32 _unused3; - /* offset 0x0050 */ - __le32 gpioctl; -#define GPIO3_LED_SELECT 12 -#define GPIO3_INTERRUPT_ENABLE 11 -#define GPIO2_INTERRUPT_ENABLE 10 -#define GPIO1_INTERRUPT_ENABLE 9 -#define GPIO0_INTERRUPT_ENABLE 8 -#define GPIO3_OUTPUT_ENABLE 7 -#define GPIO2_OUTPUT_ENABLE 6 -#define GPIO1_OUTPUT_ENABLE 5 -#define GPIO0_OUTPUT_ENABLE 4 -#define GPIO3_DATA 3 -#define GPIO2_DATA 2 -#define GPIO1_DATA 1 -#define GPIO0_DATA 0 - __le32 gpiostat; -#define GPIO3_INTERRUPT 3 -#define GPIO2_INTERRUPT 2 -#define GPIO1_INTERRUPT 1 -#define GPIO0_INTERRUPT 0 -} __packed; - -/* usb control, BAR0 + 0x0080 */ -struct net2280_usb_regs { - /* offset 0x0080 */ - __le32 stdrsp; -#define STALL_UNSUPPORTED_REQUESTS 31 -#define SET_TEST_MODE 16 -#define GET_OTHER_SPEED_CONFIGURATION 15 -#define GET_DEVICE_QUALIFIER 14 -#define SET_ADDRESS 13 -#define ENDPOINT_SET_CLEAR_HALT 12 -#define DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP 11 -#define GET_STRING_DESCRIPTOR_2 10 -#define GET_STRING_DESCRIPTOR_1 9 -#define GET_STRING_DESCRIPTOR_0 8 -#define GET_SET_INTERFACE 6 -#define GET_SET_CONFIGURATION 5 -#define GET_CONFIGURATION_DESCRIPTOR 4 -#define GET_DEVICE_DESCRIPTOR 3 -#define GET_ENDPOINT_STATUS 2 -#define GET_INTERFACE_STATUS 1 -#define GET_DEVICE_STATUS 0 - __le32 prodvendid; -#define PRODUCT_ID 16 -#define VENDOR_ID 0 - __le32 relnum; - __le32 usbctl; -#define SERIAL_NUMBER_INDEX 16 -#define PRODUCT_ID_STRING_ENABLE 13 -#define VENDOR_ID_STRING_ENABLE 12 -#define USB_ROOT_PORT_WAKEUP_ENABLE 11 -#define VBUS_PIN 10 -#define TIMED_DISCONNECT 9 -#define SUSPEND_IMMEDIATELY 7 -#define SELF_POWERED_USB_DEVICE 6 -#define REMOTE_WAKEUP_SUPPORT 5 -#define PME_POLARITY 4 -#define USB_DETECT_ENABLE 3 -#define PME_WAKEUP_ENABLE 2 -#define 
DEVICE_REMOTE_WAKEUP_ENABLE 1 -#define SELF_POWERED_STATUS 0 - /* offset 0x0090 */ - __le32 usbstat; -#define HIGH_SPEED 7 -#define FULL_SPEED 6 -#define GENERATE_RESUME 5 -#define GENERATE_DEVICE_REMOTE_WAKEUP 4 - __le32 xcvrdiag; -#define FORCE_HIGH_SPEED_MODE 31 -#define FORCE_FULL_SPEED_MODE 30 -#define USB_TEST_MODE 24 -#define LINE_STATE 16 -#define TRANSCEIVER_OPERATION_MODE 2 -#define TRANSCEIVER_SELECT 1 -#define TERMINATION_SELECT 0 - __le32 setup0123; - __le32 setup4567; - /* offset 0x0090 */ - u32 _unused0; - __le32 ouraddr; -#define FORCE_IMMEDIATE 7 -#define OUR_USB_ADDRESS 0 - __le32 ourconfig; -} __packed; - -/* pci control, BAR0 + 0x0100 */ -struct net2280_pci_regs { - /* offset 0x0100 */ - __le32 pcimstctl; -#define PCI_ARBITER_PARK_SELECT 13 -#define PCI_MULTI LEVEL_ARBITER 12 -#define PCI_RETRY_ABORT_ENABLE 11 -#define DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE 10 -#define DMA_READ_MULTIPLE_ENABLE 9 -#define DMA_READ_LINE_ENABLE 8 -#define PCI_MASTER_COMMAND_SELECT 6 -#define MEM_READ_OR_WRITE 0 -#define IO_READ_OR_WRITE 1 -#define CFG_READ_OR_WRITE 2 -#define PCI_MASTER_START 5 -#define PCI_MASTER_READ_WRITE 4 -#define PCI_MASTER_WRITE 0 -#define PCI_MASTER_READ 1 -#define PCI_MASTER_BYTE_WRITE_ENABLES 0 - __le32 pcimstaddr; - __le32 pcimstdata; - __le32 pcimststat; -#define PCI_ARBITER_CLEAR 2 -#define PCI_EXTERNAL_ARBITER 1 -#define PCI_HOST_MODE 0 -} __packed; - -/* dma control, BAR0 + 0x0180 ... array of four structs like this, - * for channels 0..3. see also struct net2280_dma: descriptor - * that can be loaded into some of these registers. - */ -struct net2280_dma_regs { /* [11.7] */ - /* offset 0x0180, 0x01a0, 0x01c0, 0x01e0, */ - __le32 dmactl; -#define DMA_SCATTER_GATHER_DONE_INTERRUPT_ENABLE 25 -#define DMA_CLEAR_COUNT_ENABLE 21 -#define DESCRIPTOR_POLLING_RATE 19 -#define POLL_CONTINUOUS 0 -#define POLL_1_USEC 1 -#define POLL_100_USEC 2 -#define POLL_1_MSEC 3 -#define DMA_VALID_BIT_POLLING_ENABLE 18 -#define DMA_VALID_BIT_ENABLE 17 -#define DMA_SCATTER_GATHER_ENABLE 16 -#define DMA_OUT_AUTO_START_ENABLE 4 -#define DMA_PREEMPT_ENABLE 3 -#define DMA_FIFO_VALIDATE 2 -#define DMA_ENABLE 1 -#define DMA_ADDRESS_HOLD 0 - __le32 dmastat; -#define DMA_SCATTER_GATHER_DONE_INTERRUPT 25 -#define DMA_TRANSACTION_DONE_INTERRUPT 24 -#define DMA_ABORT 1 -#define DMA_START 0 - u32 _unused0[2]; - /* offset 0x0190, 0x01b0, 0x01d0, 0x01f0, */ - __le32 dmacount; -#define VALID_BIT 31 -#define DMA_DIRECTION 30 -#define DMA_DONE_INTERRUPT_ENABLE 29 -#define END_OF_CHAIN 28 -#define DMA_BYTE_COUNT_MASK ((1<<24)-1) -#define DMA_BYTE_COUNT 0 - __le32 dmaaddr; - __le32 dmadesc; - u32 _unused1; -} __packed; - -/* dedicated endpoint registers, BAR0 + 0x0200 */ - -struct net2280_dep_regs { /* [11.8] */ - /* offset 0x0200, 0x0210, 0x220, 0x230, 0x240 */ - __le32 dep_cfg; - /* offset 0x0204, 0x0214, 0x224, 0x234, 0x244 */ - __le32 dep_rsp; - u32 _unused[2]; -} __packed; - -/* configurable endpoint registers, BAR0 + 0x0300 ... 
array of seven structs - * like this, for ep0 then the configurable endpoints A..F - * ep0 reserved for control; E and F have only 64 bytes of fifo - */ -struct net2280_ep_regs { /* [11.9] */ - /* offset 0x0300, 0x0320, 0x0340, 0x0360, 0x0380, 0x03a0, 0x03c0 */ - __le32 ep_cfg; -#define ENDPOINT_BYTE_COUNT 16 -#define ENDPOINT_ENABLE 10 -#define ENDPOINT_TYPE 8 -#define ENDPOINT_DIRECTION 7 -#define ENDPOINT_NUMBER 0 - __le32 ep_rsp; -#define SET_NAK_OUT_PACKETS 15 -#define SET_EP_HIDE_STATUS_PHASE 14 -#define SET_EP_FORCE_CRC_ERROR 13 -#define SET_INTERRUPT_MODE 12 -#define SET_CONTROL_STATUS_PHASE_HANDSHAKE 11 -#define SET_NAK_OUT_PACKETS_MODE 10 -#define SET_ENDPOINT_TOGGLE 9 -#define SET_ENDPOINT_HALT 8 -#define CLEAR_NAK_OUT_PACKETS 7 -#define CLEAR_EP_HIDE_STATUS_PHASE 6 -#define CLEAR_EP_FORCE_CRC_ERROR 5 -#define CLEAR_INTERRUPT_MODE 4 -#define CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE 3 -#define CLEAR_NAK_OUT_PACKETS_MODE 2 -#define CLEAR_ENDPOINT_TOGGLE 1 -#define CLEAR_ENDPOINT_HALT 0 - __le32 ep_irqenb; -#define SHORT_PACKET_OUT_DONE_INTERRUPT_ENABLE 6 -#define SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE 5 -#define DATA_PACKET_RECEIVED_INTERRUPT_ENABLE 3 -#define DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE 2 -#define DATA_OUT_PING_TOKEN_INTERRUPT_ENABLE 1 -#define DATA_IN_TOKEN_INTERRUPT_ENABLE 0 - __le32 ep_stat; -#define FIFO_VALID_COUNT 24 -#define HIGH_BANDWIDTH_OUT_TRANSACTION_PID 22 -#define TIMEOUT 21 -#define USB_STALL_SENT 20 -#define USB_IN_NAK_SENT 19 -#define USB_IN_ACK_RCVD 18 -#define USB_OUT_PING_NAK_SENT 17 -#define USB_OUT_ACK_SENT 16 -#define FIFO_OVERFLOW 13 -#define FIFO_UNDERFLOW 12 -#define FIFO_FULL 11 -#define FIFO_EMPTY 10 -#define FIFO_FLUSH 9 -#define SHORT_PACKET_OUT_DONE_INTERRUPT 6 -#define SHORT_PACKET_TRANSFERRED_INTERRUPT 5 -#define NAK_OUT_PACKETS 4 -#define DATA_PACKET_RECEIVED_INTERRUPT 3 -#define DATA_PACKET_TRANSMITTED_INTERRUPT 2 -#define DATA_OUT_PING_TOKEN_INTERRUPT 1 -#define DATA_IN_TOKEN_INTERRUPT 0 - /* offset 0x0310, 0x0330, 0x0350, 0x0370, 0x0390, 0x03b0, 0x03d0 */ - __le32 ep_avail; - __le32 ep_data; - u32 _unused0[2]; -} __packed; - -struct net2280_reg_write { - __le16 port; - __le32 addr; - __le32 val; -} __packed; - -struct net2280_reg_read { - __le16 port; - __le32 addr; -} __packed; -#endif /* NET2280_H */ diff --git a/drivers/net/wireless/p54/p54usb.h b/drivers/net/wireless/p54/p54usb.h index d273be7272b96b68d319ef1d0502dfe2901c12e8..a5f5f0fea3bde03536f612d3c5192071c1d634ec 100644 --- a/drivers/net/wireless/p54/p54usb.h +++ b/drivers/net/wireless/p54/p54usb.h @@ -16,7 +16,7 @@ /* for isl3886 register definitions used on ver 1 devices */ #include "p54pci.h" -#include "net2280.h" +#include <linux/usb/net2280.h> /* pci */ #define NET2280_BASE 0x10000000 @@ -93,6 +93,17 @@ enum net2280_op_type { NET2280_DEV_CFG_U16 = 0x0883 }; +struct net2280_reg_write { + __le16 port; + __le32 addr; + __le32 val; +} __packed; + +struct net2280_reg_read { + __le16 port; + __le32 addr; +} __packed; + #define P54U_FW_BLOCK 2048 #define X2_SIGNATURE "x2 " diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c index d849d590de250b915ddda53ce64c703beef3215e..05c64597838d6610c876ad548b2b6b6f8c536c18 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/rt2x00/rt2500usb.c @@ -47,7 +47,7 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); * BBP and RF register require indirect register access, * and use the CSR registers BBPCSR and RFCSR to achieve this.
* These indirect registers work with busy bits, - * and we will try maximal REGISTER_BUSY_COUNT times to access + * and we will try maximal REGISTER_USB_BUSY_COUNT times to access * the register while taking a REGISTER_BUSY_DELAY us delay * between each attempt. When the busy bit is still set at that time, * the access attempt is considered to have failed, @@ -62,7 +62,7 @@ static inline void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev, __le16 reg; rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, USB_VENDOR_REQUEST_IN, offset, - &reg, sizeof(reg), REGISTER_TIMEOUT); + &reg, sizeof(reg)); *value = le16_to_cpu(reg); } @@ -83,8 +83,7 @@ static inline void rt2500usb_register_multiread(struct rt2x00_dev *rt2x00dev, { rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, USB_VENDOR_REQUEST_IN, offset, - value, length, - REGISTER_TIMEOUT16(length)); + value, length); } static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev, @@ -94,7 +93,7 @@ static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev, __le16 reg = cpu_to_le16(value); rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, USB_VENDOR_REQUEST_OUT, offset, - &reg, sizeof(reg), REGISTER_TIMEOUT); + &reg, sizeof(reg)); } static inline void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev, @@ -113,8 +112,7 @@ static inline void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev, { rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, USB_VENDOR_REQUEST_OUT, offset, - value, length, - REGISTER_TIMEOUT16(length)); + value, length); } static int rt2500usb_regbusy_read(struct rt2x00_dev *rt2x00dev, @@ -124,7 +122,7 @@ static int rt2500usb_regbusy_read(struct rt2x00_dev *rt2x00dev, { unsigned int i; - for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) { rt2500usb_register_read_lock(rt2x00dev, offset, reg); if (!rt2x00_get_field16(*reg, field)) return 1; @@ -906,7 +904,7 @@ static int rt2500usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev) unsigned int i; u8 value; - for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) { rt2500usb_bbp_read(rt2x00dev, 0, &value); if ((value != 0xff) && (value != 0x00)) return 0; @@ -1025,7 +1023,7 @@ static int rt2500usb_set_state(struct rt2x00_dev *rt2x00dev, * We must wait until the register indicates that the * device has entered the correct state. */ - for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) { rt2500usb_register_read(rt2x00dev, MAC_CSR17, &reg2); bbp_state = rt2x00_get_field16(reg2, MAC_CSR17_BBP_CURR_STATE); rf_state = rt2x00_get_field16(reg2, MAC_CSR17_RF_CURR_STATE); diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 9f57a2db791cf879fd9780b65926df300e461f92..81ee481487cf8b648ea4640b5f2114919c99ccd2 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -4119,7 +4119,20 @@ static void rt2800_config_txpower_rt28xx(struct rt2x00_dev *rt2x00dev, * expected. We adjust it, based on TSSI reference and boundaries values * provided in EEPROM. */ - delta += rt2800_get_gain_calibration_delta(rt2x00dev); + switch (rt2x00dev->chip.rt) { + case RT2860: + case RT2872: + case RT2883: + case RT3070: + case RT3071: + case RT3090: + case RT3572: + delta += rt2800_get_gain_calibration_delta(rt2x00dev); + break; + default: + /* TODO: temperature compensation code for other chips.
*/ + break; + } /* * Decrease power according to user settings, on devices with unknown @@ -4136,25 +4149,19 @@ static void rt2800_config_txpower_rt28xx(struct rt2x00_dev *rt2x00dev, * TODO: we do not use +6 dBm option to do not increase power beyond * regulatory limit, however this could be utilized for devices with * CAPABILITY_POWER_LIMIT. - * - * TODO: add different temperature compensation code for RT3290 & RT5390 - * to allow to use BBP_R1 for those chips. - */ - if (!rt2x00_rt(rt2x00dev, RT3290) && - !rt2x00_rt(rt2x00dev, RT5390)) { - rt2800_bbp_read(rt2x00dev, 1, &r1); - if (delta <= -12) { - power_ctrl = 2; - delta += 12; - } else if (delta <= -6) { - power_ctrl = 1; - delta += 6; - } else { - power_ctrl = 0; - } - rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl); - rt2800_bbp_write(rt2x00dev, 1, r1); + */ + if (delta <= -12) { + power_ctrl = 2; + delta += 12; + } else if (delta <= -6) { + power_ctrl = 1; + delta += 6; + } else { + power_ctrl = 0; } + rt2800_bbp_read(rt2x00dev, 1, &r1); + rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl); + rt2800_bbp_write(rt2x00dev, 1, r1); offset = TX_PWR_CFG_0; diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index d13f25cd70d5ea6d81f45dfeadd6273f4e68e86c..9bb398bed9bb68ba133d702c2e5be8b0b089a8ae 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h @@ -1019,9 +1019,12 @@ struct rt2x00_bar_list_entry { * Register defines. * Some registers require multiple attempts before success, * in those cases REGISTER_BUSY_COUNT attempts should be - * taken with a REGISTER_BUSY_DELAY interval. + * taken with a REGISTER_BUSY_DELAY interval. Due to USB + * bus delays, we do not have to loop so many times to wait + * for valid register value on that bus. 
*/ #define REGISTER_BUSY_COUNT 100 +#define REGISTER_USB_BUSY_COUNT 20 #define REGISTER_BUSY_DELAY 100 /* @@ -1437,8 +1440,11 @@ int rt2x00mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); -void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw); -void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw); +void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *mac_addr); +void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); int rt2x00mac_get_stats(struct ieee80211_hw *hw, struct ieee80211_low_level_stats *stats); void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index ad6e5a8d1e10fbc06dac3895288168f1d7d2fb65..cb40245a06959cc4a91cffa7ce5cad4a39f6df21 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -568,7 +568,9 @@ int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, } EXPORT_SYMBOL_GPL(rt2x00mac_sta_remove); -void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw) +void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *mac_addr) { struct rt2x00_dev *rt2x00dev = hw->priv; set_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags); @@ -576,7 +578,8 @@ void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw) } EXPORT_SYMBOL_GPL(rt2x00mac_sw_scan_start); -void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw) +void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct rt2x00_dev *rt2x00dev = hw->priv; clear_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags); diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c index 86c43d112a4b7d4f25bfbf123b7cbbfb98bd75b9..892270dd3e7b898c12acee8ccc2ef262da775f6d 100644 --- a/drivers/net/wireless/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/rt2x00/rt2x00usb.c @@ -42,37 +42,27 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev, { struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); int status; - unsigned int i; unsigned int pipe = (requesttype == USB_VENDOR_REQUEST_IN) ? usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0); + unsigned long expire = jiffies + msecs_to_jiffies(timeout); if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return -ENODEV; - for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + do { status = usb_control_msg(usb_dev, pipe, request, requesttype, value, offset, buffer, buffer_length, - timeout); + timeout / 2); if (status >= 0) return 0; - /* - * Check for errors - * -ENODEV: Device has disappeared, no point continuing. - * All other errors: Try again. - */ - else if (status == -ENODEV) { + if (status == -ENODEV) { + /* Device has disappeared. */ clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); break; } - } - - /* If the port is powered down, we get a -EPROTO error, and this - * leads to a endless loop. So just say that the device is gone. 
- */ - if (status == -EPROTO) - clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); + } while (time_before(jiffies, expire)); rt2x00_err(rt2x00dev, "Vendor Request 0x%02x failed for offset 0x%04x with error %d\n", @@ -116,7 +106,7 @@ EXPORT_SYMBOL_GPL(rt2x00usb_vendor_req_buff_lock); int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev, const u8 request, const u8 requesttype, const u16 offset, void *buffer, - const u16 buffer_length, const int timeout) + const u16 buffer_length) { int status = 0; unsigned char *tb; @@ -131,7 +121,7 @@ int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev, bsize = min_t(u16, CSR_CACHE_SIZE, len); status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request, requesttype, off, tb, - bsize, timeout); + bsize, REGISTER_TIMEOUT); tb += bsize; len -= bsize; @@ -154,7 +144,7 @@ int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev, if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return -ENODEV; - for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) { rt2x00usb_register_read_lock(rt2x00dev, offset, reg); if (!rt2x00_get_field32(*reg, field)) return 1; diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h index 831b65f93feb27206f816d76c115eba6dc9d8c45..8f85fbd5f237eff576e343009d1aeefe26a39f02 100644 --- a/drivers/net/wireless/rt2x00/rt2x00usb.h +++ b/drivers/net/wireless/rt2x00/rt2x00usb.h @@ -33,27 +33,14 @@ }) /* - * For USB vendor requests we need to pass a timeout - * time in ms, for this we use the REGISTER_TIMEOUT, - * however when loading firmware a higher value is - * required. In that case we use the REGISTER_TIMEOUT_FIRMWARE. + * For USB vendor requests we need to pass a timeout time in ms, for this we + * use the REGISTER_TIMEOUT, however when loading firmware or reading the + * EEPROM a higher value is required. In those cases we use + * REGISTER_TIMEOUT_FIRMWARE and EEPROM_TIMEOUT. */ -#define REGISTER_TIMEOUT 500 +#define REGISTER_TIMEOUT 100 #define REGISTER_TIMEOUT_FIRMWARE 1000 - -/** - * REGISTER_TIMEOUT16 - Determine the timeout for 16bit register access - * @__datalen: Data length - */ -#define REGISTER_TIMEOUT16(__datalen) \ - ( REGISTER_TIMEOUT * ((__datalen) / sizeof(u16)) ) - -/** - * REGISTER_TIMEOUT32 - Determine the timeout for 32bit register access - * @__datalen: Data length - */ -#define REGISTER_TIMEOUT32(__datalen) \ - ( REGISTER_TIMEOUT * ((__datalen) / sizeof(u32)) ) +#define EEPROM_TIMEOUT 2000 /* * Cache size @@ -126,7 +113,6 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev, * @offset: Register offset to perform action on * @buffer: Buffer where information will be read/written to by device * @buffer_length: Size of &buffer - * @timeout: Operation timeout * * This function will use a cache previously allocated with kmalloc * to communicate with the device.
The contents of the buffer pointer @@ -139,7 +125,7 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev, int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev, const u8 request, const u8 requesttype, const u16 offset, void *buffer, - const u16 buffer_length, const int timeout); + const u16 buffer_length); /** * rt2x00usb_vendor_request_buff - Send register command to device (buffered) @@ -197,8 +183,7 @@ static inline int rt2x00usb_eeprom_read(struct rt2x00_dev *rt2x00dev, { return rt2x00usb_vendor_request(rt2x00dev, USB_EEPROM_READ, USB_VENDOR_REQUEST_IN, 0, 0, - eeprom, length, - REGISTER_TIMEOUT16(length)); + eeprom, length, EEPROM_TIMEOUT); } /** @@ -217,7 +202,7 @@ static inline void rt2x00usb_register_read(struct rt2x00_dev *rt2x00dev, __le32 reg; rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, USB_VENDOR_REQUEST_IN, offset, - &reg, sizeof(reg), REGISTER_TIMEOUT); + &reg, sizeof(reg)); *value = le32_to_cpu(reg); } @@ -257,8 +242,7 @@ static inline void rt2x00usb_register_multiread(struct rt2x00_dev *rt2x00dev, { rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, USB_VENDOR_REQUEST_IN, offset, - value, length, - REGISTER_TIMEOUT32(length)); + value, length); } /** @@ -277,7 +261,7 @@ static inline void rt2x00usb_register_write(struct rt2x00_dev *rt2x00dev, __le32 reg = cpu_to_le32(value); rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, USB_VENDOR_REQUEST_OUT, offset, - &reg, sizeof(reg), REGISTER_TIMEOUT); + &reg, sizeof(reg)); } /** @@ -316,8 +300,7 @@ static inline void rt2x00usb_register_multiwrite(struct rt2x00_dev *rt2x00dev, { rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, USB_VENDOR_REQUEST_OUT, offset, - (void *)value, length, - REGISTER_TIMEOUT32(length)); + (void *)value, length); } /** diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index 95724ff9c7268700628866f433b66e82a8a9f7c4..a5458cf01fb26be50976c3dd4699e476e540c3c5 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c @@ -1295,7 +1295,7 @@ static int rt73usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev) unsigned int i; u8 value; - for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) { rt73usb_bbp_read(rt2x00dev, 0, &value); if ((value != 0xff) && (value != 0x00)) return 0; diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c index 07dae0d44abc09a78025d71e32570dc68b50194c..5fc6f52641bdf61bcf31739fc47f7f1d78596f87 100644 --- a/drivers/net/wireless/rtlwifi/core.c +++ b/drivers/net/wireless/rtlwifi/core.c @@ -786,6 +786,7 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *new_flags, u64 multicast) { + bool update_rcr = false; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -806,6 +807,7 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "Disable receive multicast frame\n"); } + update_rcr = true; } if (changed_flags & FIF_FCSFAIL) { @@ -818,6 +820,8 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "Disable receive FCS error frame\n"); } + if (!update_rcr) + update_rcr = true; } /* if ssid not set to hw don't check bssid @@ -832,6 +836,8 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw, rtlpriv->cfg->ops->set_chk_bssid(hw, false); else rtlpriv->cfg->ops->set_chk_bssid(hw, true); + if (update_rcr) + update_rcr
= false; } } @@ -846,6 +852,8 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "Disable receive control frame.\n"); } + if (!update_rcr) + update_rcr = true; } if (changed_flags & FIF_OTHER_BSS) { @@ -858,7 +866,13 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "Disable receive other BSS's frame.\n"); } + if (!update_rcr) + update_rcr = true; } + + if (update_rcr) + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, + (u8 *)(&mac->rx_conf)); } static int rtl_op_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -1361,7 +1375,9 @@ static int rtl_op_ampdu_action(struct ieee80211_hw *hw, return 0; } -static void rtl_op_sw_scan_start(struct ieee80211_hw *hw) +static void rtl_op_sw_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *mac_addr) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -1396,7 +1412,8 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw) rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_BACKUP_BAND0); } -static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw) +static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c index 55357d69397a3370d8f4081ecec3d578daa3b8b9..d2ec5160bbf05dd5be42d70a4780292dc8768d58 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c @@ -1287,6 +1287,7 @@ void rtl92ce_enable_interrupt(struct ieee80211_hw *hw) rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); + rtlpci->irq_enabled = true; } void rtl92ce_disable_interrupt(struct ieee80211_hw *hw) @@ -1296,7 +1297,7 @@ void rtl92ce_disable_interrupt(struct ieee80211_hw *hw) rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED); rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED); - synchronize_irq(rtlpci->pdev->irq); + rtlpci->irq_enabled = false; } static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c index 46ea07605eb47f449f7850dea3b8b0862555ffde..dd5aa089126a82c5bf7ee791756f1a03a177a4b8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c @@ -228,6 +228,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = { .led_control = rtl92ce_led_control, .set_desc = rtl92ce_set_desc, .get_desc = rtl92ce_get_desc, + .is_tx_desc_closed = rtl92ce_is_tx_desc_closed, .tx_polling = rtl92ce_tx_polling, .enable_hw_sec = rtl92ce_enable_hw_security_config, .set_key = rtl92ce_set_key, @@ -271,6 +272,8 @@ static struct rtl_hal_cfg rtl92ce_hal_cfg = { .maps[MAC_RCR_ACRC32] = ACRC32, .maps[MAC_RCR_ACF] = ACF, .maps[MAC_RCR_AAP] = AAP, + .maps[MAC_HIMR] = REG_HIMR, + .maps[MAC_HIMRE] = REG_HIMRE, .maps[EFUSE_TEST] = REG_EFUSE_TEST, .maps[EFUSE_CTRL] = REG_EFUSE_CTRL, diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c index dc3d20b17a265479906fa9d6777cc6e29e533f34..e88dcd0e0af17499108057b94f77e25f3ffa9dfc 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c @@ -720,16 +720,15 @@ u32 
rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name) break; } } else { - struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc; switch (desc_name) { case HW_DESC_OWN: - ret = GET_RX_DESC_OWN(pdesc); + ret = GET_RX_DESC_OWN(p_desc); break; case HW_DESC_RXPKT_LEN: - ret = GET_RX_DESC_PKT_LEN(pdesc); + ret = GET_RX_DESC_PKT_LEN(p_desc); break; case HW_DESC_RXBUFF_ADDR: - ret = GET_RX_STATUS_DESC_BUFF_ADDR(pdesc); + ret = GET_RX_DESC_BUFF_ADDR(p_desc); break; default: RT_ASSERT(false, "ERR rxdesc :%d not process\n", @@ -740,6 +739,23 @@ u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name) return ret; } +bool rtl92ce_is_tx_desc_closed(struct ieee80211_hw *hw, + u8 hw_queue, u16 index) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue]; + u8 *entry = (u8 *)(&ring->desc[ring->idx]); + u8 own = (u8)rtl92ce_get_desc(entry, true, HW_DESC_OWN); + + /* Beacon packets only use the first + * descriptor by default, and the OWN bit may not + * be cleared by the hardware + */ + if (own) + return false; + return true; +} + void rtl92ce_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) { struct rtl_priv *rtlpriv = rtl_priv(hw); diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h index 9a39ec4204dda77ef6e268be0396d3399ffc33f8..4bec4b07e3e017ceee47755dac2e8745a74df4db 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h @@ -723,6 +723,8 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw, void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, u8 desc_name, u8 *val); u32 rtl92ce_get_desc(u8 *pdesc, bool istx, u8 desc_name); +bool rtl92ce_is_tx_desc_closed(struct ieee80211_hw *hw, + u8 hw_queue, u16 index); void rtl92ce_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool b_firstseg, bool b_lastseg, diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/Makefile b/drivers/net/wireless/rtlwifi/rtl8192ee/Makefile index 11952b99daf8991626a23f6ca7d48f1f03b364ca..0315eeda9b600aab5a28fa6d27d8c63fe80e27fb 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/Makefile +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/Makefile @@ -1,6 +1,3 @@ -obj-m := rtl8192ee.o - - rtl8192ee-objs := \ dm.o \ fw.o \ @@ -14,6 +11,6 @@ rtl8192ee-objs := \ trx.o \ -obj-$(CONFIG_RTL8821AE) += rtl8192ee.o +obj-$(CONFIG_RTL8192EE) += rtl8192ee.o ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile b/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile index 9c34a85fdb89f1acf280e20722d81ccb2b1b0511..6220672a96f44ffc539ccdc2325876b96dc6895c 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile @@ -1,6 +1,3 @@ -obj-m := rtl8723ae.o - - rtl8723ae-objs := \ dm.o \ fw.o \ diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/Makefile b/drivers/net/wireless/rtlwifi/rtl8723be/Makefile index 59e416abd93ab014ea62b07643b3d839689f4297..a77c3410279275320eef1195e7fd6c78556631da 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/Makefile +++ b/drivers/net/wireless/rtlwifi/rtl8723be/Makefile @@ -1,6 +1,3 @@ -obj-m := rtl8723be.o - - rtl8723be-objs := \ dm.o \ fw.o \ diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/Makefile b/drivers/net/wireless/rtlwifi/rtl8821ae/Makefile index 87ad604a1eb3267a4756e4d36bcdea19e8502a5f..f7a26f71197e01db7fe089dc6ba9de940986ba1b 100644 ---
a/drivers/net/wireless/rtlwifi/rtl8821ae/Makefile +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/Makefile @@ -1,6 +1,3 @@ -obj-m := rtl8821ae.o - - rtl8821ae-objs := \ dm.o \ fw.o \ diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c index 1e9570fa874fe6309962d2de2251fc923da424cb..9b4d8a63791511ae54b143dae0209f65cd2cb387 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c @@ -800,7 +800,7 @@ static void _rtl8821ae_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw, "Invalid RateSection %d in Band 2.4G,Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n", rate_section, path, txnum); break; - }; + } } else if (band == BAND_ON_5G) { switch (rate_section) { case OFDM: @@ -823,7 +823,7 @@ static void _rtl8821ae_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw, "Invalid RateSection %d in Band 5G, Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n", rate_section, path, txnum); break; - }; + } } else { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Invalid Band %d in PHY_SetTxPowerByRateBase()\n", band); @@ -870,7 +870,7 @@ static u8 _rtl8821ae_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw, "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n", rate_section, path, txnum); break; - }; + } } else if (band == BAND_ON_5G) { switch (rate_section) { case OFDM: @@ -893,7 +893,7 @@ static u8 _rtl8821ae_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw, "Invalid RateSection %d in Band 5G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n", rate_section, path, txnum); break; - }; + } } else { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Invalid Band %d in PHY_GetTxPowerByRateBase()\n", band); @@ -3746,7 +3746,7 @@ static void _rtl8821ae_iqk_tx_fill_iqc(struct ieee80211_hw *hw, break; default: break; - }; + } } static void _rtl8821ae_iqk_rx_fill_iqc(struct ieee80211_hw *hw, @@ -3767,7 +3767,7 @@ static void _rtl8821ae_iqk_rx_fill_iqc(struct ieee80211_hw *hw, break; default: break; - }; + } } #define cal_num 10 diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index 38234851457e51cd22829a1ea25c7e26cc584c21..0b30a7b4d6630f463b5075a554d5997e228c95b3 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c @@ -1029,7 +1029,7 @@ static int wl1251_op_hw_scan(struct ieee80211_hw *hw, goto out_sleep; } - skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len, + skb = ieee80211_probereq_get(wl->hw, wl->vif->addr, ssid, ssid_len, req->ie_len); if (!skb) { ret = -ENOMEM; diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index 05604ee312249cb9f493b270106be2d7ba38913b..b82661962d337f2e2f785ebd10ad1f75f003d18c 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -64,6 +64,9 @@ static int __wlcore_cmd_send(struct wl1271 *wl, u16 id, void *buf, id != CMD_STOP_FWLOGGER)) return -EIO; + if (WARN_ON_ONCE(len < sizeof(*cmd))) + return -EIO; + cmd = buf; cmd->id = cpu_to_le16(id); cmd->status = 0; @@ -128,8 +131,9 @@ static int __wlcore_cmd_send(struct wl1271 *wl, u16 id, void *buf, * send command to fw and return cmd status on success * valid_rets contains a bitmap of allowed error codes */ -int wlcore_cmd_send_failsafe(struct wl1271 *wl, u16 id, void *buf, size_t len, - size_t res_len, unsigned long valid_rets) +static int wlcore_cmd_send_failsafe(struct wl1271 *wl, u16 id, void *buf, + size_t len, size_t res_len, + unsigned 
long valid_rets) { int ret = __wlcore_cmd_send(wl, id, buf, len, res_len); @@ -150,7 +154,6 @@ fail: wl12xx_queue_recovery_work(wl); return ret; } -EXPORT_SYMBOL_GPL(wl1271_cmd_send); /* * wrapper for wlcore_cmd_send that accept only CMD_STATUS_SUCCESS @@ -165,6 +168,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, return ret; return 0; } +EXPORT_SYMBOL_GPL(wl1271_cmd_send); /* * Poll the mailbox event field until any of the bits in the mask is set or a @@ -891,6 +895,9 @@ int wlcore_cmd_configure_failsafe(struct wl1271 *wl, u16 id, void *buf, wl1271_debug(DEBUG_CMD, "cmd configure (%d)", id); + if (WARN_ON_ONCE(len < sizeof(*acx))) + return -EIO; + acx->id = cpu_to_le16(id); /* payload length, does not include any headers */ @@ -1138,7 +1145,7 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif, wl1271_debug(DEBUG_SCAN, "build probe request band %d", band); - skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len, + skb = ieee80211_probereq_get(wl->hw, vif->addr, ssid, ssid_len, ie0_len + ie1_len); if (!skb) { ret = -ENOMEM; diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h index ca6a28b03f8f31b5b47b23b8db982df4dbf78f84..453684a71d303db472cb3b5b566e40ae9badae4c 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.h +++ b/drivers/net/wireless/ti/wlcore/cmd.h @@ -31,8 +31,6 @@ struct acx_header; int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, size_t res_len); -int wlcore_cmd_send_failsafe(struct wl1271 *wl, u16 id, void *buf, size_t len, - size_t res_len, unsigned long valid_rets); int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type, u8 *role_id); int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id); diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c index 16d10281798d764ad8fb69f73cab6de0c2ff10d0..5153640f45323ac56eb9b662bdb08415e447562b 100644 --- a/drivers/net/wireless/ti/wlcore/event.c +++ b/drivers/net/wireless/ti/wlcore/event.c @@ -259,10 +259,7 @@ void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap) &wlvif->connection_loss_work, msecs_to_jiffies(delay)); - ieee80211_cqm_rssi_notify( - vif, - NL80211_CQM_RSSI_BEACON_LOSS_EVENT, - GFP_KERNEL); + ieee80211_cqm_beacon_loss_notify(vif, GFP_KERNEL); } } EXPORT_SYMBOL_GPL(wlcore_event_beacon_loss); diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 575c8f6d4009477dc6430b7eff91ab6d8a60e080..6ad3fcedab9b29fa92c9fb37eb65037a5b6d6f0f 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -5177,10 +5177,11 @@ out: } static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, struct ieee80211_channel_switch *ch_switch) { struct wl1271 *wl = hw->priv; - struct wl12xx_vif *wlvif; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret; wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch"); @@ -5190,14 +5191,8 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, mutex_lock(&wl->mutex); if (unlikely(wl->state == WLCORE_STATE_OFF)) { - wl12xx_for_each_wlvif_sta(wl, wlvif) { - struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); - - if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) - continue; - + if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ieee80211_chswitch_done(vif, false); - } goto out; } else if (unlikely(wl->state != WLCORE_STATE_ON)) { goto out; @@ -5208,11 +5203,9 @@ static void 
wl12xx_op_channel_switch(struct ieee80211_hw *hw, goto out; /* TODO: change mac80211 to pass vif as param */ - wl12xx_for_each_wlvif_sta(wl, wlvif) { - unsigned long delay_usec; - if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) - continue; + if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) { + unsigned long delay_usec; ret = wl->ops->channel_switch(wl, wlvif, ch_switch); if (ret) @@ -5222,10 +5215,10 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, /* indicate failure 5 seconds after channel switch time */ delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) * - ch_switch->count; + ch_switch->count; ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work, - usecs_to_jiffies(delay_usec) + - msecs_to_jiffies(5000)); + usecs_to_jiffies(delay_usec) + + msecs_to_jiffies(5000)); } out_sleep: diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 895fe84011e7ea4a2138b33d9e84ab802f1142f6..a6a32d337bbbcf0440dae386117c4dfb3b6b901c 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -235,10 +235,10 @@ static void xenvif_down(struct xenvif *vif) for (queue_index = 0; queue_index < num_queues; ++queue_index) { queue = &vif->queues[queue_index]; - napi_disable(&queue->napi); disable_irq(queue->tx_irq); if (queue->tx_irq != queue->rx_irq) disable_irq(queue->rx_irq); + napi_disable(&queue->napi); del_timer_sync(&queue->credit_timeout); } } diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 6563f0713fc0f5e324a18e2d87ba1f4614e1a008..4a509f715fe8be2f8ff70f7dbf87dce93631caa9 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -82,6 +82,16 @@ MODULE_PARM_DESC(max_queues, static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT; module_param(fatal_skb_slots, uint, 0444); +/* The amount to copy out of the first guest Tx slot into the skb's + * linear area. If the first slot has more data, it will be mapped + * and put into the first frag. + * + * This is sized to avoid pulling headers from the frags for most + * TCP/IP packets. + */ +#define XEN_NETBACK_TX_COPY_LEN 128 + + static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, u8 status); @@ -125,13 +135,6 @@ static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf) pending_tx_info[0]); } -/* This is a miniumum size for the linear area to avoid lots of - * calls to __pskb_pull_tail() as we set up checksum offsets. The - * value 128 was chosen as it covers all IPv4 and most likely - * IPv6 headers. - */ -#define PKT_PROT_LEN 128 - static u16 frag_get_pending_idx(skb_frag_t *frag) { return (u16)frag->page_offset; @@ -1446,9 +1449,9 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, index = pending_index(queue->pending_cons); pending_idx = queue->pending_ring[index]; - data_len = (txreq.size > PKT_PROT_LEN && + data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN && ret < XEN_NETBK_LEGACY_SLOTS_MAX) ? 
- PKT_PROT_LEN : txreq.size; + XEN_NETBACK_TX_COPY_LEN : txreq.size; skb = xenvif_alloc_skb(data_len); if (unlikely(skb == NULL)) { @@ -1550,7 +1553,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s unsigned int len; BUG_ON(i >= MAX_SKB_FRAGS); - page = alloc_page(GFP_ATOMIC|__GFP_COLD); + page = alloc_page(GFP_ATOMIC); if (!page) { int j; skb->truesize += skb->data_len; @@ -1653,11 +1656,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) } } - if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) { - int target = min_t(int, skb->len, PKT_PROT_LEN); - __pskb_pull_tail(skb, target - skb_headlen(skb)); - } - skb->dev = queue->vif->dev; skb->protocol = eth_type_trans(skb, skb->dev); skb_reset_network_header(skb); diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index fab0d4b42f58fca511dc447b62ea91607732ba81..d44cd19169bdd64a6044d1fc4fcf6a9a4a7ab357 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -404,6 +404,7 @@ static int backend_create_xenvif(struct backend_info *be) int err; long handle; struct xenbus_device *dev = be->dev; + struct xenvif *vif; if (be->vif != NULL) return 0; @@ -414,13 +415,13 @@ static int backend_create_xenvif(struct backend_info *be) return (err < 0) ? err : -EINVAL; } - be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle); - if (IS_ERR(be->vif)) { - err = PTR_ERR(be->vif); - be->vif = NULL; + vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle); + if (IS_ERR(vif)) { + err = PTR_ERR(vif); xenbus_dev_fatal(dev, err, "creating interface"); return err; } + be->vif = vif; kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); return 0; diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index ece8d1804d13606c71b8f39e90851cb8b82de8ac..2f0a9ce9ff737656798fc146ba995d46fe669231 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -77,7 +77,9 @@ struct netfront_cb { #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE) #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE) -#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256) + +/* Minimum number of Rx slots (includes slot for GSO metadata). */ +#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1) /* Queue name is interface name with "-qNNN" appended */ #define QUEUE_NAME_SIZE (IFNAMSIZ + 6) @@ -137,13 +139,6 @@ struct netfront_queue { struct xen_netif_rx_front_ring rx; int rx_ring_ref; - /* Receive-ring batched refills. 
*/ -#define RX_MIN_TARGET 8 -#define RX_DFL_MIN_TARGET 64 -#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) - unsigned rx_min_target, rx_max_target, rx_target; - struct sk_buff_head rx_batch; - struct timer_list rx_refill_timer; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; @@ -251,7 +246,7 @@ static void rx_refill_timeout(unsigned long data) static int netfront_tx_slot_available(struct netfront_queue *queue) { return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < - (TX_MAX_TARGET - MAX_SKB_FRAGS - 2); + (NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2); } static void xennet_maybe_wake_tx(struct netfront_queue *queue) @@ -265,77 +260,55 @@ static void xennet_maybe_wake_tx(struct netfront_queue *queue) netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id)); } -static void xennet_alloc_rx_buffers(struct netfront_queue *queue) + +static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue) { - unsigned short id; struct sk_buff *skb; struct page *page; - int i, batch_target, notify; - RING_IDX req_prod = queue->rx.req_prod_pvt; - grant_ref_t ref; - unsigned long pfn; - void *vaddr; - struct xen_netif_rx_request *req; - if (unlikely(!netif_carrier_ok(queue->info->netdev))) - return; + skb = __netdev_alloc_skb(queue->info->netdev, + RX_COPY_THRESHOLD + NET_IP_ALIGN, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; - /* - * Allocate skbuffs greedily, even though we batch updates to the - * receive ring. This creates a less bursty demand on the memory - * allocator, so should reduce the chance of failed allocation requests - * both for ourself and for other kernel subsystems. - */ - batch_target = queue->rx_target - (req_prod - queue->rx.rsp_cons); - for (i = skb_queue_len(&queue->rx_batch); i < batch_target; i++) { - skb = __netdev_alloc_skb(queue->info->netdev, - RX_COPY_THRESHOLD + NET_IP_ALIGN, - GFP_ATOMIC | __GFP_NOWARN); - if (unlikely(!skb)) - goto no_skb; - - /* Align ip header to a 16 bytes boundary */ - skb_reserve(skb, NET_IP_ALIGN); - - page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); - if (!page) { - kfree_skb(skb); -no_skb: - /* Could not allocate any skbuffs. Try again later. */ - mod_timer(&queue->rx_refill_timer, - jiffies + (HZ/10)); - - /* Any skbuffs queued for refill? Force them out. */ - if (i != 0) - goto refill; - break; - } - - skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE); - __skb_queue_tail(&queue->rx_batch, skb); + page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); + if (!page) { + kfree_skb(skb); + return NULL; } + skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE); + + /* Align ip header to a 16 bytes boundary */ + skb_reserve(skb, NET_IP_ALIGN); + skb->dev = queue->info->netdev; + + return skb; +} + - /* Is the batch large enough to be worthwhile? */ - if (i < (queue->rx_target/2)) { - if (req_prod > queue->rx.sring->req_prod) - goto push; +static void xennet_alloc_rx_buffers(struct netfront_queue *queue) +{ + RING_IDX req_prod = queue->rx.req_prod_pvt; + int notify; + + if (unlikely(!netif_carrier_ok(queue->info->netdev))) return; - } - /* Adjust our fill target if we risked running out of buffers. 
*/ - if (((req_prod - queue->rx.sring->rsp_prod) < (queue->rx_target / 4)) && - ((queue->rx_target *= 2) > queue->rx_max_target)) - queue->rx_target = queue->rx_max_target; + for (req_prod = queue->rx.req_prod_pvt; + req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE; + req_prod++) { + struct sk_buff *skb; + unsigned short id; + grant_ref_t ref; + unsigned long pfn; + struct xen_netif_rx_request *req; - refill: - for (i = 0; ; i++) { - skb = __skb_dequeue(&queue->rx_batch); - if (skb == NULL) + skb = xennet_alloc_one_rx_buffer(queue); + if (!skb) break; - skb->dev = queue->info->netdev; - - id = xennet_rxidx(req_prod + i); + id = xennet_rxidx(req_prod); BUG_ON(queue->rx_skbs[id]); queue->rx_skbs[id] = skb; @@ -345,9 +318,8 @@ no_skb: queue->grant_rx_ref[id] = ref; pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0])); - vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0])); - req = RING_GET_REQUEST(&queue->rx, req_prod + i); + req = RING_GET_REQUEST(&queue->rx, req_prod); gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, pfn_to_mfn(pfn), @@ -357,11 +329,16 @@ no_skb: req->gref = ref; } + queue->rx.req_prod_pvt = req_prod; + + /* Not enough requests? Try again later. */ + if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) { + mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); + return; + } + wmb(); /* barrier so backend sees requests */ - /* Above is a suitable barrier to ensure backend will see requests. */ - queue->rx.req_prod_pvt = req_prod + i; - push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify); if (notify) notify_remote_via_irq(queue->rx_irq); @@ -627,6 +604,9 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) slots, skb->len); if (skb_linearize(skb)) goto drop; + data = skb->data; + offset = offset_in_page(data); + len = skb_headlen(skb); } spin_lock_irqsave(&queue->tx_lock, flags); @@ -1065,13 +1045,6 @@ err: work_done -= handle_incoming_queue(queue, &rxq); - /* If we get a callback with very few responses, reduce fill target. */ - /* NB. Note exponential increase, linear decrease.
*/ - if (((queue->rx.req_prod_pvt - queue->rx.sring->rsp_prod) > - ((3*queue->rx_target) / 4)) && - (--queue->rx_target < queue->rx_min_target)) - queue->rx_target = queue->rx_min_target; - xennet_alloc_rx_buffers(queue); if (work_done < budget) { @@ -1638,11 +1611,6 @@ static int xennet_init_queue(struct netfront_queue *queue) spin_lock_init(&queue->tx_lock); spin_lock_init(&queue->rx_lock); - skb_queue_head_init(&queue->rx_batch); - queue->rx_target = RX_DFL_MIN_TARGET; - queue->rx_min_target = RX_DFL_MIN_TARGET; - queue->rx_max_target = RX_MAX_TARGET; - init_timer(&queue->rx_refill_timer); queue->rx_refill_timer.data = (unsigned long)queue; queue->rx_refill_timer.function = rx_refill_timeout; @@ -1665,7 +1633,7 @@ static int xennet_init_queue(struct netfront_queue *queue) } /* A grant for every tx ring slot */ - if (gnttab_alloc_grant_references(TX_MAX_TARGET, + if (gnttab_alloc_grant_references(NET_TX_RING_SIZE, &queue->gref_tx_head) < 0) { pr_alert("can't alloc tx grant refs\n"); err = -ENOMEM; @@ -1673,7 +1641,7 @@ static int xennet_init_queue(struct netfront_queue *queue) } /* A grant for every rx ring slot */ - if (gnttab_alloc_grant_references(RX_MAX_TARGET, + if (gnttab_alloc_grant_references(NET_RX_RING_SIZE, &queue->gref_rx_head) < 0) { pr_alert("can't alloc rx grant refs\n"); err = -ENOMEM; @@ -2141,83 +2109,18 @@ static const struct ethtool_ops xennet_ethtool_ops = }; #ifdef CONFIG_SYSFS -static ssize_t show_rxbuf_min(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct net_device *netdev = to_net_dev(dev); - struct netfront_info *info = netdev_priv(netdev); - unsigned int num_queues = netdev->real_num_tx_queues; - - if (num_queues) - return sprintf(buf, "%u\n", info->queues[0].rx_min_target); - else - return sprintf(buf, "%u\n", RX_MIN_TARGET); -} - -static ssize_t store_rxbuf_min(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) +static ssize_t show_rxbuf(struct device *dev, + struct device_attribute *attr, char *buf) { - struct net_device *netdev = to_net_dev(dev); - struct netfront_info *np = netdev_priv(netdev); - unsigned int num_queues = netdev->real_num_tx_queues; - char *endp; - unsigned long target; - unsigned int i; - struct netfront_queue *queue; - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - target = simple_strtoul(buf, &endp, 0); - if (endp == buf) - return -EBADMSG; - - if (target < RX_MIN_TARGET) - target = RX_MIN_TARGET; - if (target > RX_MAX_TARGET) - target = RX_MAX_TARGET; - - for (i = 0; i < num_queues; ++i) { - queue = &np->queues[i]; - spin_lock_bh(&queue->rx_lock); - if (target > queue->rx_max_target) - queue->rx_max_target = target; - queue->rx_min_target = target; - if (target > queue->rx_target) - queue->rx_target = target; - - xennet_alloc_rx_buffers(queue); - - spin_unlock_bh(&queue->rx_lock); - } - return len; -} - -static ssize_t show_rxbuf_max(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct net_device *netdev = to_net_dev(dev); - struct netfront_info *info = netdev_priv(netdev); - unsigned int num_queues = netdev->real_num_tx_queues; - - if (num_queues) - return sprintf(buf, "%u\n", info->queues[0].rx_max_target); - else - return sprintf(buf, "%u\n", RX_MAX_TARGET); + return sprintf(buf, "%lu\n", NET_RX_RING_SIZE); } -static ssize_t store_rxbuf_max(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) +static ssize_t store_rxbuf(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) { - struct 
net_device *netdev = to_net_dev(dev); - struct netfront_info *np = netdev_priv(netdev); - unsigned int num_queues = netdev->real_num_tx_queues; char *endp; unsigned long target; - unsigned int i = 0; - struct netfront_queue *queue = NULL; if (!capable(CAP_NET_ADMIN)) return -EPERM; @@ -2226,44 +2129,15 @@ static ssize_t store_rxbuf_max(struct device *dev, if (endp == buf) return -EBADMSG; - if (target < RX_MIN_TARGET) - target = RX_MIN_TARGET; - if (target > RX_MAX_TARGET) - target = RX_MAX_TARGET; - - for (i = 0; i < num_queues; ++i) { - queue = &np->queues[i]; - spin_lock_bh(&queue->rx_lock); - if (target < queue->rx_min_target) - queue->rx_min_target = target; - queue->rx_max_target = target; - if (target < queue->rx_target) - queue->rx_target = target; - - xennet_alloc_rx_buffers(queue); + /* rxbuf_min and rxbuf_max are no longer configurable. */ - spin_unlock_bh(&queue->rx_lock); - } return len; } -static ssize_t show_rxbuf_cur(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct net_device *netdev = to_net_dev(dev); - struct netfront_info *info = netdev_priv(netdev); - unsigned int num_queues = netdev->real_num_tx_queues; - - if (num_queues) - return sprintf(buf, "%u\n", info->queues[0].rx_target); - else - return sprintf(buf, "0\n"); -} - static struct device_attribute xennet_attrs[] = { - __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), - __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), - __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), + __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf), + __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf), + __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL), }; static int xennet_sysfs_addif(struct net_device *netdev) diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c index 440291ab7263202fc8017c3b9f09f8318f5f872a..fc02e8d6a1936d89dffae06d78441cd0ba62ab5a 100644 --- a/drivers/nfc/pn544/i2c.c +++ b/drivers/nfc/pn544/i2c.c @@ -29,8 +29,8 @@ #include #include #include -#include #include +#include #include #include diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c index 0ea756b7751941f4594ed8f67c60bad9bb271c75..05722085a59fafb4c8a66db1095a88bcb25d7970 100644 --- a/drivers/nfc/st21nfca/i2c.c +++ b/drivers/nfc/st21nfca/i2c.c @@ -28,8 +28,8 @@ #include #include #include -#include #include +#include #include #include @@ -72,7 +72,6 @@ struct st21nfca_i2c_phy { struct nfc_hci_dev *hdev; unsigned int gpio_ena; - unsigned int gpio_irq; unsigned int irq_polarity; struct sk_buff *pending_skb; @@ -531,20 +530,12 @@ static int st21nfca_hci_i2c_of_request_resources(struct i2c_client *client) "clf_enable"); if (r) { nfc_err(&client->dev, "Failed to request enable pin\n"); - return -ENODEV; + return r; } phy->gpio_ena = gpio; - /* IRQ */ - r = irq_of_parse_and_map(pp, 0); - if (r < 0) { - nfc_err(&client->dev, "Unable to get irq, error: %d\n", r); - return r; - } - - phy->irq_polarity = irq_get_trigger_type(r); - client->irq = r; + phy->irq_polarity = irq_get_trigger_type(client->irq); return 0; } @@ -560,7 +551,6 @@ static int st21nfca_hci_i2c_request_resources(struct i2c_client *client) struct st21nfca_nfc_platform_data *pdata; struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client); int r; - int irq; pdata = client->dev.platform_data; if (pdata == NULL) { @@ -569,36 +559,18 @@ static int st21nfca_hci_i2c_request_resources(struct i2c_client *client) } /* store for later use */ - phy->gpio_irq = pdata->gpio_irq; phy->gpio_ena = pdata->gpio_ena; 
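The st21nfca hunks here (and the st21nfcb ones below) share one pattern: the driver stops requesting a separate IRQ GPIO and mapping it by hand (irq_of_parse_and_map() in the OF path, gpio_to_irq() in the platform-data path), and instead uses the client->irq already resolved by the I2C core, keeping only the trigger type; error codes from devm_gpio_request_one() are now propagated instead of being flattened to -ENODEV. A minimal sketch of the resulting probe-side pattern, with a hypothetical my_phy standing in for the driver's phy structure:

#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/irq.h>

/* Hypothetical stand-in for st21nfca_i2c_phy / st21nfcb_i2c_phy. */
struct my_phy {
	unsigned int gpio_ena;
	unsigned int irq_polarity;
};

static int my_request_resources(struct i2c_client *client, struct my_phy *phy)
{
	int r;

	r = devm_gpio_request_one(&client->dev, phy->gpio_ena,
				  GPIOF_OUT_INIT_HIGH, "clf_enable");
	if (r)
		return r;	/* propagate the real errno, not -ENODEV */

	/*
	 * client->irq was already resolved by the I2C core (from DT,
	 * ACPI or board info); only the trigger type is still needed.
	 */
	phy->irq_polarity = irq_get_trigger_type(client->irq);
	return 0;
}

Relying on client->irq also keeps the driver usable from ACPI and board-file setups, not only device tree.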
phy->irq_polarity = pdata->irq_polarity; - r = devm_gpio_request_one(&client->dev, phy->gpio_irq, GPIOF_IN, - "wake_up"); - if (r) { - pr_err("%s : gpio_request failed\n", __FILE__); - return -ENODEV; - } - if (phy->gpio_ena > 0) { r = devm_gpio_request_one(&client->dev, phy->gpio_ena, GPIOF_OUT_INIT_HIGH, "clf_enable"); if (r) { pr_err("%s : ena gpio_request failed\n", __FILE__); - return -ENODEV; + return r; } } - /* IRQ */ - irq = gpio_to_irq(phy->gpio_irq); - if (irq < 0) { - nfc_err(&client->dev, - "Unable to get irq number for GPIO %d error %d\n", - phy->gpio_irq, r); - return -ENODEV; - } - client->irq = irq; - return 0; } @@ -656,7 +628,7 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client, r = st21nfca_hci_platform_init(phy); if (r < 0) { nfc_err(&client->dev, "Unable to reboot st21nfca\n"); - return -ENODEV; + return r; } r = devm_request_threaded_irq(&client->dev, client->irq, NULL, @@ -687,10 +659,13 @@ static int st21nfca_hci_i2c_remove(struct i2c_client *client) return 0; } +#ifdef CONFIG_OF static const struct of_device_id of_st21nfca_i2c_match[] = { { .compatible = "st,st21nfca_i2c", }, {} }; +MODULE_DEVICE_TABLE(of, of_st21nfca_i2c_match); +#endif static struct i2c_driver st21nfca_hci_i2c_driver = { .driver = { diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c index a89e56c2c74954a0ac248bb00f603cd0528af3c4..f2596c8d68b0b36b12501914e7a1ea49ce16c058 100644 --- a/drivers/nfc/st21nfca/st21nfca.c +++ b/drivers/nfc/st21nfca/st21nfca.c @@ -77,10 +77,6 @@ ((p & 0x0f) == (ST21NFCA_DM_PIPE_CREATED | ST21NFCA_DM_PIPE_OPEN)) #define ST21NFCA_NFC_MODE 0x03 /* NFC_MODE parameter*/ -#define ST21NFCA_EVT_FIELD_ON 0x11 -#define ST21NFCA_EVT_CARD_DEACTIVATED 0x12 -#define ST21NFCA_EVT_CARD_ACTIVATED 0x13 -#define ST21NFCA_EVT_FIELD_OFF 0x14 static DECLARE_BITMAP(dev_mask, ST21NFCA_NUM_DEVICES); @@ -841,31 +837,11 @@ static int st21nfca_hci_check_presence(struct nfc_hci_dev *hdev, static int st21nfca_hci_event_received(struct nfc_hci_dev *hdev, u8 gate, u8 event, struct sk_buff *skb) { - int r; - struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); - - pr_debug("hci event: %d\n", event); + pr_debug("hci event: %d gate: %x\n", event, gate); - switch (event) { - case ST21NFCA_EVT_CARD_ACTIVATED: - if (gate == ST21NFCA_RF_CARD_F_GATE) - info->dep_info.curr_nfc_dep_pni = 0; - break; - case ST21NFCA_EVT_CARD_DEACTIVATED: - break; - case ST21NFCA_EVT_FIELD_ON: - break; - case ST21NFCA_EVT_FIELD_OFF: - break; - case ST21NFCA_EVT_SEND_DATA: - if (gate == ST21NFCA_RF_CARD_F_GATE) { - r = st21nfca_tm_event_send_data(hdev, skb, gate); - if (r < 0) - return r; - return 0; - } - info->dep_info.curr_nfc_dep_pni = 0; - return 1; + switch (gate) { + case ST21NFCA_RF_CARD_F_GATE: + return st21nfca_dep_event_received(hdev, event, skb); default: return 1; } diff --git a/drivers/nfc/st21nfca/st21nfca.h b/drivers/nfc/st21nfca/st21nfca.h index a0b77f1ba6d905ae07f322414250bfc4ee6cbc9c..7c2a852922304513c4bd52794e0b03eb7de11d23 100644 --- a/drivers/nfc/st21nfca/st21nfca.h +++ b/drivers/nfc/st21nfca/st21nfca.h @@ -85,6 +85,4 @@ struct st21nfca_hci_info { #define ST21NFCA_RF_CARD_F_GATE 0x24 -#define ST21NFCA_EVT_SEND_DATA 0x10 - #endif /* __LOCAL_ST21NFCA_H_ */ diff --git a/drivers/nfc/st21nfca/st21nfca_dep.c b/drivers/nfc/st21nfca/st21nfca_dep.c index bfb6df56c5058d36d007c0753a8aa31e25e4f9ce..8882181d65de7c71c3f5940d9aee422d5a57ecd1 100644 --- a/drivers/nfc/st21nfca/st21nfca_dep.c +++ b/drivers/nfc/st21nfca/st21nfca_dep.c @@ -49,6 +49,12 @@ #define 
ST21NFCA_LR_BITS_PAYLOAD_SIZE_254B 0x30 #define ST21NFCA_GB_BIT 0x02 +#define ST21NFCA_EVT_SEND_DATA 0x10 +#define ST21NFCA_EVT_FIELD_ON 0x11 +#define ST21NFCA_EVT_CARD_DEACTIVATED 0x12 +#define ST21NFCA_EVT_CARD_ACTIVATED 0x13 +#define ST21NFCA_EVT_FIELD_OFF 0x14 + #define ST21NFCA_EVT_CARD_F_BITRATE 0x16 #define ST21NFCA_EVT_READER_F_BITRATE 0x13 #define ST21NFCA_PSL_REQ_SEND_SPEED(brs) (brs & 0x38) @@ -372,8 +378,8 @@ exit: return r; } -int st21nfca_tm_event_send_data(struct nfc_hci_dev *hdev, struct sk_buff *skb, - u8 gate) +static int st21nfca_tm_event_send_data(struct nfc_hci_dev *hdev, + struct sk_buff *skb) { u8 cmd0, cmd1; int r; @@ -400,7 +406,42 @@ int st21nfca_tm_event_send_data(struct nfc_hci_dev *hdev, struct sk_buff *skb, } return r; } -EXPORT_SYMBOL(st21nfca_tm_event_send_data); + +/* + * Returns: + * <= 0: driver handled the event, skb consumed + * 1: driver does not handle the event, please do standard processing + */ +int st21nfca_dep_event_received(struct nfc_hci_dev *hdev, + u8 event, struct sk_buff *skb) +{ + int r = 0; + struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); + + pr_debug("dep event: %d\n", event); + + switch (event) { + case ST21NFCA_EVT_CARD_ACTIVATED: + info->dep_info.curr_nfc_dep_pni = 0; + break; + case ST21NFCA_EVT_CARD_DEACTIVATED: + break; + case ST21NFCA_EVT_FIELD_ON: + break; + case ST21NFCA_EVT_FIELD_OFF: + break; + case ST21NFCA_EVT_SEND_DATA: + r = st21nfca_tm_event_send_data(hdev, skb); + if (r < 0) + return r; + return 0; + default: + return 1; + } + kfree_skb(skb); + return r; +} +EXPORT_SYMBOL(st21nfca_dep_event_received); static void st21nfca_im_send_psl_req(struct nfc_hci_dev *hdev, u8 did, u8 bsi, u8 bri, u8 lri) diff --git a/drivers/nfc/st21nfca/st21nfca_dep.h b/drivers/nfc/st21nfca/st21nfca_dep.h index ca213dee9c6e66537389c03a0e9682cef08471b1..baf4664b4fc44d6db40aba4e879425df66d2e5e9 100644 --- a/drivers/nfc/st21nfca/st21nfca_dep.h +++ b/drivers/nfc/st21nfca/st21nfca_dep.h @@ -32,8 +32,8 @@ struct st21nfca_dep_info { u8 lri; } __packed; -int st21nfca_tm_event_send_data(struct nfc_hci_dev *hdev, struct sk_buff *skb, - u8 gate); +int st21nfca_dep_event_received(struct nfc_hci_dev *hdev, + u8 event, struct sk_buff *skb); int st21nfca_tm_send_dep_res(struct nfc_hci_dev *hdev, struct sk_buff *skb); int st21nfca_im_send_atr_req(struct nfc_hci_dev *hdev, u8 *gb, size_t gb_len); diff --git a/drivers/nfc/st21nfcb/i2c.c b/drivers/nfc/st21nfcb/i2c.c index c5d2427a3db276064c9669722b2039e5ddf90e3f..01ba865863ee93dfb6843ae26d182efa288b6284 100644 --- a/drivers/nfc/st21nfcb/i2c.c +++ b/drivers/nfc/st21nfcb/i2c.c @@ -50,7 +50,6 @@ struct st21nfcb_i2c_phy { struct i2c_client *i2c_dev; struct llt_ndlc *ndlc; - unsigned int gpio_irq; unsigned int gpio_reset; unsigned int irq_polarity; @@ -81,8 +80,6 @@ static void st21nfcb_nci_i2c_disable(void *phy_id) { struct st21nfcb_i2c_phy *phy = phy_id; - pr_info("\n"); - phy->powered = 0; /* reset chip in order to flush clf */ gpio_set_value(phy->gpio_reset, 0); @@ -258,19 +255,11 @@ static int st21nfcb_nci_i2c_of_request_resources(struct i2c_client *client) GPIOF_OUT_INIT_HIGH, "clf_reset"); if (r) { nfc_err(&client->dev, "Failed to request reset pin\n"); - return -ENODEV; - } - phy->gpio_reset = gpio; - - /* IRQ */ - r = irq_of_parse_and_map(pp, 0); - if (r < 0) { - nfc_err(&client->dev, "Unable to get irq, error: %d\n", r); return r; } + phy->gpio_reset = gpio; - phy->irq_polarity = irq_get_trigger_type(r); - client->irq = r; + phy->irq_polarity = irq_get_trigger_type(client->irq); return 0; } @@ 
-286,7 +275,6 @@ static int st21nfcb_nci_i2c_request_resources(struct i2c_client *client) struct st21nfcb_nfc_platform_data *pdata; struct st21nfcb_i2c_phy *phy = i2c_get_clientdata(client); int r; - int irq; pdata = client->dev.platform_data; if (pdata == NULL) { @@ -295,33 +283,15 @@ static int st21nfcb_nci_i2c_request_resources(struct i2c_client *client) } /* store for later use */ - phy->gpio_irq = pdata->gpio_irq; phy->gpio_reset = pdata->gpio_reset; phy->irq_polarity = pdata->irq_polarity; - r = devm_gpio_request_one(&client->dev, phy->gpio_irq, - GPIOF_IN, "clf_irq"); - if (r) { - pr_err("%s : gpio_request failed\n", __FILE__); - return -ENODEV; - } - r = devm_gpio_request_one(&client->dev, phy->gpio_reset, GPIOF_OUT_INIT_HIGH, "clf_reset"); if (r) { pr_err("%s : reset gpio_request failed\n", __FILE__); - return -ENODEV; - } - - /* IRQ */ - irq = gpio_to_irq(phy->gpio_irq); - if (irq < 0) { - nfc_err(&client->dev, - "Unable to get irq number for GPIO %d error %d\n", - phy->gpio_irq, r); - return -ENODEV; + return r; } - client->irq = irq; return 0; } @@ -401,10 +371,13 @@ static int st21nfcb_nci_i2c_remove(struct i2c_client *client) return 0; } +#ifdef CONFIG_OF static const struct of_device_id of_st21nfcb_i2c_match[] = { { .compatible = "st,st21nfcb_i2c", }, {} }; +MODULE_DEVICE_TABLE(of, of_st21nfcb_i2c_match); +#endif static struct i2c_driver st21nfcb_nci_i2c_driver = { .driver = { diff --git a/drivers/nfc/st21nfcb/ndlc.c b/drivers/nfc/st21nfcb/ndlc.c index e7bff8921d1143876246c90955649181af573dce..bac50e805f1d565ba17d4c0056fc8a2f2847ae90 100644 --- a/drivers/nfc/st21nfcb/ndlc.c +++ b/drivers/nfc/st21nfcb/ndlc.c @@ -266,7 +266,7 @@ int ndlc_probe(void *phy_id, struct nfc_phy_ops *phy_ops, struct device *dev, *ndlc_id = ndlc; - /* start timers */ + /* initialize timers */ init_timer(&ndlc->t1_timer); ndlc->t1_timer.data = (unsigned long)ndlc; ndlc->t1_timer.function = ndlc_t1_timeout; diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index 8b3f5599180595df9cd28422fe1dec2ddc2183aa..f1b5111bbaba42a690ae573ce7a699ab60ebcaaa 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig @@ -71,7 +71,7 @@ config CLAW config QETH def_tristate y prompt "Gigabit Ethernet device support" - depends on CCW && NETDEVICES && IP_MULTICAST && QDIO + depends on CCW && NETDEVICES && IP_MULTICAST && QDIO && ETHERNET help This driver supports the IBM System z OSA Express adapters in QDIO mode (all media types), HiperSockets interfaces and z/VM diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c index 6bcfbbb20f04c6525fa3363e66422009fe6072b6..47773c4d235a7cc3787a116887e115d4d3997e21 100644 --- a/drivers/s390/net/ctcm_sysfs.c +++ b/drivers/s390/net/ctcm_sysfs.c @@ -44,8 +44,8 @@ static ssize_t ctcm_buffer_write(struct device *dev, return -ENODEV; } - rc = sscanf(buf, "%u", &bs1); - if (rc != 1) + rc = kstrtouint(buf, 0, &bs1); + if (rc) goto einval; if (bs1 > CTCM_BUFSIZE_LIMIT) goto einval; @@ -151,8 +151,8 @@ static ssize_t ctcm_proto_store(struct device *dev, if (!priv) return -ENODEV; - rc = sscanf(buf, "%d", &value); - if ((rc != 1) || + rc = kstrtoint(buf, 0, &value); + if (rc || !((value == CTCM_PROTO_S390) || (value == CTCM_PROTO_LINUX) || (value == CTCM_PROTO_MPC) || diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 0a7d87c372b8414e17c41e05b50cf974e5ba0bd3..92190aa20b9fa1c8940bbdf79c2190bc99033e3d 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -1943,15 +1943,16 @@ static ssize_t lcs_portno_store (struct device 
*dev, struct device_attribute *attr, const char *buf, size_t count) { struct lcs_card *card; - int value, rc; + int rc; + s16 value; card = dev_get_drvdata(dev); if (!card) return 0; - rc = sscanf(buf, "%d", &value); - if (rc != 1) + rc = kstrtos16(buf, 0, &value); + if (rc) return -EINVAL; /* TODO: sanity checks */ card->portno = value; @@ -2007,8 +2008,8 @@ lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char if (!card) return 0; - rc = sscanf(buf, "%u", &value); - if (rc != 1) + rc = kstrtouint(buf, 0, &value); + if (rc) return -EINVAL; /* TODO: sanity checks */ card->lancmd_timeout = value; diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index e7646ce3d659218ca7fd9762e05cf0d7c4c95a5c..7a8bb9f78e7639d915d478e0ee0cc11b3f905f4a 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -380,11 +380,6 @@ enum qeth_header_ids { #define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20 #define QETH_HDR_EXT_UDP 0x40 /*bit off for TCP*/ -static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) -{ - return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY); -} - enum qeth_qdio_buffer_states { /* * inbound: read out by driver; owned by hardware in order to be filled @@ -843,13 +838,6 @@ struct qeth_trap_id { /*some helper functions*/ #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") -static inline struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev) -{ - struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *) - dev_get_drvdata(&cdev->dev))->dev); - return card; -} - static inline int qeth_get_micros(void) { return (int) (get_tod_clock() >> 12); @@ -894,7 +882,6 @@ const char *qeth_get_cardname_short(struct qeth_card *); int qeth_realloc_buffer_pool(struct qeth_card *, int); int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id); void qeth_core_free_discipline(struct qeth_card *); -void qeth_buffer_reclaim_work(struct work_struct *); /* exports for qeth discipline device drivers */ extern struct qeth_card_list_struct qeth_core_card_list; @@ -913,7 +900,6 @@ int qeth_core_hardsetup_card(struct qeth_card *); void qeth_print_status_message(struct qeth_card *); int qeth_init_qdio_queues(struct qeth_card *); int qeth_send_startlan(struct qeth_card *); -int qeth_send_stoplan(struct qeth_card *); int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, int (*reply_cb) (struct qeth_card *, struct qeth_reply *, unsigned long), @@ -954,8 +940,6 @@ int qeth_snmp_command(struct qeth_card *, char __user *); int qeth_query_oat_command(struct qeth_card *, char __user *); int qeth_query_switch_attributes(struct qeth_card *card, struct qeth_switch_info *sw_info); -int qeth_query_card_info(struct qeth_card *card, - struct carrier_info *carrier_info); int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long), void *reply_param); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index fd22c811cbe195d2056f6bb5b6339fa33113b8cc..f407e3763432648f3271a80e2d432b7da8736c65 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -718,6 +718,13 @@ static int qeth_check_idx_response(struct qeth_card *card, return 0; } +static struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev) +{ + struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *) + dev_get_drvdata(&cdev->dev))->dev); + return card; +} + static void 
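The ctcm and lcs sysfs stores above replace sscanf() with the kstrto*() helpers, which fail on trailing garbage and overflow instead of silently accepting a partially parsed value. A minimal sketch of the pattern, for a hypothetical store handler with an illustrative MY_LIMIT bound:

#include <linux/device.h>
#include <linux/kernel.h>

#define MY_LIMIT 65535	/* illustrative only */

static ssize_t my_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	unsigned int value;
	int rc;

	/* kstrtouint() rejects overflow and trailing junk, unlike sscanf(). */
	rc = kstrtouint(buf, 0, &value);
	if (rc)
		return rc;
	if (value > MY_LIMIT)
		return -EINVAL;

	/* ... apply value ... */
	return count;
}

Note that kstrtos16() in lcs_portno_store() additionally range-checks the input against the s16 portno field, which the old "%d" conversion into an int did not.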
qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob, __u32 len) { @@ -1431,6 +1438,7 @@ static void qeth_start_kernel_thread(struct work_struct *work) } } +static void qeth_buffer_reclaim_work(struct work_struct *); static int qeth_setup_card(struct qeth_card *card) { @@ -3232,7 +3240,7 @@ int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf, } EXPORT_SYMBOL_GPL(qeth_check_qdio_errors); -void qeth_buffer_reclaim_work(struct work_struct *work) +static void qeth_buffer_reclaim_work(struct work_struct *work) { struct qeth_card *card = container_of(work, struct qeth_card, buffer_reclaim_work.work); @@ -4126,7 +4134,7 @@ static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); if (cmd->hdr.return_code) { - QETH_CARD_TEXT_(card, 4, "prmrc%2.2x", cmd->hdr.return_code); + QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code); setparms->data.mode = SET_PROMISC_MODE_OFF; } card->info.promisc_mode = setparms->data.mode; @@ -4493,13 +4501,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card, snmp = &cmd->data.setadapterparms.data.snmp; if (cmd->hdr.return_code) { - QETH_CARD_TEXT_(card, 4, "scer1%i", cmd->hdr.return_code); + QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); return 0; } if (cmd->data.setadapterparms.hdr.return_code) { cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code; - QETH_CARD_TEXT_(card, 4, "scer2%i", cmd->hdr.return_code); + QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code); return 0; } data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); @@ -4717,7 +4725,7 @@ static int qeth_query_card_info_cb(struct qeth_card *card, return 0; } -int qeth_query_card_info(struct qeth_card *card, +static int qeth_query_card_info(struct qeth_card *card, struct carrier_info *carrier_info) { struct qeth_cmd_buffer *iob; @@ -4730,7 +4738,6 @@ int qeth_query_card_info(struct qeth_card *card, return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, (void *)carrier_info); } -EXPORT_SYMBOL_GPL(qeth_query_card_info); static inline int qeth_get_qdio_q_format(struct qeth_card *card) { @@ -5113,6 +5120,11 @@ static inline int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer, return 0; } +static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) +{ + return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY); +} + struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, struct qeth_qdio_buffer *qethbuffer, struct qdio_buffer_element **__element, int *__offset, diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index c2679bfe7f6653ba369b8646eada7e554576768a..d02cd1a679432fc7ef485295d308bac046f96df9 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1512,7 +1512,7 @@ static void qeth_bridge_state_change(struct qeth_card *card, QETH_CARD_TEXT(card, 2, "brstchng"); if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) { - QETH_CARD_TEXT_(card, 2, "BPsz%.8d", qports->entry_length); + QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length); return; } extrasize = sizeof(struct qeth_sbp_port_entry) * qports->num_entries; diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h index 29c1c00e3a0f3dcdb826e0e31c33bde2ceb88dcc..551a4b4c03fd6563510ea69a574debb34797310c 100644 --- a/drivers/s390/net/qeth_l3.h +++ b/drivers/s390/net/qeth_l3.h @@ -42,10 +42,6 @@ struct qeth_ipato_entry { }; -void qeth_l3_ipaddr4_to_string(const __u8 *, char *); -int 
qeth_l3_string_to_ipaddr4(const char *, __u8 *); -void qeth_l3_ipaddr6_to_string(const __u8 *, char *); -int qeth_l3_string_to_ipaddr6(const char *, __u8 *); void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *); int qeth_l3_string_to_ipaddr(const char *, enum qeth_prot_versions, __u8 *); int qeth_l3_create_device_attributes(struct device *); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index afebb9709763b0a011320b9ad7e66aa709eca9a3..625227ad16ee91cd2b4ca1aaa8a9541408e2a838 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -55,12 +55,12 @@ static int qeth_l3_isxdigit(char *buf) return 1; } -void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf) +static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf) { sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]); } -int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr) +static int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr) { int count = 0, rc = 0; unsigned int in[4]; @@ -78,12 +78,12 @@ int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr) return 0; } -void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf) +static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf) { sprintf(buf, "%pI6", addr); } -int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr) +static int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr) { const char *end, *end_tmp, *start; __u16 *in; @@ -2502,7 +2502,7 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) rc = -EFAULT; goto free_and_out; } - QETH_CARD_TEXT_(card, 4, "qacts"); + QETH_CARD_TEXT(card, 4, "qacts"); } free_and_out: kfree(qinfo.udata); diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c index 065a87ace623b458a87bf177532a0dfafda67dbe..2d1c4ebd40f91d7b0f7e58a57d4d89f871456910 100644 --- a/drivers/scsi/csiostor/csio_attr.c +++ b/drivers/scsi/csiostor/csio_attr.c @@ -451,9 +451,9 @@ csio_fcoe_alloc_vnp(struct csio_hw *hw, struct csio_lnode *ln) /* Process Mbox response of VNP command */ rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb); - if (FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) { + if (FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) { csio_ln_err(ln, "FCOE VNP ALLOC cmd returned 0x%x!\n", - FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16))); + FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16))); ret = -EINVAL; goto out_free; } @@ -526,9 +526,9 @@ csio_fcoe_free_vnp(struct csio_hw *hw, struct csio_lnode *ln) /* Process Mbox response of VNP command */ rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb); - if (FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) { + if (FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) { csio_ln_err(ln, "FCOE VNP FREE cmd returned 0x%x!\n", - FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16))); + FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16))); ret = -EINVAL; } diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 0eaec474895788a6437e12c9907e053c9f74b048..9ab997e18b20bce936bb7b0c34ee040edc2721f5 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -47,7 +47,6 @@ #include "csio_lnode.h" #include "csio_rnode.h" -int csio_force_master; int csio_dbg_level = 0xFEFF; unsigned int csio_port_mask = 0xf; @@ -650,10 +649,10 @@ static void csio_hw_print_fw_version(struct csio_hw *hw, char *str) { csio_info(hw, "%s: %u.%u.%u.%u\n", str, - FW_HDR_FW_VER_MAJOR_GET(hw->fwrev), - 
FW_HDR_FW_VER_MINOR_GET(hw->fwrev), - FW_HDR_FW_VER_MICRO_GET(hw->fwrev), - FW_HDR_FW_VER_BUILD_GET(hw->fwrev)); + FW_HDR_FW_VER_MAJOR_G(hw->fwrev), + FW_HDR_FW_VER_MINOR_G(hw->fwrev), + FW_HDR_FW_VER_MICRO_G(hw->fwrev), + FW_HDR_FW_VER_BUILD_G(hw->fwrev)); } /* @@ -706,9 +705,9 @@ csio_hw_check_fw_version(struct csio_hw *hw) if (ret) return ret; - major = FW_HDR_FW_VER_MAJOR_GET(hw->fwrev); - minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev); - micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev); + major = FW_HDR_FW_VER_MAJOR_G(hw->fwrev); + minor = FW_HDR_FW_VER_MINOR_G(hw->fwrev); + micro = FW_HDR_FW_VER_MICRO_G(hw->fwrev); if (major != FW_VERSION_MAJOR(hw)) { /* major mismatch - fail */ csio_err(hw, "card FW has major version %u, driver wants %u\n", @@ -889,7 +888,6 @@ csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state) { struct csio_mb *mbp; int rv = 0; - enum csio_dev_master master; enum fw_retval retval; uint8_t mpfn; char state_str[16]; @@ -904,11 +902,9 @@ csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state) goto out; } - master = csio_force_master ? CSIO_MASTER_MUST : CSIO_MASTER_MAY; - retry: csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, - hw->pfn, master, NULL); + hw->pfn, CSIO_MASTER_MAY, NULL); rv = csio_mb_issue(hw, mbp); if (rv) { @@ -1170,7 +1166,7 @@ csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force) } csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO, - PIORSTMODE | PIORST, FW_RESET_CMD_HALT(1), + PIORSTMODE | PIORST, FW_RESET_CMD_HALT_F, NULL); if (csio_mb_issue(hw, mbp)) { @@ -1370,13 +1366,13 @@ csio_hw_fw_config_file(struct csio_hw *hw, caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb); CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1); caps_cmd->op_to_write = - htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ); + htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); caps_cmd->cfvalid_to_len16 = - htonl(FW_CAPS_CONFIG_CMD_CFVALID | - FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | - FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | + htonl(FW_CAPS_CONFIG_CMD_CFVALID_F | + FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) | + FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) | FW_LEN16(*caps_cmd)); if (csio_mb_issue(hw, mbp)) { @@ -1407,9 +1403,9 @@ csio_hw_fw_config_file(struct csio_hw *hw, * And now tell the firmware to use the configuration we just loaded. */ caps_cmd->op_to_write = - htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE); + htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F); caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd)); if (csio_mb_issue(hw, mbp)) { @@ -1678,7 +1674,7 @@ csio_get_fcoe_resinfo(struct csio_hw *hw) } rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb); - retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16)); + retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16)); if (retval != FW_SUCCESS) { csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n", retval); @@ -1723,8 +1719,8 @@ csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param) * Find out whether we're dealing with a version of * the firmware which has configuration file support. 
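From here to the end of the section, the csiostor changes are a mechanical rename from the old FW_*_GET()/bare FW_*() accessors to the suffix-coded macro families shared with cxgb4's firmware API headers: _S/_M for shift and mask, _V to build a field value, _G to extract one, and _F for a one-bit flag. As a sketch, for a hypothetical field FOO occupying four bits starting at bit 8:

#define FOO_S    8				/* bit offset of the field */
#define FOO_M    0xfU				/* field mask (4 bits wide) */
#define FOO_V(x) ((x) << FOO_S)		/* place a value into the field */
#define FOO_G(x) (((x) >> FOO_S) & FOO_M)	/* extract the field value */
#define FOO_F    FOO_V(1U)			/* single-bit flag form */

Read this way, FW_CMD_RETVAL_G(ntohl(...)) extracts a return value, FW_CMD_REQUEST_F sets the request flag, and FW_HELLO_CMD_MBMASTER_V(...) builds the master-mailbox field, with FW_HELLO_CMD_MBMASTER_M as its mask.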
*/ - _param[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF)); + _param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF)); csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, ARRAY_SIZE(_param), _param, NULL, false, NULL); @@ -1781,8 +1777,8 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path) goto leave; } - mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param); - maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16; + mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param); + maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16; ret = csio_memory_write(hw, mtype, maddr, cf->size + value_to_add, cfg_data); @@ -1871,8 +1867,8 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) goto bye; } } else { - mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param); - maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16; + mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param); + maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16; using_flash = 0; } @@ -1998,13 +1994,13 @@ csio_hw_flash_fw(struct csio_hw *hw) hdr = (const struct fw_hdr *)fw->data; fw_ver = ntohl(hdr->fw_ver); - if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR(hw)) + if (FW_HDR_FW_VER_MAJOR_G(fw_ver) != FW_VERSION_MAJOR(hw)) return -EINVAL; /* wrong major version, won't do */ /* * If the flash FW is unusable or we found something newer, load it. */ - if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR(hw) || + if (FW_HDR_FW_VER_MAJOR_G(hw->fwrev) != FW_VERSION_MAJOR(hw) || fw_ver > hw->fwrev) { ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size, /*force=*/false); diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h index 5db2d85195b1571b9029b8fb2cc3ea30df274d3d..68248da1b9afcaf7f0cdb7ca5c249070c6c7cb3e 100644 --- a/drivers/scsi/csiostor/csio_hw.h +++ b/drivers/scsi/csiostor/csio_hw.h @@ -110,7 +110,6 @@ struct csio_scsi_cpu_info { }; extern int csio_dbg_level; -extern int csio_force_master; extern unsigned int csio_port_mask; extern int csio_msi; diff --git a/drivers/scsi/csiostor/csio_hw_chip.h b/drivers/scsi/csiostor/csio_hw_chip.h index bca0de61ae80bbc8cc44e6877abd82a19d947e9c..4752fed476dfb293e04d5860e1fad2f6116f9327 100644 --- a/drivers/scsi/csiostor/csio_hw_chip.h +++ b/drivers/scsi/csiostor/csio_hw_chip.h @@ -36,60 +36,13 @@ #include "csio_defs.h" -/* FCoE device IDs for T4 */ -#define CSIO_DEVID_T440DBG_FCOE 0x4600 -#define CSIO_DEVID_T420CR_FCOE 0x4601 -#define CSIO_DEVID_T422CR_FCOE 0x4602 -#define CSIO_DEVID_T440CR_FCOE 0x4603 -#define CSIO_DEVID_T420BCH_FCOE 0x4604 -#define CSIO_DEVID_T440BCH_FCOE 0x4605 -#define CSIO_DEVID_T440CH_FCOE 0x4606 -#define CSIO_DEVID_T420SO_FCOE 0x4607 -#define CSIO_DEVID_T420CX_FCOE 0x4608 -#define CSIO_DEVID_T420BT_FCOE 0x4609 -#define CSIO_DEVID_T404BT_FCOE 0x460A -#define CSIO_DEVID_B420_FCOE 0x460B -#define CSIO_DEVID_B404_FCOE 0x460C -#define CSIO_DEVID_T480CR_FCOE 0x460D -#define CSIO_DEVID_T440LPCR_FCOE 0x460E -#define CSIO_DEVID_AMSTERDAM_T4_FCOE 0x460F -#define CSIO_DEVID_HUAWEI_T480_FCOE 0x4680 -#define CSIO_DEVID_HUAWEI_T440_FCOE 0x4681 -#define CSIO_DEVID_HUAWEI_STG310_FCOE 0x4682 -#define CSIO_DEVID_ACROMAG_XMC_XAUI 0x4683 -#define CSIO_DEVID_ACROMAG_XMC_SFP_FCOE 0x4684 -#define CSIO_DEVID_QUANTA_MEZZ_SFP_FCOE 0x4685 -#define CSIO_DEVID_HUAWEI_10GT_FCOE 0x4686 -#define CSIO_DEVID_HUAWEI_T440_TOE_FCOE 0x4687 - -/* FCoE device IDs for T5 */ -#define CSIO_DEVID_T580DBG_FCOE 0x5600 -#define CSIO_DEVID_T520CR_FCOE 0x5601 -#define CSIO_DEVID_T522CR_FCOE 0x5602 -#define 
CSIO_DEVID_T540CR_FCOE 0x5603 -#define CSIO_DEVID_T520BCH_FCOE 0x5604 -#define CSIO_DEVID_T540BCH_FCOE 0x5605 -#define CSIO_DEVID_T540CH_FCOE 0x5606 -#define CSIO_DEVID_T520SO_FCOE 0x5607 -#define CSIO_DEVID_T520CX_FCOE 0x5608 -#define CSIO_DEVID_T520BT_FCOE 0x5609 -#define CSIO_DEVID_T504BT_FCOE 0x560A -#define CSIO_DEVID_B520_FCOE 0x560B -#define CSIO_DEVID_B504_FCOE 0x560C -#define CSIO_DEVID_T580CR2_FCOE 0x560D -#define CSIO_DEVID_T540LPCR_FCOE 0x560E -#define CSIO_DEVID_AMSTERDAM_T5_FCOE 0x560F -#define CSIO_DEVID_T580LPCR_FCOE 0x5610 -#define CSIO_DEVID_T520LLCR_FCOE 0x5611 -#define CSIO_DEVID_T560CR_FCOE 0x5612 -#define CSIO_DEVID_T580CR_FCOE 0x5613 - /* Define MACRO values */ #define CSIO_HW_T4 0x4000 #define CSIO_T4_FCOE_ASIC 0x4600 #define CSIO_HW_T5 0x5000 #define CSIO_T5_FCOE_ASIC 0x5600 #define CSIO_HW_CHIP_MASK 0xF000 + #define T4_REGMAP_SIZE (160 * 1024) #define T5_REGMAP_SIZE (332 * 1024) #define FW_FNAME_T4 "cxgb4/t4fw.bin" diff --git a/drivers/scsi/csiostor/csio_hw_t4.c b/drivers/scsi/csiostor/csio_hw_t4.c index 89ecbac5478f6a14cfe60e77517f172062a0f9a8..95d831857640eecb28cab0dd2e67d7d94a028999 100644 --- a/drivers/scsi/csiostor/csio_hw_t4.c +++ b/drivers/scsi/csiostor/csio_hw_t4.c @@ -307,12 +307,12 @@ csio_t4_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr, * MEM_EDC1 = 1 * MEM_MC = 2 -- T4 */ - edc_size = EDRAM_SIZE_GET(csio_rd_reg32(hw, MA_EDRAM0_BAR)); + edc_size = EDRAM0_SIZE_G(csio_rd_reg32(hw, MA_EDRAM0_BAR_A)); if (mtype != MEM_MC1) memoffset = (mtype * (edc_size * 1024 * 1024)); else { - mc_size = EXT_MEM_SIZE_GET(csio_rd_reg32(hw, - MA_EXT_MEMORY_BAR)); + mc_size = EXT_MEM_SIZE_G(csio_rd_reg32(hw, + MA_EXT_MEMORY_BAR_A)); memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; } @@ -383,11 +383,12 @@ static void csio_t4_dfs_create_ext_mem(struct csio_hw *hw) { u32 size; - int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE); - if (i & EXT_MEM_ENABLE) { - size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR); + int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE_A); + + if (i & EXT_MEM_ENABLE_F) { + size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR_A); csio_add_debugfs_mem(hw, "mc", MEM_MC, - EXT_MEM_SIZE_GET(size)); + EXT_MEM_SIZE_G(size)); } } diff --git a/drivers/scsi/csiostor/csio_hw_t5.c b/drivers/scsi/csiostor/csio_hw_t5.c index 27745c170c24770360052457556047f2e261e2c0..66e180a58718b1c7e7b8c76f806aa0f38582c498 100644 --- a/drivers/scsi/csiostor/csio_hw_t5.c +++ b/drivers/scsi/csiostor/csio_hw_t5.c @@ -298,12 +298,12 @@ csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr, * MEM_MC0 = 2 -- For T5 * MEM_MC1 = 3 -- For T5 */ - edc_size = EDRAM_SIZE_GET(csio_rd_reg32(hw, MA_EDRAM0_BAR)); + edc_size = EDRAM0_SIZE_G(csio_rd_reg32(hw, MA_EDRAM0_BAR_A)); if (mtype != MEM_MC1) memoffset = (mtype * (edc_size * 1024 * 1024)); else { - mc_size = EXT_MEM_SIZE_GET(csio_rd_reg32(hw, - MA_EXT_MEMORY_BAR)); + mc_size = EXT_MEM_SIZE_G(csio_rd_reg32(hw, + MA_EXT_MEMORY_BAR_A)); memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; } @@ -372,16 +372,17 @@ static void csio_t5_dfs_create_ext_mem(struct csio_hw *hw) { u32 size; - int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE); - if (i & EXT_MEM_ENABLE) { - size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR); + int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE_A); + + if (i & EXT_MEM_ENABLE_F) { + size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR_A); csio_add_debugfs_mem(hw, "mc0", MEM_MC0, - EXT_MEM_SIZE_GET(size)); + EXT_MEM_SIZE_G(size)); } - if (i & EXT_MEM1_ENABLE) { - size = csio_rd_reg32(hw, MA_EXT_MEMORY1_BAR); + if (i & 
EXT_MEM1_ENABLE_F) { + size = csio_rd_reg32(hw, MA_EXT_MEMORY1_BAR_A); csio_add_debugfs_mem(hw, "mc1", MEM_MC1, - EXT_MEM_SIZE_GET(size)); + EXT_MEM_SIZE_G(size)); } } diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index 17794add855c4c5097e7db2a7eaa561699a9ecb9..34d20cc3e110fcc1ec99aad913b74e87651734e0 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c @@ -128,10 +128,10 @@ static int csio_setup_debugfs(struct csio_hw *hw) if (IS_ERR_OR_NULL(hw->debugfs_root)) return -1; - i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE); - if (i & EDRAM0_ENABLE) + i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE_A); + if (i & EDRAM0_ENABLE_F) csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5); - if (i & EDRAM1_ENABLE) + if (i & EDRAM1_ENABLE_F) csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5); hw->chip_ops->chip_dfs_create_ext_mem(hw); @@ -955,6 +955,10 @@ static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) struct csio_hw *hw; struct csio_lnode *ln; + /* probe only T5 cards */ + if (!csio_is_t5((pdev->device & CSIO_HW_CHIP_MASK))) + return -ENODEV; + rv = csio_pci_init(pdev, &bars); if (rv) goto err; @@ -974,10 +978,10 @@ static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) } sprintf(hw->fwrev_str, "%u.%u.%u.%u\n", - FW_HDR_FW_VER_MAJOR_GET(hw->fwrev), - FW_HDR_FW_VER_MINOR_GET(hw->fwrev), - FW_HDR_FW_VER_MICRO_GET(hw->fwrev), - FW_HDR_FW_VER_BUILD_GET(hw->fwrev)); + FW_HDR_FW_VER_MAJOR_G(hw->fwrev), + FW_HDR_FW_VER_MINOR_G(hw->fwrev), + FW_HDR_FW_VER_MICRO_G(hw->fwrev), + FW_HDR_FW_VER_BUILD_G(hw->fwrev)); for (i = 0; i < hw->num_pports; i++) { ln = csio_shost_init(hw, &pdev->dev, true, NULL); @@ -1167,53 +1171,21 @@ static struct pci_error_handlers csio_err_handler = { .resume = csio_pci_resume, }; -static const struct pci_device_id csio_pci_tbl[] = { - CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0), /* T4 DEBUG FCOE */ - CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0), /* T420CR FCOE */ - CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0), /* T422CR FCOE */ - CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0), /* T440CR FCOE */ - CSIO_DEVICE(CSIO_DEVID_T420BCH_FCOE, 0), /* T420BCH FCOE */ - CSIO_DEVICE(CSIO_DEVID_T440BCH_FCOE, 0), /* T440BCH FCOE */ - CSIO_DEVICE(CSIO_DEVID_T440CH_FCOE, 0), /* T440CH FCOE */ - CSIO_DEVICE(CSIO_DEVID_T420SO_FCOE, 0), /* T420SO FCOE */ - CSIO_DEVICE(CSIO_DEVID_T420CX_FCOE, 0), /* T420CX FCOE */ - CSIO_DEVICE(CSIO_DEVID_T420BT_FCOE, 0), /* T420BT FCOE */ - CSIO_DEVICE(CSIO_DEVID_T404BT_FCOE, 0), /* T404BT FCOE */ - CSIO_DEVICE(CSIO_DEVID_B420_FCOE, 0), /* B420 FCOE */ - CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0), /* B404 FCOE */ - CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0), /* T480 CR FCOE */ - CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0), /* T440 LP-CR FCOE */ - CSIO_DEVICE(CSIO_DEVID_AMSTERDAM_T4_FCOE, 0), /* AMSTERDAM T4 FCOE */ - CSIO_DEVICE(CSIO_DEVID_HUAWEI_T480_FCOE, 0), /* HUAWEI T480 FCOE */ - CSIO_DEVICE(CSIO_DEVID_HUAWEI_T440_FCOE, 0), /* HUAWEI T440 FCOE */ - CSIO_DEVICE(CSIO_DEVID_HUAWEI_STG310_FCOE, 0), /* HUAWEI STG FCOE */ - CSIO_DEVICE(CSIO_DEVID_ACROMAG_XMC_XAUI, 0), /* ACROMAG XAUI FCOE */ - CSIO_DEVICE(CSIO_DEVID_QUANTA_MEZZ_SFP_FCOE, 0),/* QUANTA MEZZ FCOE */ - CSIO_DEVICE(CSIO_DEVID_HUAWEI_10GT_FCOE, 0), /* HUAWEI 10GT FCOE */ - CSIO_DEVICE(CSIO_DEVID_HUAWEI_T440_TOE_FCOE, 0),/* HUAWEI T4 TOE FCOE */ - CSIO_DEVICE(CSIO_DEVID_T580DBG_FCOE, 0), /* T5 DEBUG FCOE */ - CSIO_DEVICE(CSIO_DEVID_T520CR_FCOE, 0), /* T520CR FCOE */ - CSIO_DEVICE(CSIO_DEVID_T522CR_FCOE, 0), /* T522CR FCOE */ - 
CSIO_DEVICE(CSIO_DEVID_T540CR_FCOE, 0), /* T540CR FCOE */ - CSIO_DEVICE(CSIO_DEVID_T520BCH_FCOE, 0), /* T520BCH FCOE */ - CSIO_DEVICE(CSIO_DEVID_T540BCH_FCOE, 0), /* T540BCH FCOE */ - CSIO_DEVICE(CSIO_DEVID_T540CH_FCOE, 0), /* T540CH FCOE */ - CSIO_DEVICE(CSIO_DEVID_T520SO_FCOE, 0), /* T520SO FCOE */ - CSIO_DEVICE(CSIO_DEVID_T520CX_FCOE, 0), /* T520CX FCOE */ - CSIO_DEVICE(CSIO_DEVID_T520BT_FCOE, 0), /* T520BT FCOE */ - CSIO_DEVICE(CSIO_DEVID_T504BT_FCOE, 0), /* T504BT FCOE */ - CSIO_DEVICE(CSIO_DEVID_B520_FCOE, 0), /* B520 FCOE */ - CSIO_DEVICE(CSIO_DEVID_B504_FCOE, 0), /* B504 FCOE */ - CSIO_DEVICE(CSIO_DEVID_T580CR2_FCOE, 0), /* T580 CR FCOE */ - CSIO_DEVICE(CSIO_DEVID_T540LPCR_FCOE, 0), /* T540 LP-CR FCOE */ - CSIO_DEVICE(CSIO_DEVID_AMSTERDAM_T5_FCOE, 0), /* AMSTERDAM T5 FCOE */ - CSIO_DEVICE(CSIO_DEVID_T580LPCR_FCOE, 0), /* T580 LP-CR FCOE */ - CSIO_DEVICE(CSIO_DEVID_T520LLCR_FCOE, 0), /* T520 LL-CR FCOE */ - CSIO_DEVICE(CSIO_DEVID_T560CR_FCOE, 0), /* T560 CR FCOE */ - CSIO_DEVICE(CSIO_DEVID_T580CR_FCOE, 0), /* T580 CR FCOE */ - { 0, 0, 0, 0, 0, 0, 0 } -}; +/* + * Macros needed to support the PCI Device ID Table ... + */ +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ + static struct pci_device_id csio_pci_tbl[] = { +/* Define for iSCSI uses PF5, FCoE uses PF6 */ +#define CH_PCI_DEVICE_ID_FUNCTION 0x5 +#define CH_PCI_DEVICE_ID_FUNCTION2 0x6 + +#define CH_PCI_ID_TABLE_ENTRY(devid) \ + { PCI_VDEVICE(CHELSIO, (devid)), 0 } + +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } } +#include "t4_pci_id_tbl.h" static struct pci_driver csio_pci_driver = { .name = KBUILD_MODNAME, diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c index ffe9be04dc39ab5a6133715ea66ab644d2fd8be0..87f9280d9b431370bf9391debc3b1438183ea5d8 100644 --- a/drivers/scsi/csiostor/csio_lnode.c +++ b/drivers/scsi/csiostor/csio_lnode.c @@ -603,7 +603,7 @@ csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp) enum fw_retval retval; __be32 nport_id; - retval = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)); + retval = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)); if (retval != FW_SUCCESS) { csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval); mempool_free(mbp, hw->mb_mempool); @@ -770,7 +770,7 @@ csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp) (struct fw_fcoe_fcf_cmd *)(mbp->mb); enum fw_retval retval; - retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16)); + retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16)); if (retval != FW_SUCCESS) { csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n", retval); @@ -1506,7 +1506,7 @@ csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd) } } else if (cpl_op == CPL_FW6_PLD) { wr = (struct fw_wr_hdr *) (cmd + 4); - if (FW_WR_OP_GET(be32_to_cpu(wr->hi)) + if (FW_WR_OP_G(be32_to_cpu(wr->hi)) == FW_RDEV_WR) { rdev_wr = (struct fw_rdev_wr *) (cmd + 4); @@ -1574,17 +1574,17 @@ out_pld: return; } else { csio_warn(hw, "unexpected WR op(0x%x) recv\n", - FW_WR_OP_GET(be32_to_cpu((wr->hi)))); + FW_WR_OP_G(be32_to_cpu((wr->hi)))); CSIO_INC_STATS(hw, n_cpl_unexp); } } else if (cpl_op == CPL_FW6_MSG) { wr = (struct fw_wr_hdr *) (cmd); - if (FW_WR_OP_GET(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) { + if (FW_WR_OP_G(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) { csio_ln_mgmt_wr_handler(hw, wr, sizeof(struct fw_fcoe_els_ct_wr)); } else { csio_warn(hw, "unexpected WR op(0x%x) recv\n", - FW_WR_OP_GET(be32_to_cpu((wr->hi)))); + FW_WR_OP_G(be32_to_cpu((wr->hi)))); CSIO_INC_STATS(hw, n_cpl_unexp); } } else { @@ -1668,12 +1668,12 @@ 
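The hand-maintained csio_pci_tbl and CSIO_DEVID_* list above are replaced by the X-macro style t4_pci_id_tbl.h shared with cxgb4: the includer defines the BEGIN/ENTRY/END macros to shape the shared device-ID list into its own table, then includes the header, with CH_PCI_DEVICE_ID_FUNCTION selecting which PCI function the entries target (per the comment above, PF5 for iSCSI, PF6 for FCoE). A condensed sketch of the mechanism, with a hypothetical two-entry header body standing in for the real t4_pci_id_tbl.h:

#include <linux/pci.h>

/* The includer shapes the list first, much as csio_init.c does above
 * (my_pci_tbl is an illustrative name):
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static struct pci_device_id my_pci_tbl[] = {
#define CH_PCI_ID_TABLE_ENTRY(devid) \
	{ PCI_VDEVICE(CHELSIO, (devid)), 0 }
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }

/* A condensed, hypothetical t4_pci_id_tbl.h body then expands into the
 * table; the real header lists every known T4/T5 device ID once, so all
 * Chelsio drivers stay in sync automatically:
 */
CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
	CH_PCI_ID_TABLE_ENTRY(0x5601),	/* T520-CR */
	CH_PCI_ID_TABLE_ENTRY(0x5602),	/* T522-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;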
csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len, __be32 port_id; wr = (struct fw_fcoe_els_ct_wr *)fw_wr; - wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_FCOE_ELS_CT_WR) | + wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_FCOE_ELS_CT_WR) | FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len)); wr_len = DIV_ROUND_UP(wr_len, 16); - wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(flow_id) | - FW_WR_LEN16(wr_len)); + wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(flow_id) | + FW_WR_LEN16_V(wr_len)); wr->els_ct_type = sub_op; wr->ctl_pri = 0; wr->cp_en_class = 0; @@ -1757,7 +1757,7 @@ csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req, csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len); else { /* Program DSGL to dma payload */ - dsgl.cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | + dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE | ULPTX_NSGE(1)); dsgl.len0 = cpu_to_be32(pld_len); dsgl.addr0 = cpu_to_be64(pld->paddr); diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c index 15b6351425469db7b7fed1c9c0565641da462558..08c265c0f353278b63a05ac6478423b67b47445e 100644 --- a/drivers/scsi/csiostor/csio_mb.c +++ b/drivers/scsi/csiostor/csio_mb.c @@ -59,7 +59,7 @@ csio_mb_fw_retval(struct csio_mb *mbp) hdr = (struct fw_cmd_hdr *)(mbp->mb); - return FW_CMD_RETVAL_GET(ntohl(hdr->lo)); + return FW_CMD_RETVAL_G(ntohl(hdr->lo)); } /* @@ -81,17 +81,17 @@ csio_mb_hello(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); - cmdp->op_to_write = htonl(FW_CMD_OP(FW_HELLO_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE); - cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); + cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_HELLO_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); + cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); cmdp->err_to_clearinit = htonl( - FW_HELLO_CMD_MASTERDIS(master == CSIO_MASTER_CANT) | - FW_HELLO_CMD_MASTERFORCE(master == CSIO_MASTER_MUST) | - FW_HELLO_CMD_MBMASTER(master == CSIO_MASTER_MUST ? - m_mbox : FW_HELLO_CMD_MBMASTER_MASK) | - FW_HELLO_CMD_MBASYNCNOT(a_mbox) | - FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) | - FW_HELLO_CMD_CLEARINIT); + FW_HELLO_CMD_MASTERDIS_V(master == CSIO_MASTER_CANT) | + FW_HELLO_CMD_MASTERFORCE_V(master == CSIO_MASTER_MUST) | + FW_HELLO_CMD_MBMASTER_V(master == CSIO_MASTER_MUST ? 
+ m_mbox : FW_HELLO_CMD_MBMASTER_M) | + FW_HELLO_CMD_MBASYNCNOT_V(a_mbox) | + FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) | + FW_HELLO_CMD_CLEARINIT_F); } @@ -112,17 +112,17 @@ csio_mb_process_hello_rsp(struct csio_hw *hw, struct csio_mb *mbp, struct fw_hello_cmd *rsp = (struct fw_hello_cmd *)(mbp->mb); uint32_t value; - *retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16)); + *retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16)); if (*retval == FW_SUCCESS) { hw->fwrev = ntohl(rsp->fwrev); value = ntohl(rsp->err_to_clearinit); - *mpfn = FW_HELLO_CMD_MBMASTER_GET(value); + *mpfn = FW_HELLO_CMD_MBMASTER_G(value); - if (value & FW_HELLO_CMD_INIT) + if (value & FW_HELLO_CMD_INIT_F) *state = CSIO_DEV_STATE_INIT; - else if (value & FW_HELLO_CMD_ERR) + else if (value & FW_HELLO_CMD_ERR_F) *state = CSIO_DEV_STATE_ERR; else *state = CSIO_DEV_STATE_UNINIT; @@ -144,9 +144,9 @@ csio_mb_bye(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); - cmdp->op_to_write = htonl(FW_CMD_OP(FW_BYE_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE); - cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); + cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_BYE_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); + cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); } @@ -167,9 +167,9 @@ csio_mb_reset(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); - cmdp->op_to_write = htonl(FW_CMD_OP(FW_RESET_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE); - cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); + cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_RESET_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); + cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); cmdp->val = htonl(reset); cmdp->halt_pkd = htonl(halt); @@ -202,12 +202,12 @@ csio_mb_params(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); - cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | - FW_CMD_REQUEST | - (wr ? FW_CMD_WRITE : FW_CMD_READ) | - FW_PARAMS_CMD_PFN(pf) | - FW_PARAMS_CMD_VFN(vf)); - cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); + cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | + FW_CMD_REQUEST_F | + (wr ? FW_CMD_WRITE_F : FW_CMD_READ_F) | + FW_PARAMS_CMD_PFN_V(pf) | + FW_PARAMS_CMD_VFN_V(vf)); + cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); /* Write Params */ if (wr) { @@ -245,7 +245,7 @@ csio_mb_process_read_params_rsp(struct csio_hw *hw, struct csio_mb *mbp, uint32_t i; __be32 *p = &rsp->param[0].val; - *retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16)); + *retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16)); if (*retval == FW_SUCCESS) for (i = 0; i < nparams; i++, p += 2) @@ -271,14 +271,14 @@ csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, int reg) * specified PCI-E Configuration Space register. 
*/ ldst_cmd->op_to_addrspace = - htonl(FW_CMD_OP(FW_LDST_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ | - FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE)); + htonl(FW_CMD_OP_V(FW_LDST_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE)); ldst_cmd->cycles_to_len16 = htonl(FW_LEN16(struct fw_ldst_cmd)); - ldst_cmd->u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1); + ldst_cmd->u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1); ldst_cmd->u.pcie.ctrl_to_fn = - (FW_LDST_CMD_LC | FW_LDST_CMD_FN(hw->pfn)); + (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(hw->pfn)); ldst_cmd->u.pcie.r = (uint8_t)reg; } @@ -306,10 +306,10 @@ csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, wr ? 0 : 1); - cmdp->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | - (wr ? FW_CMD_WRITE : FW_CMD_READ)); - cmdp->cfvalid_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); + cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + (wr ? FW_CMD_WRITE_F : FW_CMD_READ_F)); + cmdp->cfvalid_to_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); /* Read config */ if (!wr) @@ -347,25 +347,25 @@ csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, void (*cbfn) (struct csio_hw *, struct csio_mb *)) { struct fw_port_cmd *cmdp = (struct fw_port_cmd *)(mbp->mb); - unsigned int lfc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO); + unsigned int lfc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO); CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); - cmdp->op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | - FW_CMD_REQUEST | - (wr ? FW_CMD_EXEC : FW_CMD_READ) | - FW_PORT_CMD_PORTID(portid)); + cmdp->op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | + (wr ? 
FW_CMD_EXEC_F : FW_CMD_READ_F) | + FW_PORT_CMD_PORTID_V(portid)); if (!wr) { cmdp->action_to_len16 = htonl( - FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | - FW_CMD_LEN16(sizeof(*cmdp) / 16)); + FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | + FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); return; } /* Set port */ cmdp->action_to_len16 = htonl( - FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | - FW_CMD_LEN16(sizeof(*cmdp) / 16)); + FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) | + FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); if (fc & PAUSE_RX) lfc |= FW_PORT_CAP_FC_RX; @@ -393,7 +393,7 @@ csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp, { struct fw_port_cmd *rsp = (struct fw_port_cmd *)(mbp->mb); - *retval = FW_CMD_RETVAL_GET(ntohl(rsp->action_to_len16)); + *retval = FW_CMD_RETVAL_G(ntohl(rsp->action_to_len16)); if (*retval == FW_SUCCESS) *caps = ntohs(rsp->u.info.pcap); @@ -415,9 +415,9 @@ csio_mb_initialize(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); - cmdp->op_to_write = htonl(FW_CMD_OP(FW_INITIALIZE_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE); - cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); + cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_INITIALIZE_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); + cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); } @@ -443,18 +443,18 @@ csio_mb_iq_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv, CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1); - cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | - FW_CMD_REQUEST | FW_CMD_EXEC | - FW_IQ_CMD_PFN(iq_params->pfn) | - FW_IQ_CMD_VFN(iq_params->vfn)); + cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | + FW_CMD_REQUEST_F | FW_CMD_EXEC_F | + FW_IQ_CMD_PFN_V(iq_params->pfn) | + FW_IQ_CMD_VFN_V(iq_params->vfn)); - cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | - FW_CMD_LEN16(sizeof(*cmdp) / 16)); + cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | + FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); cmdp->type_to_iqandstindex = htonl( - FW_IQ_CMD_VIID(iq_params->viid) | - FW_IQ_CMD_TYPE(iq_params->type) | - FW_IQ_CMD_IQASYNCH(iq_params->iqasynch)); + FW_IQ_CMD_VIID_V(iq_params->viid) | + FW_IQ_CMD_TYPE_V(iq_params->type) | + FW_IQ_CMD_IQASYNCH_V(iq_params->iqasynch)); cmdp->fl0size = htons(iq_params->fl0size); cmdp->fl0size = htons(iq_params->fl1size); @@ -488,8 +488,8 @@ csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv, struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb); uint32_t iq_start_stop = (iq_params->iq_start) ? 
- FW_IQ_CMD_IQSTART(1) : - FW_IQ_CMD_IQSTOP(1); + FW_IQ_CMD_IQSTART_F : + FW_IQ_CMD_IQSTOP_F; /* * If this IQ write is cascaded with IQ alloc request, do not @@ -499,51 +499,51 @@ csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv, if (!cascaded_req) CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1); - cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_IQ_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE | - FW_IQ_CMD_PFN(iq_params->pfn) | - FW_IQ_CMD_VFN(iq_params->vfn)); + cmdp->op_to_vfn |= htonl(FW_CMD_OP_V(FW_IQ_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_IQ_CMD_PFN_V(iq_params->pfn) | + FW_IQ_CMD_VFN_V(iq_params->vfn)); cmdp->alloc_to_len16 |= htonl(iq_start_stop | - FW_CMD_LEN16(sizeof(*cmdp) / 16)); + FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); cmdp->iqid |= htons(iq_params->iqid); cmdp->fl0id |= htons(iq_params->fl0id); cmdp->fl1id |= htons(iq_params->fl1id); cmdp->type_to_iqandstindex |= htonl( - FW_IQ_CMD_IQANDST(iq_params->iqandst) | - FW_IQ_CMD_IQANUS(iq_params->iqanus) | - FW_IQ_CMD_IQANUD(iq_params->iqanud) | - FW_IQ_CMD_IQANDSTINDEX(iq_params->iqandstindex)); + FW_IQ_CMD_IQANDST_V(iq_params->iqandst) | + FW_IQ_CMD_IQANUS_V(iq_params->iqanus) | + FW_IQ_CMD_IQANUD_V(iq_params->iqanud) | + FW_IQ_CMD_IQANDSTINDEX_V(iq_params->iqandstindex)); cmdp->iqdroprss_to_iqesize |= htons( - FW_IQ_CMD_IQPCIECH(iq_params->iqpciech) | - FW_IQ_CMD_IQDCAEN(iq_params->iqdcaen) | - FW_IQ_CMD_IQDCACPU(iq_params->iqdcacpu) | - FW_IQ_CMD_IQINTCNTTHRESH(iq_params->iqintcntthresh) | - FW_IQ_CMD_IQCPRIO(iq_params->iqcprio) | - FW_IQ_CMD_IQESIZE(iq_params->iqesize)); + FW_IQ_CMD_IQPCIECH_V(iq_params->iqpciech) | + FW_IQ_CMD_IQDCAEN_V(iq_params->iqdcaen) | + FW_IQ_CMD_IQDCACPU_V(iq_params->iqdcacpu) | + FW_IQ_CMD_IQINTCNTTHRESH_V(iq_params->iqintcntthresh) | + FW_IQ_CMD_IQCPRIO_V(iq_params->iqcprio) | + FW_IQ_CMD_IQESIZE_V(iq_params->iqesize)); cmdp->iqsize |= htons(iq_params->iqsize); cmdp->iqaddr |= cpu_to_be64(iq_params->iqaddr); if (iq_params->type == 0) { cmdp->iqns_to_fl0congen |= htonl( - FW_IQ_CMD_IQFLINTIQHSEN(iq_params->iqflintiqhsen)| - FW_IQ_CMD_IQFLINTCONGEN(iq_params->iqflintcongen)); + FW_IQ_CMD_IQFLINTIQHSEN_V(iq_params->iqflintiqhsen)| + FW_IQ_CMD_IQFLINTCONGEN_V(iq_params->iqflintcongen)); } if (iq_params->fl0size && iq_params->fl0addr && (iq_params->fl0id != 0xFFFF)) { cmdp->iqns_to_fl0congen |= htonl( - FW_IQ_CMD_FL0HOSTFCMODE(iq_params->fl0hostfcmode)| - FW_IQ_CMD_FL0CPRIO(iq_params->fl0cprio) | - FW_IQ_CMD_FL0PADEN(iq_params->fl0paden) | - FW_IQ_CMD_FL0PACKEN(iq_params->fl0packen)); + FW_IQ_CMD_FL0HOSTFCMODE_V(iq_params->fl0hostfcmode)| + FW_IQ_CMD_FL0CPRIO_V(iq_params->fl0cprio) | + FW_IQ_CMD_FL0PADEN_V(iq_params->fl0paden) | + FW_IQ_CMD_FL0PACKEN_V(iq_params->fl0packen)); cmdp->fl0dcaen_to_fl0cidxfthresh |= htons( - FW_IQ_CMD_FL0DCAEN(iq_params->fl0dcaen) | - FW_IQ_CMD_FL0DCACPU(iq_params->fl0dcacpu) | - FW_IQ_CMD_FL0FBMIN(iq_params->fl0fbmin) | - FW_IQ_CMD_FL0FBMAX(iq_params->fl0fbmax) | - FW_IQ_CMD_FL0CIDXFTHRESH(iq_params->fl0cidxfthresh)); + FW_IQ_CMD_FL0DCAEN_V(iq_params->fl0dcaen) | + FW_IQ_CMD_FL0DCACPU_V(iq_params->fl0dcacpu) | + FW_IQ_CMD_FL0FBMIN_V(iq_params->fl0fbmin) | + FW_IQ_CMD_FL0FBMAX_V(iq_params->fl0fbmax) | + FW_IQ_CMD_FL0CIDXFTHRESH_V(iq_params->fl0cidxfthresh)); cmdp->fl0size |= htons(iq_params->fl0size); cmdp->fl0addr |= cpu_to_be64(iq_params->fl0addr); } @@ -588,7 +588,7 @@ csio_mb_iq_alloc_write_rsp(struct csio_hw *hw, struct csio_mb *mbp, { struct fw_iq_cmd *rsp = (struct fw_iq_cmd *)(mbp->mb); - *ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)); + *ret_val 
= FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)); if (*ret_val == FW_SUCCESS) { iq_params->physiqid = ntohs(rsp->physiqid); iq_params->iqid = ntohs(rsp->iqid); @@ -622,13 +622,13 @@ csio_mb_iq_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv, CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1); - cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | - FW_CMD_REQUEST | FW_CMD_EXEC | - FW_IQ_CMD_PFN(iq_params->pfn) | - FW_IQ_CMD_VFN(iq_params->vfn)); - cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_FREE | - FW_CMD_LEN16(sizeof(*cmdp) / 16)); - cmdp->type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iq_params->type)); + cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | + FW_CMD_REQUEST_F | FW_CMD_EXEC_F | + FW_IQ_CMD_PFN_V(iq_params->pfn) | + FW_IQ_CMD_VFN_V(iq_params->vfn)); + cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F | + FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + cmdp->type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iq_params->type)); cmdp->iqid = htons(iq_params->iqid); cmdp->fl0id = htons(iq_params->fl0id); @@ -657,12 +657,12 @@ csio_mb_eq_ofld_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv, struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb); CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1); - cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | - FW_CMD_REQUEST | FW_CMD_EXEC | - FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) | - FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn)); - cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC | - FW_CMD_LEN16(sizeof(*cmdp) / 16)); + cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | + FW_CMD_REQUEST_F | FW_CMD_EXEC_F | + FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) | + FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn)); + cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F | + FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); } /* csio_mb_eq_ofld_alloc */ @@ -694,7 +694,8 @@ csio_mb_eq_ofld_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv, struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb); uint32_t eq_start_stop = (eq_ofld_params->eqstart) ? 
- FW_EQ_OFLD_CMD_EQSTART : FW_EQ_OFLD_CMD_EQSTOP; + FW_EQ_OFLD_CMD_EQSTART_F : + FW_EQ_OFLD_CMD_EQSTOP_F; /* * If this EQ write is cascaded with EQ alloc request, do not @@ -704,29 +705,29 @@ csio_mb_eq_ofld_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv, if (!cascaded_req) CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1); - cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE | - FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) | - FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn)); + cmdp->op_to_vfn |= htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) | + FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn)); cmdp->alloc_to_len16 |= htonl(eq_start_stop | - FW_CMD_LEN16(sizeof(*cmdp) / 16)); + FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); - cmdp->eqid_pkd |= htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid)); + cmdp->eqid_pkd |= htonl(FW_EQ_OFLD_CMD_EQID_V(eq_ofld_params->eqid)); cmdp->fetchszm_to_iqid |= htonl( - FW_EQ_OFLD_CMD_HOSTFCMODE(eq_ofld_params->hostfcmode) | - FW_EQ_OFLD_CMD_CPRIO(eq_ofld_params->cprio) | - FW_EQ_OFLD_CMD_PCIECHN(eq_ofld_params->pciechn) | - FW_EQ_OFLD_CMD_IQID(eq_ofld_params->iqid)); + FW_EQ_OFLD_CMD_HOSTFCMODE_V(eq_ofld_params->hostfcmode) | + FW_EQ_OFLD_CMD_CPRIO_V(eq_ofld_params->cprio) | + FW_EQ_OFLD_CMD_PCIECHN_V(eq_ofld_params->pciechn) | + FW_EQ_OFLD_CMD_IQID_V(eq_ofld_params->iqid)); cmdp->dcaen_to_eqsize |= htonl( - FW_EQ_OFLD_CMD_DCAEN(eq_ofld_params->dcaen) | - FW_EQ_OFLD_CMD_DCACPU(eq_ofld_params->dcacpu) | - FW_EQ_OFLD_CMD_FBMIN(eq_ofld_params->fbmin) | - FW_EQ_OFLD_CMD_FBMAX(eq_ofld_params->fbmax) | - FW_EQ_OFLD_CMD_CIDXFTHRESHO(eq_ofld_params->cidxfthresho) | - FW_EQ_OFLD_CMD_CIDXFTHRESH(eq_ofld_params->cidxfthresh) | - FW_EQ_OFLD_CMD_EQSIZE(eq_ofld_params->eqsize)); + FW_EQ_OFLD_CMD_DCAEN_V(eq_ofld_params->dcaen) | + FW_EQ_OFLD_CMD_DCACPU_V(eq_ofld_params->dcacpu) | + FW_EQ_OFLD_CMD_FBMIN_V(eq_ofld_params->fbmin) | + FW_EQ_OFLD_CMD_FBMAX_V(eq_ofld_params->fbmax) | + FW_EQ_OFLD_CMD_CIDXFTHRESHO_V(eq_ofld_params->cidxfthresho) | + FW_EQ_OFLD_CMD_CIDXFTHRESH_V(eq_ofld_params->cidxfthresh) | + FW_EQ_OFLD_CMD_EQSIZE_V(eq_ofld_params->eqsize)); cmdp->eqaddr |= cpu_to_be64(eq_ofld_params->eqaddr); @@ -773,12 +774,12 @@ csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *hw, { struct fw_eq_ofld_cmd *rsp = (struct fw_eq_ofld_cmd *)(mbp->mb); - *ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)); + *ret_val = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)); if (*ret_val == FW_SUCCESS) { - eq_ofld_params->eqid = FW_EQ_OFLD_CMD_EQID_GET( + eq_ofld_params->eqid = FW_EQ_OFLD_CMD_EQID_G( ntohl(rsp->eqid_pkd)); - eq_ofld_params->physeqid = FW_EQ_OFLD_CMD_PHYSEQID_GET( + eq_ofld_params->physeqid = FW_EQ_OFLD_CMD_PHYSEQID_G( ntohl(rsp->physeqid_pkd)); } else eq_ofld_params->eqid = 0; @@ -807,13 +808,13 @@ csio_mb_eq_ofld_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv, CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1); - cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | - FW_CMD_REQUEST | FW_CMD_EXEC | - FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) | - FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn)); - cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | - FW_CMD_LEN16(sizeof(*cmdp) / 16)); - cmdp->eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid)); + cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | + FW_CMD_REQUEST_F | FW_CMD_EXEC_F | + FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) | + FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn)); + cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F | + 
FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + cmdp->eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eq_ofld_params->eqid)); } /* csio_mb_eq_ofld_free */ @@ -840,15 +841,15 @@ csio_write_fcoe_link_cond_init_mb(struct csio_lnode *ln, struct csio_mb *mbp, CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1); cmdp->op_to_portid = htonl(( - FW_CMD_OP(FW_FCOE_LINK_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | + FW_CMD_OP_V(FW_FCOE_LINK_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_FCOE_LINK_CMD_PORTID(port_id))); cmdp->sub_opcode_fcfi = htonl( FW_FCOE_LINK_CMD_SUB_OPCODE(sub_opcode) | FW_FCOE_LINK_CMD_FCFI(fcfi)); cmdp->lstatus = link_status; - cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); + cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); } /* csio_write_fcoe_link_cond_init_mb */ @@ -873,11 +874,11 @@ csio_fcoe_read_res_info_init_mb(struct csio_hw *hw, struct csio_mb *mbp, CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1); - cmdp->op_to_read = htonl((FW_CMD_OP(FW_FCOE_RES_INFO_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ)); + cmdp->op_to_read = htonl((FW_CMD_OP_V(FW_FCOE_RES_INFO_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F)); - cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); + cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); } /* csio_fcoe_read_res_info_init_mb */ @@ -908,13 +909,13 @@ csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *ln, struct csio_mb *mbp, CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1); - cmdp->op_to_fcfi = htonl((FW_CMD_OP(FW_FCOE_VNP_CMD) | - FW_CMD_REQUEST | - FW_CMD_EXEC | + cmdp->op_to_fcfi = htonl((FW_CMD_OP_V(FW_FCOE_VNP_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_FCOE_VNP_CMD_FCFI(fcfi))); cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_ALLOC | - FW_CMD_LEN16(sizeof(*cmdp) / 16)); + FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi)); @@ -948,11 +949,11 @@ csio_fcoe_vnp_read_init_mb(struct csio_lnode *ln, struct csio_mb *mbp, (struct fw_fcoe_vnp_cmd *)(mbp->mb); CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1); - cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ | + cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_VNP_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | FW_FCOE_VNP_CMD_FCFI(fcfi)); - cmdp->alloc_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); + cmdp->alloc_to_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi)); } @@ -978,12 +979,12 @@ csio_fcoe_vnp_free_init_mb(struct csio_lnode *ln, struct csio_mb *mbp, CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1); - cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD) | - FW_CMD_REQUEST | - FW_CMD_EXEC | + cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_VNP_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_FCOE_VNP_CMD_FCFI(fcfi)); cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_FREE | - FW_CMD_LEN16(sizeof(*cmdp) / 16)); + FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi)); } @@ -1009,11 +1010,11 @@ csio_fcoe_read_fcf_init_mb(struct csio_lnode *ln, struct csio_mb *mbp, CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1); - cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_FCF_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ | + cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_FCF_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | FW_FCOE_FCF_CMD_FCFI(fcfi)); - cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); + cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); } /* csio_fcoe_read_fcf_init_mb */ @@ -1029,9 +1030,9 @@ 
csio_fcoe_read_portparams_init_mb(struct csio_hw *hw, struct csio_mb *mbp, CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1); mbp->mb_size = 64; - cmdp->op_to_flowid = htonl(FW_CMD_OP(FW_FCOE_STATS_CMD) | - FW_CMD_REQUEST | FW_CMD_READ); - cmdp->free_to_len16 = htonl(FW_CMD_LEN16(CSIO_MAX_MB_SIZE/16)); + cmdp->op_to_flowid = htonl(FW_CMD_OP_V(FW_FCOE_STATS_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F); + cmdp->free_to_len16 = htonl(FW_CMD_LEN16_V(CSIO_MAX_MB_SIZE/16)); cmdp->u.ctl.nstats_port = FW_FCOE_STATS_CMD_NSTATS(portparams->nstats) | FW_FCOE_STATS_CMD_PORT(portparams->portid); @@ -1053,7 +1054,7 @@ csio_mb_process_portparams_rsp(struct csio_hw *hw, uint8_t *src; uint8_t *dst; - *retval = FW_CMD_RETVAL_GET(ntohl(rsp->free_to_len16)); + *retval = FW_CMD_RETVAL_G(ntohl(rsp->free_to_len16)); memset(&stats, 0, sizeof(struct fw_fcoe_port_stats)); @@ -1125,7 +1126,7 @@ csio_mb_dump_fw_dbg(struct csio_hw *hw, __be64 *cmd) { struct fw_debug_cmd *dbg = (struct fw_debug_cmd *)cmd; - if ((FW_DEBUG_CMD_TYPE_GET(ntohl(dbg->op_type))) == 1) { + if ((FW_DEBUG_CMD_TYPE_G(ntohl(dbg->op_type))) == 1) { csio_info(hw, "FW print message:\n"); csio_info(hw, "\tdebug->dprtstridx = %d\n", ntohs(dbg->u.prt.dprtstridx)); @@ -1305,7 +1306,7 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp) hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg)); fw_hdr = (struct fw_cmd_hdr *)&hdr; - switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) { + switch (FW_CMD_OP_G(ntohl(fw_hdr->hi))) { case FW_DEBUG_CMD: csio_mb_debug_cmd_handler(hw); continue; @@ -1406,9 +1407,9 @@ csio_mb_fwevt_handler(struct csio_hw *hw, __be64 *cmd) if (opcode == FW_PORT_CMD) { pcmd = (struct fw_port_cmd *)cmd; - port_id = FW_PORT_CMD_PORTID_GET( + port_id = FW_PORT_CMD_PORTID_G( ntohl(pcmd->op_to_portid)); - action = FW_PORT_CMD_ACTION_GET( + action = FW_PORT_CMD_ACTION_G( ntohl(pcmd->action_to_len16)); if (action != FW_PORT_ACTION_GET_PORT_INFO) { csio_err(hw, "Unhandled FW_PORT_CMD action: %u\n", @@ -1417,15 +1418,15 @@ csio_mb_fwevt_handler(struct csio_hw *hw, __be64 *cmd) } link_status = ntohl(pcmd->u.info.lstatus_to_modtype); - mod_type = FW_PORT_CMD_MODTYPE_GET(link_status); + mod_type = FW_PORT_CMD_MODTYPE_G(link_status); hw->pport[port_id].link_status = - FW_PORT_CMD_LSTATUS_GET(link_status); + FW_PORT_CMD_LSTATUS_G(link_status); hw->pport[port_id].link_speed = - FW_PORT_CMD_LSPEED_GET(link_status); + FW_PORT_CMD_LSPEED_G(link_status); csio_info(hw, "Port:%x - LINK %s\n", port_id, - FW_PORT_CMD_LSTATUS_GET(link_status) ? "UP" : "DOWN"); + FW_PORT_CMD_LSTATUS_G(link_status) ? 
"UP" : "DOWN"); if (mod_type != hw->pport[port_id].mod_type) { hw->pport[port_id].mod_type = mod_type; @@ -1498,7 +1499,7 @@ csio_mb_isr_handler(struct csio_hw *hw) hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg)); fw_hdr = (struct fw_cmd_hdr *)&hdr; - switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) { + switch (FW_CMD_OP_G(ntohl(fw_hdr->hi))) { case FW_DEBUG_CMD: csio_mb_debug_cmd_handler(hw); return -EINVAL; @@ -1571,11 +1572,11 @@ csio_mb_tmo_handler(struct csio_hw *hw) fw_hdr = (struct fw_cmd_hdr *)(mbp->mb); csio_dbg(hw, "Mailbox num:%x op:0x%x timed out\n", hw->pfn, - FW_CMD_OP_GET(ntohl(fw_hdr->hi))); + FW_CMD_OP_G(ntohl(fw_hdr->hi))); mbm->mcurrent = NULL; CSIO_INC_STATS(mbm, n_tmo); - fw_hdr->lo = htonl(FW_CMD_RETVAL(FW_ETIMEDOUT)); + fw_hdr->lo = htonl(FW_CMD_RETVAL_V(FW_ETIMEDOUT)); return mbp; } @@ -1624,10 +1625,10 @@ csio_mb_cancel_all(struct csio_hw *hw, struct list_head *cbfn_q) hdr = (struct fw_cmd_hdr *)(mbp->mb); csio_dbg(hw, "Cancelling pending mailbox num %x op:%x\n", - hw->pfn, FW_CMD_OP_GET(ntohl(hdr->hi))); + hw->pfn, FW_CMD_OP_G(ntohl(hdr->hi))); CSIO_INC_STATS(mbm, n_cancel); - hdr->lo = htonl(FW_CMD_RETVAL(FW_HOSTERROR)); + hdr->lo = htonl(FW_CMD_RETVAL_V(FW_HOSTERROR)); } } diff --git a/drivers/scsi/csiostor/csio_mb.h b/drivers/scsi/csiostor/csio_mb.h index a84179e54ab93ed935fd8f7ade493c1b365a5ace..1bc82d0bc260259a84f16a03e2049a19844a85f2 100644 --- a/drivers/scsi/csiostor/csio_mb.h +++ b/drivers/scsi/csiostor/csio_mb.h @@ -79,14 +79,14 @@ enum csio_dev_state { }; #define FW_PARAM_DEV(param) \ - (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) + (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \ + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param)) #define FW_PARAM_PFVF(param) \ - (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \ - FW_PARAMS_PARAM_Y(0) | \ - FW_PARAMS_PARAM_Z(0)) + (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \ + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \ + FW_PARAMS_PARAM_Y_V(0) | \ + FW_PARAMS_PARAM_Z_V(0)) enum { PAUSE_RX = 1 << 0, diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c index 4d0b6ce55f20ee8190bd2b7126d643471710e8fa..51ea5dc5f0848d7db70af3fdf6f6c6eaf63f1311 100644 --- a/drivers/scsi/csiostor/csio_scsi.c +++ b/drivers/scsi/csiostor/csio_scsi.c @@ -209,10 +209,10 @@ csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size) struct csio_dma_buf *dma_buf; uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len; - wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_CMD_WR) | + wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_CMD_WR) | FW_SCSI_CMD_WR_IMMDLEN(imm)); - wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) | - FW_WR_LEN16( + wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) | + FW_WR_LEN16_V( DIV_ROUND_UP(size, 16))); wr->cookie = (uintptr_t) req; @@ -301,7 +301,7 @@ csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req, struct csio_dma_buf *dma_buf; struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); - sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_MORE | + sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE | ULPTX_NSGE(req->nsge)); /* Now add the data SGLs */ if (likely(!req->dcopy)) { @@ -370,10 +370,10 @@ csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size) uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len; struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); - wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_READ_WR) | + wr->op_immdlen = 
cpu_to_be32(FW_WR_OP_V(FW_SCSI_READ_WR) | FW_SCSI_READ_WR_IMMDLEN(imm)); - wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) | - FW_WR_LEN16(DIV_ROUND_UP(size, 16))); + wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) | + FW_WR_LEN16_V(DIV_ROUND_UP(size, 16))); wr->cookie = (uintptr_t)req; wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); wr->tmo_val = (uint8_t)(req->tmo); @@ -423,10 +423,10 @@ csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size) uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len; struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); - wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_WRITE_WR) | + wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_WRITE_WR) | FW_SCSI_WRITE_WR_IMMDLEN(imm)); - wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) | - FW_WR_LEN16(DIV_ROUND_UP(size, 16))); + wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) | + FW_WR_LEN16_V(DIV_ROUND_UP(size, 16))); wr->cookie = (uintptr_t)req; wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); wr->tmo_val = (uint8_t)(req->tmo); @@ -653,9 +653,9 @@ csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size, struct csio_rnode *rn = req->rnode; struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr; - wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_ABRT_CLS_WR)); - wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) | - FW_WR_LEN16( + wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_ABRT_CLS_WR)); + wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) | + FW_WR_LEN16_V( DIV_ROUND_UP(size, 16))); wr->cookie = (uintptr_t) req; diff --git a/drivers/scsi/csiostor/csio_wr.h b/drivers/scsi/csiostor/csio_wr.h index 8d30e7ac1f5efb49941d27fbf36a7c7930200ed6..0c0dd9a658cc254d007560c2fe861860727c8743 100644 --- a/drivers/scsi/csiostor/csio_wr.h +++ b/drivers/scsi/csiostor/csio_wr.h @@ -101,7 +101,7 @@ /* WR status is at the same position as retval in a CMD header */ #define csio_wr_status(_wr) \ - (FW_CMD_RETVAL_GET(ntohl(((struct fw_cmd_hdr *)(_wr))->lo))) + (FW_CMD_RETVAL_G(ntohl(((struct fw_cmd_hdr *)(_wr))->lo))) struct csio_hw; diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index e6c3f55d9d36f989381d93772065da2c27663813..69fbfc89efb697ba139228b71e8e9bb111ea185d 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -189,18 +189,18 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, unsigned int qid_atid = ((unsigned int)csk->atid) | (((unsigned int)csk->rss_qid) << 14); - opt0 = KEEP_ALIVE(1) | - WND_SCALE(wscale) | - MSS_IDX(csk->mss_idx) | - L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) | - TX_CHAN(csk->tx_chan) | - SMAC_SEL(csk->smac_idx) | - ULP_MODE(ULP_MODE_ISCSI) | - RCV_BUFSIZ(cxgb4i_rcv_win >> 10); - opt2 = RX_CHANNEL(0) | - RSS_QUEUE_VALID | - (1 << 20) | - RSS_QUEUE(csk->rss_qid); + opt0 = KEEP_ALIVE_F | + WND_SCALE_V(wscale) | + MSS_IDX_V(csk->mss_idx) | + L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) | + TX_CHAN_V(csk->tx_chan) | + SMAC_SEL_V(csk->smac_idx) | + ULP_MODE_V(ULP_MODE_ISCSI) | + RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10); + opt2 = RX_CHANNEL_V(0) | + RSS_QUEUE_VALID_F | + (RX_FC_DISABLE_F) | + RSS_QUEUE_V(csk->rss_qid); if (is_t4(lldi->adapter_type)) { struct cpl_act_open_req *req = @@ -217,7 +217,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, req->params = cpu_to_be32(cxgb4_select_ntuple( csk->cdev->ports[csk->port_id], csk->l2t)); - opt2 |= 1 << 22; + opt2 |= RX_FC_VALID_F; 
req->opt2 = cpu_to_be32(opt2); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, @@ -237,7 +237,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, req->local_ip = csk->saddr.sin_addr.s_addr; req->peer_ip = csk->daddr.sin_addr.s_addr; req->opt0 = cpu_to_be64(opt0); - req->params = cpu_to_be64(V_FILTER_TUPLE( + req->params = cpu_to_be64(FILTER_TUPLE_V( cxgb4_select_ntuple( csk->cdev->ports[csk->port_id], csk->l2t))); @@ -272,19 +272,19 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb, unsigned int qid_atid = ((unsigned int)csk->atid) | (((unsigned int)csk->rss_qid) << 14); - opt0 = KEEP_ALIVE(1) | - WND_SCALE(wscale) | - MSS_IDX(csk->mss_idx) | - L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) | - TX_CHAN(csk->tx_chan) | - SMAC_SEL(csk->smac_idx) | - ULP_MODE(ULP_MODE_ISCSI) | - RCV_BUFSIZ(cxgb4i_rcv_win >> 10); + opt0 = KEEP_ALIVE_F | + WND_SCALE_V(wscale) | + MSS_IDX_V(csk->mss_idx) | + L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) | + TX_CHAN_V(csk->tx_chan) | + SMAC_SEL_V(csk->smac_idx) | + ULP_MODE_V(ULP_MODE_ISCSI) | + RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10); - opt2 = RX_CHANNEL(0) | - RSS_QUEUE_VALID | - RX_FC_DISABLE | - RSS_QUEUE(csk->rss_qid); + opt2 = RX_CHANNEL_V(0) | + RSS_QUEUE_VALID_F | + RX_FC_DISABLE_F | + RSS_QUEUE_V(csk->rss_qid); if (t4) { struct cpl_act_open_req6 *req = @@ -305,7 +305,7 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb, req->opt0 = cpu_to_be64(opt0); - opt2 |= RX_FC_VALID; + opt2 |= RX_FC_VALID_F; req->opt2 = cpu_to_be32(opt2); req->params = cpu_to_be32(cxgb4_select_ntuple( @@ -328,10 +328,10 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb, 8); req->opt0 = cpu_to_be64(opt0); - opt2 |= T5_OPT_2_VALID; + opt2 |= T5_OPT_2_VALID_F; req->opt2 = cpu_to_be32(opt2); - req->params = cpu_to_be64(V_FILTER_TUPLE(cxgb4_select_ntuple( + req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple( csk->cdev->ports[csk->port_id], csk->l2t))); } @@ -452,7 +452,8 @@ static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits) INIT_TP_WR(req, csk->tid); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid)); - req->credit_dack = cpu_to_be32(RX_CREDITS(credits) | RX_FORCE_ACK(1)); + req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits) + | RX_FORCE_ACK_F); cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); return credits; } @@ -500,10 +501,10 @@ static inline void send_tx_flowc_wr(struct cxgbi_sock *csk) skb = alloc_wr(flowclen, 0, GFP_ATOMIC); flowc = (struct fw_flowc_wr *)skb->head; flowc->op_to_nparams = - htonl(FW_WR_OP(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS(8)); + htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(8)); flowc->flowid_len16 = - htonl(FW_WR_LEN16(DIV_ROUND_UP(72, 16)) | - FW_WR_FLOWID(csk->tid)); + htonl(FW_WR_LEN16_V(DIV_ROUND_UP(72, 16)) | + FW_WR_FLOWID_V(csk->tid)); flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; flowc->mnemval[0].val = htonl(csk->cdev->pfvf); flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; @@ -543,30 +544,31 @@ static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, { struct fw_ofld_tx_data_wr *req; unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3; - unsigned int wr_ulp_mode = 0; + unsigned int wr_ulp_mode = 0, val; req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req)); if (is_ofld_imm(skb)) { - req->op_to_immdlen = htonl(FW_WR_OP(FW_OFLD_TX_DATA_WR) | - FW_WR_COMPL(1) | - FW_WR_IMMDLEN(dlen)); - req->flowid_len16 = htonl(FW_WR_FLOWID(csk->tid) | - 
FW_WR_LEN16(credits)); + req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | + FW_WR_COMPL_F | + FW_WR_IMMDLEN_V(dlen)); + req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) | + FW_WR_LEN16_V(credits)); } else { req->op_to_immdlen = - cpu_to_be32(FW_WR_OP(FW_OFLD_TX_DATA_WR) | - FW_WR_COMPL(1) | - FW_WR_IMMDLEN(0)); + cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | + FW_WR_COMPL_F | + FW_WR_IMMDLEN_V(0)); req->flowid_len16 = - cpu_to_be32(FW_WR_FLOWID(csk->tid) | - FW_WR_LEN16(credits)); + cpu_to_be32(FW_WR_FLOWID_V(csk->tid) | + FW_WR_LEN16_V(credits)); } if (submode) - wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE(ULP2_MODE_ISCSI) | - FW_OFLD_TX_DATA_WR_ULPSUBMODE(submode); + wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) | + FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode); + val = skb_peek(&csk->write_queue) ? 0 : 1; req->tunnel_to_proxy = htonl(wr_ulp_mode | - FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1)); + FW_OFLD_TX_DATA_WR_SHOVE_V(val)); req->plen = htonl(len); if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); @@ -1445,16 +1447,16 @@ static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi, INIT_ULPTX_WR(req, wr_len, 0, 0); if (is_t4(lldi->adapter_type)) - req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) | - (ULP_MEMIO_ORDER(1))); + req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) | + (ULP_MEMIO_ORDER_F)); else - req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) | - (V_T5_ULP_MEMIO_IMM(1))); - req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5)); - req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5)); + req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) | + (T5_ULP_MEMIO_IMM_F)); + req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5)); + req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5)); req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16)); - idata->cmd_more = htonl(ULPTX_CMD(ULP_TX_SC_IMM)); + idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM)); idata->len = htonl(dlen); } @@ -1678,7 +1680,8 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi) cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr); cdev->itp = &cxgb4i_iscsi_transport; - cdev->pfvf = FW_VIID_PFN_GET(cxgb4_port_viid(lldi->ports[0])) << 8; + cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0])) + << FW_VIID_PFN_S; pr_info("cdev 0x%p,%s, pfvf %u.\n", cdev, lldi->ports[0]->name, cdev->pfvf); diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 308a016fdaeafd99c984788465a23983a1c71425..cd00a6cdf55b7e19eaf38c4c65e289f5744594b3 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1671,10 +1671,8 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) { /* must set skb->dev before calling vlan_put_tag */ skb->dev = fcoe->realdev; - skb = __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), - vlan_dev_vlan_id(fcoe->netdev)); - if (!skb) - return -ENOMEM; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + vlan_dev_vlan_id(fcoe->netdev)); } else skb->dev = fcoe->netdev; diff --git a/drivers/ssb/pcihost_wrapper.c b/drivers/ssb/pcihost_wrapper.c index 69161bbc4d0b37f135afeebfaac7e2a3c548e685..410215c169208f1735ed3df0e76189bab8c980c8 100644 --- a/drivers/ssb/pcihost_wrapper.c +++ b/drivers/ssb/pcihost_wrapper.c @@ -11,15 +11,17 @@ * Licensed under the GNU/GPL. See COPYING for details. 
*/ +#include <linux/pm.h> #include <linux/pci.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/ssb/ssb.h> -#ifdef CONFIG_PM -static int ssb_pcihost_suspend(struct pci_dev *dev, pm_message_t state) +#ifdef CONFIG_PM_SLEEP +static int ssb_pcihost_suspend(struct device *d) { + struct pci_dev *dev = to_pci_dev(d); struct ssb_bus *ssb = pci_get_drvdata(dev); int err; @@ -28,17 +30,23 @@ static int ssb_pcihost_suspend(struct pci_dev *dev, pm_message_t state) return err; pci_save_state(dev); pci_disable_device(dev); - pci_set_power_state(dev, pci_choose_state(dev, state)); + + /* If there is a wakeup-enabled child device on the ssb bus, + enable PCI wakeup. */ + device_set_wakeup_enable(d, d->power.wakeup_path); + + pci_prepare_to_sleep(dev); return 0; } -static int ssb_pcihost_resume(struct pci_dev *dev) +static int ssb_pcihost_resume(struct device *d) { + struct pci_dev *dev = to_pci_dev(d); struct ssb_bus *ssb = pci_get_drvdata(dev); int err; - pci_set_power_state(dev, PCI_D0); + pci_back_from_sleep(dev); err = pci_enable_device(dev); if (err) return err; @@ -49,10 +57,12 @@ static int ssb_pcihost_resume(struct pci_dev *dev) return 0; } -#else /* CONFIG_PM */ -# define ssb_pcihost_suspend NULL -# define ssb_pcihost_resume NULL -#endif /* CONFIG_PM */ + +static const struct dev_pm_ops ssb_pcihost_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ssb_pcihost_suspend, ssb_pcihost_resume) +}; + +#endif /* CONFIG_PM_SLEEP */ static int ssb_pcihost_probe(struct pci_dev *dev, const struct pci_device_id *id) @@ -115,8 +125,9 @@ int ssb_pcihost_register(struct pci_driver *driver) { driver->probe = ssb_pcihost_probe; driver->remove = ssb_pcihost_remove; - driver->suspend = ssb_pcihost_suspend; - driver->resume = ssb_pcihost_resume; +#ifdef CONFIG_PM_SLEEP + driver->driver.pm = &ssb_pcihost_pm_ops; +#endif return pci_register_driver(driver); } diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c index bd6953af0a0396f715cb2622e5a4e6ca904b5228..3d26955da724bcab0ea5e872076752607a51f79b 100644 --- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c +++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c @@ -2856,8 +2856,10 @@ static int cfg80211_rtw_add_station(struct wiphy *wiphy, } static int cfg80211_rtw_del_station(struct wiphy *wiphy, - struct net_device *ndev, const u8 *mac) + struct net_device *ndev, + struct station_del_parameters *params) { + const u8 *mac = params->mac; int ret = 0; struct list_head *phead, *plist, *ptmp; u8 updated = 0; diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index 2fbff907ce8a5bf8eee29613fad78247508bc4ad..dbc311c3dc377ca328e32a114b136609b708ac33 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -856,7 +856,9 @@ static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return 0; } -static void vnt_sw_scan_start(struct ieee80211_hw *hw) +static void vnt_sw_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *addr) { struct vnt_private *priv = hw->priv; @@ -865,7 +867,8 @@ static void vnt_sw_scan_start(struct ieee80211_hw *hw) vnt_update_pre_ed_threshold(priv, true); } -static void vnt_sw_scan_complete(struct ieee80211_hw *hw) +static void vnt_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct vnt_private *priv = hw->priv; diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index ce87ce9bdb9c59fd0d5d5eaf886acb7fc4c2b799..7c6a95bcb35e4ec043679338211f2435f98ccd33 100644 ---
a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -1326,21 +1326,19 @@ static int iscsit_do_rx_data( struct iscsi_conn *conn, struct iscsi_data_count *count) { - int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len; - struct kvec *iov_p; + int data = count->data_length, rx_loop = 0, total_rx = 0; struct msghdr msg; if (!conn || !conn->sock || !conn->conn_ops) return -1; memset(&msg, 0, sizeof(struct msghdr)); - - iov_p = count->iov; - iov_len = count->iov_count; + iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, + count->iov, count->iov_count, data); while (total_rx < data) { - rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len, - (data - total_rx), MSG_WAITALL); + rx_loop = sock_recvmsg(conn->sock, &msg, + (data - total_rx), MSG_WAITALL); if (rx_loop <= 0) { pr_debug("rx_loop: %d total_rx: %d\n", rx_loop, total_rx); diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c index 1ec8b7ffdccd40aadf3b70abfed20fd85935a649..c89e96cfa3e47b95d179d06910d528710e59d5de 100644 --- a/drivers/usb/gadget/function/f_phonet.c +++ b/drivers/usb/gadget/function/f_phonet.c @@ -303,7 +303,7 @@ pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags) struct page *page; int err; - page = __skb_alloc_page(gfp_flags | __GFP_NOMEMALLOC, NULL); + page = __dev_alloc_page(gfp_flags | __GFP_NOMEMALLOC); if (!page) return -ENOMEM; @@ -377,7 +377,7 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req) if (page) put_page(page); if (req) - pn_rx_submit(fp, req, GFP_ATOMIC | __GFP_COLD); + pn_rx_submit(fp, req, GFP_ATOMIC); } /*-------------------------------------------------------------------------*/ @@ -437,7 +437,7 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt) netif_carrier_on(dev); for (i = 0; i < phonet_rxq_size; i++) - pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC | __GFP_COLD); + pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC); } spin_unlock(&port->lock); return 0; diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index a935c254749e29a339afdede2064d0e5aa9c006d..14419a8ccbb6b138aa8bd38b7765166c1f4aa398 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -343,7 +343,6 @@ static void handle_tx(struct vhost_net *net) .msg_namelen = 0, .msg_control = NULL, .msg_controllen = 0, - .msg_iov = vq->iov, .msg_flags = MSG_DONTWAIT, }; size_t len, total_len = 0; @@ -397,8 +396,8 @@ static void handle_tx(struct vhost_net *net) } /* Skip header. TODO: support TSO. */ s = move_iovec_hdr(vq->iov, nvq->hdr, hdr_size, out); - msg.msg_iovlen = out; len = iov_length(vq->iov, out); + iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len); /* Sanity check */ if (!len) { vq_err(vq, "Unexpected header len for TX: " @@ -568,7 +567,6 @@ static void handle_rx(struct vhost_net *net) .msg_namelen = 0, .msg_control = NULL, /* FIXME: get and handle RX aux data. 
*/ .msg_controllen = 0, - .msg_iov = vq->iov, .msg_flags = MSG_DONTWAIT, }; struct virtio_net_hdr_mrg_rxbuf hdr = { @@ -606,7 +604,7 @@ static void handle_rx(struct vhost_net *net) break; /* On overrun, truncate and discard */ if (unlikely(headcount > UIO_MAXIOV)) { - msg.msg_iovlen = 1; + iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1); err = sock->ops->recvmsg(NULL, sock, &msg, 1, MSG_DONTWAIT | MSG_TRUNC); pr_debug("Discarded rx packet: len %zd\n", sock_len); @@ -632,7 +630,7 @@ static void handle_rx(struct vhost_net *net) /* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF: * needed because recvmsg can modify msg_iov. */ copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in); - msg.msg_iovlen = in; + iov_iter_init(&msg.msg_iter, READ, vq->iov, in, sock_len); err = sock->ops->recvmsg(NULL, sock, &msg, sock_len, MSG_DONTWAIT | MSG_TRUNC); /* Userspace might have consumed the packet meanwhile: diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 03a3beb170048df40c436dff51c41373104cf9e7..06e14bfb3496c9d9aab923243c104addfbff7196 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -306,8 +306,8 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg, _debug("- range %u-%u%s", offset, to, msg->msg_flags ? " [more]" : ""); - msg->msg_iov = (struct iovec *) iov; - msg->msg_iovlen = 1; + iov_iter_init(&msg->msg_iter, WRITE, + (struct iovec *) iov, 1, to - offset); /* have to change the state *before* sending the last * packet as RxRPC might give us the reply before it @@ -384,8 +384,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, msg.msg_name = NULL; msg.msg_namelen = 0; - msg.msg_iov = (struct iovec *) iov; - msg.msg_iovlen = 1; + iov_iter_init(&msg.msg_iter, WRITE, (struct iovec *)iov, 1, + call->request_size); msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = (call->send_pages ? MSG_MORE : 0); @@ -778,8 +778,7 @@ void afs_send_empty_reply(struct afs_call *call) iov[0].iov_len = 0; msg.msg_name = NULL; msg.msg_namelen = 0; - msg.msg_iov = iov; - msg.msg_iovlen = 0; + iov_iter_init(&msg.msg_iter, WRITE, iov, 0, 0); /* WTF? 
*/ msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; @@ -815,8 +814,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len) iov[0].iov_len = len; msg.msg_name = NULL; msg.msg_namelen = 0; - msg.msg_iov = iov; - msg.msg_iovlen = 1; + iov_iter_init(&msg.msg_iter, WRITE, iov, 1, len); msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index e9c3afe4b5d3c306a5dd61692ed34805ca661f7a..4e1d7268b004faff8f79730003757483689c715e 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -41,7 +41,7 @@ #include <linux/ratelimit.h> #include <linux/sunrpc/svcauth_gss.h> #include <linux/sunrpc/addr.h> -#include <linux/hash.h> +#include <linux/jhash.h> #include "xdr4.h" #include "xdr4cb.h" #include "vfs.h" @@ -594,7 +594,7 @@ static int delegation_blocked(struct knfsd_fh *fh) } spin_unlock(&blocked_delegations_lock); } - hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0); + hash = jhash(&fh->fh_base, fh->fh_size, 0); if (test_bit(hash&255, bd->set[0]) && test_bit((hash>>8)&255, bd->set[0]) && test_bit((hash>>16)&255, bd->set[0])) @@ -613,7 +613,7 @@ static void block_delegations(struct knfsd_fh *fh) u32 hash; struct bloom_pair *bd = &blocked_delegations; - hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0); + hash = jhash(&fh->fh_base, fh->fh_size, 0); spin_lock(&blocked_delegations_lock); __set_bit(hash&255, bd->set[bd->new]); diff --git a/include/asm-generic/hash.h b/include/asm-generic/hash.h deleted file mode 100644 index b6312843dbd92cd6bf1a636d01212960759ccf71..0000000000000000000000000000000000000000 --- a/include/asm-generic/hash.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef __ASM_GENERIC_HASH_H -#define __ASM_GENERIC_HASH_H - -struct fast_hash_ops; -static inline void setup_arch_fast_hash(struct fast_hash_ops *ops) -{ -} - -#endif /* __ASM_GENERIC_HASH_H */ diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h index a495a959e8a754939b9c8c9d9bd748a73f54f587..33eb274cd0e6b61be2b000e3e30c82f2ea6c7f1e 100644 --- a/include/linux/ath9k_platform.h +++ b/include/linux/ath9k_platform.h @@ -31,8 +31,11 @@ struct ath9k_platform_data { u32 gpio_mask; u32 gpio_val; + bool endian_check; bool is_clk_25mhz; bool tx_gain_buffalo; + bool disable_2ghz; + bool disable_5ghz; int (*get_mac_revision)(void); int (*external_reset)(void); diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 729f48e6b20b21564bfdbc4d958fd0bc327f740d..eb1c6a47b67f2acf99df011962da81c76398b2d7 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -447,4 +447,6 @@ extern u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset); #define BCMA_DMA_TRANSLATION_DMA64_CMT 0x80000000 /* Client Mode Translation for 64-bit DMA */ extern u32 bcma_core_dma_translation(struct bcma_device *core); +extern unsigned int bcma_core_irq(struct bcma_device *core, int num); + #endif /* LINUX_BCMA_H_ */ diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h index fb61f3fb4ddbb1c208b2557f7aace2c692c6632e..0b3b32aeeb8af8c76b9ed23ff0017ef79e92e635 100644 --- a/include/linux/bcma/bcma_driver_mips.h +++ b/include/linux/bcma/bcma_driver_mips.h @@ -43,12 +43,12 @@ struct bcma_drv_mips { extern void bcma_core_mips_init(struct bcma_drv_mips *mcore); extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore); -extern unsigned int bcma_core_irq(struct bcma_device *core); +extern unsigned int bcma_core_mips_irq(struct bcma_device *dev); #else static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { } static inline void
bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { } -static inline unsigned int bcma_core_irq(struct bcma_device *core) +static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev) { return 0; } diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3cf91754a957e5f509bf5f97050a559cab048878..bbfceb7564523bc5e17037e25663185aee02311e 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -22,7 +22,7 @@ struct bpf_map_ops { /* funcs callable from userspace and from eBPF programs */ void *(*map_lookup_elem)(struct bpf_map *map, void *key); - int (*map_update_elem)(struct bpf_map *map, void *key, void *value); + int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); int (*map_delete_elem)(struct bpf_map *map, void *key); }; @@ -128,9 +128,18 @@ struct bpf_prog_aux { struct work_struct work; }; +#ifdef CONFIG_BPF_SYSCALL void bpf_prog_put(struct bpf_prog *prog); +#else +static inline void bpf_prog_put(struct bpf_prog *prog) {} +#endif struct bpf_prog *bpf_prog_get(u32 ufd); /* verify correctness of eBPF program */ int bpf_check(struct bpf_prog *fp, union bpf_attr *attr); +/* verifier prototypes for helper functions called from eBPF programs */ +extern struct bpf_func_proto bpf_map_lookup_elem_proto; +extern struct bpf_func_proto bpf_map_update_elem_proto; +extern struct bpf_func_proto bpf_map_delete_elem_proto; + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index b37ea95bc348f15b16a9c0d095588379b12edbee..c05ff0f9f9a55afec230435183dcce0d93bd5725 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -127,6 +127,9 @@ void unregister_candev(struct net_device *dev); int can_restart_now(struct net_device *dev); void can_bus_off(struct net_device *dev); +void can_change_state(struct net_device *dev, struct can_frame *cf, + enum can_state tx_state, enum can_state rx_state); + void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, unsigned int idx); unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
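For context on the can_change_state() declaration added just above: a driver's error interrupt handler might use it roughly as follows. This is a sketch only, not part of this patch; the foo_* names are invented and the thresholds are the usual CAN error-counter limits.

/* Map an error counter to a CAN state (96/128 per the CAN spec). */
static enum can_state foo_err_count_to_state(u8 cnt)
{
	if (cnt >= 128)
		return CAN_STATE_ERROR_PASSIVE;
	if (cnt >= 96)
		return CAN_STATE_ERROR_WARNING;
	return CAN_STATE_ERROR_ACTIVE;
}

/* Hypothetical error-IRQ path of a foo CAN driver. */
static void foo_handle_state_change(struct net_device *dev, u8 txerr, u8 rxerr)
{
	struct can_frame *cf;
	struct sk_buff *skb = alloc_can_err_skb(dev, &cf);

	if (!skb)
		return;

	can_change_state(dev, cf, foo_err_count_to_state(txerr),
			 foo_err_count_to_state(rxerr));
	netif_rx(skb);
}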
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 733980fce8e30d6ddd2b442d407a3fac877a759a..41c891d05f0434058a25bb45b069880027609401 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -392,4 +392,16 @@ static inline unsigned long compare_ether_header(const void *a, const void *b) #endif } +/** + * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame + * @skb: Buffer to pad + * + * An Ethernet frame should have a minimum size of 60 bytes. This function + * takes short frames and pads them with zeros up to the 60 byte limit. + */ +static inline int eth_skb_pad(struct sk_buff *skb) +{ + return skb_put_padto(skb, ETH_ZLEN); +} + #endif /* _LINUX_ETHERDEVICE_H */ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index c1a2d60dfb82769c42b65dc2adc324517ed7e79c..653dc9c4ebac7a176183af8e2a8ae26076a03eca 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -59,6 +59,26 @@ enum ethtool_phys_id_state { ETHTOOL_ID_OFF }; +enum { + ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */ + ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */ + + /* + * Add your fresh new hash function bits above and remember to update + * rss_hash_func_strings[] in ethtool.c + */ + ETH_RSS_HASH_FUNCS_COUNT +}; + +#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit)) +#define __ETH_RSS_HASH(name) __ETH_RSS_HASH_BIT(ETH_RSS_HASH_##name##_BIT) + +#define ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP) +#define ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR) + +#define ETH_RSS_HASH_UNKNOWN 0 +#define ETH_RSS_HASH_NO_CHANGE 0 + struct net_device; /* Some generic methods drivers may use in their ethtool_ops */ @@ -158,17 +178,14 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) * Returns zero if not supported for this specific device. * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table. * Returns zero if not supported for this specific device. - * @get_rxfh: Get the contents of the RX flow hash indirection table and hash - * key. - * Will only be called if one or both of @get_rxfh_indir_size and - * @get_rxfh_key_size are implemented and return non-zero. - * Returns a negative error code or zero. - * @set_rxfh: Set the contents of the RX flow hash indirection table and/or - * hash key. In case only the indirection table or hash key is to be - * changed, the other argument will be %NULL. - * Will only be called if one or both of @get_rxfh_indir_size and - * @get_rxfh_key_size are implemented and return non-zero. + * @get_rxfh: Get the contents of the RX flow hash indirection table, hash key + * and/or hash function. * Returns a negative error code or zero. + * @set_rxfh: Set the contents of the RX flow hash indirection table, hash + * key, and/or hash function. Arguments which are set to %NULL or zero + * will remain unchanged. + * Returns a negative error code or zero. An error code must be returned + * if at least one unsupported change was requested. * @get_channels: Get number of channels. * @set_channels: Set number of channels. Returns a negative error code or * zero.
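For drivers converting to the reworked get_rxfh()/set_rxfh() signatures in the ops table below, a minimal sketch might look like this. Hypothetical foo driver with Toeplitz-only hardware; all foo_* names and sizes are assumptions, not part of this patch.

struct foo_priv {
	u32 rss_indir[128];
	u8 rss_key[40];
};

static int foo_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			u8 *hfunc)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz is all we support */
	if (indir)
		memcpy(indir, priv->rss_indir, sizeof(priv->rss_indir));
	if (key)
		memcpy(key, priv->rss_key, sizeof(priv->rss_key));
	return 0;
}

static int foo_set_rxfh(struct net_device *dev, const u32 *indir,
			const u8 *key, const u8 hfunc)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* Per the documentation above, an unsupported hash function
	 * change must be rejected with an error.
	 */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;
	if (indir)
		memcpy(priv->rss_indir, indir, sizeof(priv->rss_indir));
	if (key)
		memcpy(priv->rss_key, key, sizeof(priv->rss_key));
	/* ... reprogram the hardware RSS table and key here ... */
	return 0;
}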
@@ -241,9 +258,10 @@ struct ethtool_ops { int (*reset)(struct net_device *, u32 *); u32 (*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); - int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key); + int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key, + u8 *hfunc); int (*set_rxfh)(struct net_device *, const u32 *indir, - const u8 *key); + const u8 *key, const u8 hfunc); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); diff --git a/include/linux/filter.h b/include/linux/filter.h index ca95abd2bed130ac3c76161e7ae273e59c48c51b..caac2087a4d5e7479e272b6dc5da23d7c90c9fd7 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -381,6 +381,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); void bpf_prog_destroy(struct bpf_prog *fp); int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); +int sk_attach_bpf(u32 ufd, struct sock *sk); int sk_detach_filter(struct sock *sk); int bpf_check_classic(const struct sock_filter *filter, unsigned int flen); diff --git a/include/linux/hash.h b/include/linux/hash.h index d0494c399392c801dfce96d8a141d935d750a22e..1afde47e1528c36ad8613c2e56fb23d1f614e476 100644 --- a/include/linux/hash.h +++ b/include/linux/hash.h @@ -15,7 +15,6 @@ */ #include <asm/types.h> -#include <asm/hash.h> #include <linux/compiler.h> /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ @@ -84,38 +83,4 @@ static inline u32 hash32_ptr(const void *ptr) return (u32)val; } -struct fast_hash_ops { - u32 (*hash)(const void *data, u32 len, u32 seed); - u32 (*hash2)(const u32 *data, u32 len, u32 seed); -}; - -/** - * arch_fast_hash - Caclulates a hash over a given buffer that can have - * arbitrary size. This function will eventually use an - * architecture-optimized hashing implementation if - * available, and trades off distribution for speed. - * - * @data: buffer to hash - * @len: length of buffer in bytes - * @seed: start seed - * - * Returns 32bit hash. - */ -extern u32 arch_fast_hash(const void *data, u32 len, u32 seed); - -/** - * arch_fast_hash2 - Caclulates a hash over a given buffer that has a - * size that is of a multiple of 32bit words. This - * function will eventually use an architecture- - * optimized hashing implementation if available, - * and trades off distribution for speed. - * - * @data: buffer to hash (must be 32bit padded) - * @len: number of 32bit words - * @seed: start seed - * - * Returns 32bit hash.
- */ -extern u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed); - #endif /* _LINUX_HASH_H */ diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 08cfaff8a072313e5e3ed634162cf67c948c4ad5..476c685ca6f9c5c6447944aee2fa98a85cdd14e4 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -650,6 +650,8 @@ struct vmbus_channel { u8 monitor_grp; u8 monitor_bit; + bool rescind; /* got rescind msg */ + u32 ringbuffer_gpadlhandle; /* Allocated memory for ring buffer */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index b1be39c76931b5084ee4bebab2c237021d31e109..4f4eea8a62882914a86c11a084928a02225c6e0b 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -19,6 +19,7 @@ #include <linux/types.h> #include <linux/if_ether.h> #include <asm/byteorder.h> +#include <asm/unaligned.h> /* * DS bit usage @@ -1066,6 +1067,12 @@ struct ieee80211_pspoll { /* TDLS */ +/* Channel switch timing */ +struct ieee80211_ch_switch_timing { + __le16 switch_time; + __le16 switch_timeout; +} __packed; + /* Link-id information element */ struct ieee80211_tdls_lnkie { u8 ie_type; /* Link Identifier IE */ @@ -1107,6 +1114,15 @@ struct ieee80211_tdls_data { u8 dialog_token; u8 variable[0]; } __packed discover_req; + struct { + u8 target_channel; + u8 oper_class; + u8 variable[0]; + } __packed chan_switch_req; + struct { + __le16 status_code; + u8 variable[0]; + } __packed chan_switch_resp; } u; } __packed; @@ -1274,7 +1290,7 @@ struct ieee80211_ht_cap { #define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2 /* - * Maximum length of AMPDU that the STA can receive. + * Maximum length of AMPDU that the STA can receive in high-throughput (HT). * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) */ enum ieee80211_max_ampdu_length_exp { @@ -1284,6 +1300,21 @@ enum ieee80211_max_ampdu_length_exp { IEEE80211_HT_MAX_AMPDU_64K = 3 }; +/* + * Maximum length of AMPDU that the STA can receive in VHT.
+ * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) + */ +enum ieee80211_vht_max_ampdu_length_exp { + IEEE80211_VHT_MAX_AMPDU_8K = 0, + IEEE80211_VHT_MAX_AMPDU_16K = 1, + IEEE80211_VHT_MAX_AMPDU_32K = 2, + IEEE80211_VHT_MAX_AMPDU_64K = 3, + IEEE80211_VHT_MAX_AMPDU_128K = 4, + IEEE80211_VHT_MAX_AMPDU_256K = 5, + IEEE80211_VHT_MAX_AMPDU_512K = 6, + IEEE80211_VHT_MAX_AMPDU_1024K = 7 +}; + #define IEEE80211_HT_MAX_AMPDU_FACTOR 13 /* Minimum MPDU start spacing */ @@ -1998,6 +2029,16 @@ enum ieee80211_tdls_actioncode { WLAN_TDLS_DISCOVERY_REQUEST = 10, }; +/* Extended Channel Switching capability to be set in the 1st byte of + * the @WLAN_EID_EXT_CAPABILITY information element + */ +#define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2) + +/* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */ +#define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4) +#define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5) +#define WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH BIT(6) + /* Interworking capabilities are set in 7th bit of 4th byte of the * @WLAN_EID_EXT_CAPABILITY information element */ @@ -2009,6 +2050,7 @@ enum ieee80211_tdls_actioncode { */ #define WLAN_EXT_CAPA5_TDLS_ENABLED BIT(5) #define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6) +#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7) #define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6) #define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7) @@ -2016,6 +2058,9 @@ enum ieee80211_tdls_actioncode { /* TDLS specific payload type in the LLC/SNAP header */ #define WLAN_TDLS_SNAP_RFTYPE 0x2 +/* BSS Coex IE information field bits */ +#define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0) + /** * enum - mesh synchronization method identifier * @@ -2398,6 +2443,30 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim, return !!(tim->virtual_map[index] & mask); } +/** + * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet) + * @skb: the skb containing the frame, length will not be checked + * @hdr_size: the size of the ieee80211_hdr that starts at skb->data + * + * This function assumes the frame is a data frame, and that the network header + * is in the correct place. + */ +static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size) +{ + if (!skb_is_nonlinear(skb) && + skb->len > (skb_network_offset(skb) + 2)) { + /* Point to where the indication of TDLS should start */ + const u8 *tdls_data = skb_network_header(skb) - 2; + + if (get_unaligned_be16(tdls_data) == ETH_P_TDLS && + tdls_data[2] == WLAN_TDLS_SNAP_RFTYPE && + tdls_data[3] == WLAN_CATEGORY_TDLS) + return tdls_data[4]; + } + + return -1; +} + /* convert time units */ #define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) #define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x))
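As a usage sketch for the ieee80211_get_tdls_action() helper added above: a driver TX path can use the returned action code to special-case TDLS setup frames, which must still go through the AP rather than the direct link. The foo_* name is invented; the action constants come from enum ieee80211_tdls_actioncode.

static bool foo_is_tdls_setup_frame(struct sk_buff *skb, u32 hdr_size)
{
	switch (ieee80211_get_tdls_action(skb, hdr_size)) {
	case WLAN_TDLS_SETUP_REQUEST:
	case WLAN_TDLS_SETUP_RESPONSE:
	case WLAN_TDLS_SETUP_CONFIRM:
		return true;
	default:	/* includes -1, i.e. not a TDLS frame at all */
		return false;
	}
}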
- * * Written by: * Pavel Smolenskiy * Maxim Gorbachyov @@ -24,10 +20,27 @@ * Alexander Smirnov */ -#ifndef NET_IEEE802154_H -#define NET_IEEE802154_H +#ifndef LINUX_IEEE802154_H +#define LINUX_IEEE802154_H + +#include +#include +#include #define IEEE802154_MTU 127 +#define IEEE802154_MIN_PSDU_LEN 5 + +#define IEEE802154_PAN_ID_BROADCAST 0xffff +#define IEEE802154_ADDR_SHORT_BROADCAST 0xffff +#define IEEE802154_ADDR_SHORT_UNSPEC 0xfffe + +#define IEEE802154_EXTENDED_ADDR_LEN 8 + +#define IEEE802154_LIFS_PERIOD 40 +#define IEEE802154_SIFS_PERIOD 12 + +#define IEEE802154_MAX_CHANNEL 26 +#define IEEE802154_MAX_PAGE 31 #define IEEE802154_FC_TYPE_BEACON 0x0 /* Frame is beacon */ #define IEEE802154_FC_TYPE_DATA 0x1 /* Frame is data */ @@ -189,7 +202,41 @@ enum { IEEE802154_SCAN_IN_PROGRESS = 0xfc, }; +/** + * ieee802154_is_valid_psdu_len - check if psdu len is valid + * @len: psdu len with (MHR + payload + MFR) + */ +static inline bool ieee802154_is_valid_psdu_len(const u8 len) +{ + return (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU); +} + +/** + * ieee802154_is_valid_extended_addr - check if extended addr is valid + * @addr: extended addr to check + */ +static inline bool ieee802154_is_valid_extended_addr(const __le64 addr) +{ + /* These EUI-64 addresses are reserved by IEEE. 0xffffffffffffffff + * is used internally as extended to short address broadcast mapping. + * This is currently a workaround because neighbor discovery can't + * deal with short addresses types right now. + */ + return ((addr != cpu_to_le64(0x0000000000000000ULL)) && + (addr != cpu_to_le64(0xffffffffffffffffULL))); } -#endif +/** + * ieee802154_random_extended_addr - generates a random extended address + * @addr: extended addr pointer to place the random address + */ +static inline void ieee802154_random_extended_addr(__le64 *addr) +{ + get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN); + /* toggle some bit if we hit an invalid extended addr */ + if (!ieee802154_is_valid_extended_addr(*addr)) + ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] ^= 0x01; +} +#endif /* LINUX_IEEE802154_H */ diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 808dcb8cc04fbeee82f00bab944ff45e8087078a..0a8ce762a47fded2031d83ec3535feaed062806a 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -15,6 +15,7 @@ #include #include +#include struct br_ip { union { @@ -32,11 +33,41 @@ struct br_ip_list { struct br_ip addr; }; +#define BR_HAIRPIN_MODE BIT(0) +#define BR_BPDU_GUARD BIT(1) +#define BR_ROOT_BLOCK BIT(2) +#define BR_MULTICAST_FAST_LEAVE BIT(3) +#define BR_ADMIN_COST BIT(4) +#define BR_LEARNING BIT(5) +#define BR_FLOOD BIT(6) +#define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING) +#define BR_PROMISC BIT(7) +#define BR_PROXYARP BIT(8) +#define BR_LEARNING_SYNC BIT(9) + extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); typedef int br_should_route_hook_t(struct sk_buff *skb); extern br_should_route_hook_t __rcu *br_should_route_hook; +#if IS_ENABLED(CONFIG_BRIDGE) +int br_fdb_external_learn_add(struct net_device *dev, + const unsigned char *addr, u16 vid); +int br_fdb_external_learn_del(struct net_device *dev, + const unsigned char *addr, u16 vid); +#else +static inline int br_fdb_external_learn_add(struct net_device *dev, + const unsigned char *addr, u16 vid) +{ + return 0; +} +static inline int br_fdb_external_learn_del(struct net_device *dev, + const unsigned char *addr, u16 vid) +{ + return 0; +} +#endif + #if IS_ENABLED(CONFIG_BRIDGE) &&
IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list); diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index d69f0577a319d6875ee62352717f45bbfe447a23..515a35e2a48ab7e55d550fcd164466080773b3ce 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -282,28 +282,24 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features, } /** - * vlan_insert_tag - regular VLAN tag inserting + * __vlan_insert_tag - regular VLAN tag inserting * @skb: skbuff to tag * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * * Inserts the VLAN tag into @skb as part of the payload - * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. - * - * Following the skb_unshare() example, in case of error, the calling function - * doesn't have to worry about freeing the original skb. + * Returns error if skb_cow_head fails. * * Does not change skb->protocol so this function can be used during receive. */ -static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, - __be16 vlan_proto, u16 vlan_tci) +static inline int __vlan_insert_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) { struct vlan_ethhdr *veth; - if (skb_cow_head(skb, VLAN_HLEN) < 0) { - dev_kfree_skb_any(skb); - return NULL; - } + if (skb_cow_head(skb, VLAN_HLEN) < 0) + return -ENOMEM; + veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); /* Move the mac addresses to the beginning of the new header. */ @@ -316,12 +312,40 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, /* now, the TCI */ veth->h_vlan_TCI = htons(vlan_tci); + return 0; +} + +/** + * vlan_insert_tag - regular VLAN tag inserting + * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol + * @vlan_tci: VLAN TCI to insert + * + * Inserts the VLAN tag into @skb as part of the payload + * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. + * + * Does not change skb->protocol so this function can be used during receive. + */ +static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) +{ + int err; + + err = __vlan_insert_tag(skb, vlan_proto, vlan_tci); + if (err) { + dev_kfree_skb_any(skb); + return NULL; + } return skb; } /** - * __vlan_put_tag - regular VLAN tag inserting + * vlan_insert_tag_set_proto - regular VLAN tag inserting * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * * Inserts the VLAN tag into @skb as part of the payload @@ -330,8 +354,9 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, * Following the skb_unshare() example, in case of error, the calling function * doesn't have to worry about freeing the original skb.
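Returning briefly to the 802.15.4 helpers above: together they make the common "use the EUI-64 from NVM unless it is reserved" pattern a two-liner. A hedged sketch, assuming the address has already been read from non-volatile storage:

	/* Sketch only: sanitize an extended address read from NVM. */
	static __le64 example_hw_addr_from_nvm(__le64 nvm_addr)
	{
		/* 0x0 and 0xffffffffffffffff are reserved, so fall back
		 * to a randomly generated extended address. */
		if (!ieee802154_is_valid_extended_addr(nvm_addr))
			ieee802154_random_extended_addr(&nvm_addr);
		return nvm_addr;
	}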
*/ -static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, - __be16 vlan_proto, u16 vlan_tci) +static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb, + __be16 vlan_proto, + u16 vlan_tci) { skb = vlan_insert_tag(skb, vlan_proto, vlan_tci); if (skb) @@ -339,39 +364,53 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, return skb; } -/** - * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting +/* + * __vlan_hwaccel_push_inside - pushes vlan tag to the payload * @skb: skbuff to tag - * @vlan_proto: VLAN encapsulation protocol - * @vlan_tci: VLAN TCI to insert * - * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest + * Pushes the VLAN tag from @skb->vlan_tci inside to the payload. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. */ -static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, - __be16 vlan_proto, - u16 vlan_tci) +static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) { - skb->vlan_proto = vlan_proto; - skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; + skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, + vlan_tx_tag_get(skb)); + if (likely(skb)) + skb->vlan_tci = 0; + return skb; +} +/* + * vlan_hwaccel_push_inside - pushes vlan tag to the payload + * @skb: skbuff to tag + * + * Checks if a tag is present in @skb->vlan_tci and if it is, it pushes the + * VLAN tag from @skb->vlan_tci inside to the payload. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. + */ +static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb) +{ + if (vlan_tx_tag_present(skb)) + skb = __vlan_hwaccel_push_inside(skb); return skb; } /** - * vlan_put_tag - inserts VLAN tag according to device features + * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * - * Assumes skb->dev is the target that will xmit this frame. - * Returns a VLAN tagged skb.
+ * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest */ -static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, - __be16 vlan_proto, u16 vlan_tci) +static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) { - if (vlan_hw_offload_capable(skb->dev->features, vlan_proto)) { - return __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); - } else { - return __vlan_put_tag(skb, vlan_proto, vlan_tci); - } + skb->vlan_proto = vlan_proto; + skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; } /** diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index ff560537dd61b334a3dfbfb559dfa0f1cff20f41..c694e7baa621b3f76d220589e018dd767fa7bfd0 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -42,6 +42,7 @@ struct ipv6_devconf { __s32 accept_ra_from_local; #ifdef CONFIG_IPV6_OPTIMISTIC_DAD __s32 optimistic_dad; + __s32 use_optimistic; #endif #ifdef CONFIG_IPV6_MROUTE __s32 mc_forwarding; @@ -316,14 +317,4 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk) #define tcp_twsk_ipv6only(__sk) 0 #define inet_v6_ipv6only(__sk) 0 #endif /* IS_ENABLED(CONFIG_IPV6) */ - -#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \ - (((__sk)->sk_portpair == (__ports)) && \ - ((__sk)->sk_family == AF_INET6) && \ - ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \ - ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \ - (!(__sk)->sk_bound_dev_if || \ - ((__sk)->sk_bound_dev_if == (__dif))) && \ - net_eq(sock_net(__sk), (__net))) - #endif /* _IPV6_H */ diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index 8e9a029e093d6522b68c0929aad93704f47c3ad5..e6982ac3200d65a537f2561bd51c2a0696b62e70 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -16,6 +16,7 @@ #define MARVELL_PHY_ID_88E1318S 0x01410e90 #define MARVELL_PHY_ID_88E1116R 0x01410e40 #define MARVELL_PHY_ID_88E1510 0x01410dd0 +#define MARVELL_PHY_ID_88E3016 0x01410e60 /* struct phy_device dev_flags definitions */ #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index 53d33dee70e19a657571d97e773e35d63afa0153..2e5b194b9b1900b9d5304cbdb52f14aac576b85e 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -37,7 +37,6 @@ /* struct phy_device dev_flags definitions */ #define MICREL_PHY_50MHZ_CLK 0x00000001 -#define MICREL_PHY_25MHZ_CLK 0x00000002 #define MICREL_KSZ9021_EXTREG_CTRL 0xB #define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 379c02648ab3b36029ba459534416ebe224cc282..64d25941b329918e2c8f232e02431dabfcbc3c8f 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -67,6 +67,8 @@ enum { MLX4_CMD_MAP_ICM_AUX = 0xffc, MLX4_CMD_UNMAP_ICM_AUX = 0xffb, MLX4_CMD_SET_ICM_SIZE = 0xffd, + MLX4_CMD_ACCESS_REG = 0x3b, + /*master notify fw on finish for slave's flr*/ MLX4_CMD_INFORM_FLR_DONE = 0x5b, MLX4_CMD_GET_OP_REQ = 0x59, @@ -197,6 +199,33 @@ enum { MLX4_CMD_NATIVE }; +/* + * MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP - + * Receive checksum value is reported in CQE also for non TCP/UDP packets. + * + * MLX4_RX_CSUM_MODE_L4 - + * L4_CSUM bit in CQE, which indicates whether or not L4 checksum + * was validated correctly, is supported. + * + * MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP - + * IP_OK CQE's field is supported also for non TCP/UDP IP packets. 
+ * + * MLX4_RX_CSUM_MODE_MULTI_VLAN - + * Receive Checksum offload is supported for packets with more than 2 vlan headers. + */ +enum mlx4_rx_csum_mode { + MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP = 1UL << 0, + MLX4_RX_CSUM_MODE_L4 = 1UL << 1, + MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP = 1UL << 2, + MLX4_RX_CSUM_MODE_MULTI_VLAN = 1UL << 3 +}; + +struct mlx4_config_dev_params { + u16 vxlan_udp_dport; + u8 rx_csum_flags_port_1; + u8 rx_csum_flags_port_2; +}; + struct mlx4_dev; struct mlx4_cmd_mailbox { @@ -248,6 +277,8 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos); int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting); int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf); int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state); +int mlx4_config_dev_retrieval(struct mlx4_dev *dev, + struct mlx4_config_dev_params *params); /* * mlx4_get_slave_default_vlan - * return true if VST ( default vlan) diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 37e4404d0227f8c3420bb9e14d8d943b5d734730..25c791e295fd5355650a3db42bc69ddc1adf2cc8 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -95,7 +95,7 @@ enum { enum { MLX4_MAX_NUM_PF = 16, - MLX4_MAX_NUM_VF = 64, + MLX4_MAX_NUM_VF = 126, MLX4_MAX_NUM_VF_P_PORT = 64, MLX4_MFUNC_MAX = 80, MLX4_MAX_EQ_NUM = 1024, @@ -117,6 +117,14 @@ enum { MLX4_STEERING_MODE_DEVICE_MANAGED }; +enum { + MLX4_STEERING_DMFS_A0_DEFAULT, + MLX4_STEERING_DMFS_A0_DYNAMIC, + MLX4_STEERING_DMFS_A0_STATIC, + MLX4_STEERING_DMFS_A0_DISABLE, + MLX4_STEERING_DMFS_A0_NOT_SUPPORTED +}; + static inline const char *mlx4_steering_mode_str(int steering_mode) { switch (steering_mode) { @@ -186,7 +194,31 @@ enum { MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10, MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11, MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12, - MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13 + MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13, + MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL = 1LL << 14, + MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP = 1LL << 15, + MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16, + MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17, + MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18, + MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19 +}; + +enum { + MLX4_QUERY_FUNC_FLAGS_BF_RES_QP = 1LL << 0, + MLX4_QUERY_FUNC_FLAGS_A0_RES_QP = 1LL << 1 +}; + +/* bit enums for an 8-bit flags field indicating special use + * QPs which require special handling in qp_reserve_range. + * Currently, this only includes QPs used by the ETH interface, + * where we expect to use blueflame. These QPs must not have + * bits 6 and 7 set in their qp number. + * + * This enum may use only bits 0..7. 
+ */ +enum { + MLX4_RESERVE_A0_QP = 1 << 6, + MLX4_RESERVE_ETH_BF_QP = 1 << 7, }; enum { @@ -202,7 +234,8 @@ enum { enum { MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0, - MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1 + MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1, + MLX4_FUNC_CAP_DMFS_A0_STATIC = 1L << 2 }; @@ -328,6 +361,8 @@ enum { enum mlx4_qp_region { MLX4_QP_REGION_FW = 0, + MLX4_QP_REGION_RSS_RAW_ETH, + MLX4_QP_REGION_BOTTOM = MLX4_QP_REGION_RSS_RAW_ETH, MLX4_QP_REGION_ETH_ADDR, MLX4_QP_REGION_FC_ADDR, MLX4_QP_REGION_FC_EXCH, @@ -379,6 +414,13 @@ enum { #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK) +enum mlx4_module_id { + MLX4_MODULE_ID_SFP = 0x3, + MLX4_MODULE_ID_QSFP = 0xC, + MLX4_MODULE_ID_QSFP_PLUS = 0xD, + MLX4_MODULE_ID_QSFP28 = 0x11, +}; + static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) { return (major << 32) | (minor << 16) | subminor; @@ -433,6 +475,7 @@ struct mlx4_caps { int num_cqs; int max_cqes; int reserved_cqs; + int num_sys_eqs; int num_eqs; int reserved_eqs; int num_comp_vectors; @@ -449,6 +492,7 @@ struct mlx4_caps { int reserved_mcgs; int num_qp_per_mgm; int steering_mode; + int dmfs_high_steer_mode; int fs_log_max_ucast_qp_range_size; int num_pds; int reserved_pds; @@ -487,6 +531,10 @@ struct mlx4_caps { u16 hca_core_clock; u64 phys_port_id[MLX4_MAX_PORTS + 1]; int tunnel_offload_mode; + u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1]; + u8 alloc_res_qp_mask; + u32 dmfs_high_rate_qpn_base; + u32 dmfs_high_rate_qpn_range; }; struct mlx4_buf_list { @@ -607,6 +655,11 @@ struct mlx4_cq { atomic_t refcount; struct completion free; + struct { + struct list_head list; + void (*comp)(struct mlx4_cq *); + void *priv; + } tasklet_ctx; }; struct mlx4_qp { @@ -799,6 +852,26 @@ struct mlx4_init_port_param { u64 si_guid; }; +#define MAD_IFC_DATA_SZ 192 +/* MAD IFC Mailbox */ +struct mlx4_mad_ifc { + u8 base_version; + u8 mgmt_class; + u8 class_version; + u8 method; + __be16 status; + __be16 class_specific; + __be64 tid; + __be16 attr_id; + __be16 resv; + __be32 attr_mod; + __be64 mkey; + __be16 dr_slid; + __be16 dr_dlid; + u8 reserved[28]; + u8 data[MAD_IFC_DATA_SZ]; +} __packed; + #define mlx4_foreach_port(port, dev, type) \ for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ if ((type) == (dev)->caps.port_mask[(port)]) @@ -835,7 +908,9 @@ static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev) static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) { return (qpn < dev->phys_caps.base_sqpn + 8 + - 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev)); + 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev) && + qpn >= dev->phys_caps.base_sqpn) || + (qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]); } static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn) @@ -911,8 +986,8 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, unsigned vector, int collapsed, int timestamp_en); void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); - -int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base); +int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, + int *base, u8 flags); void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, @@ -1283,10 +1358,50 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, u64 iova, u64 size, int npages, int page_shift, struct 
mlx4_mpt_entry *mpt_entry); +int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, + u16 offset, u16 size, u8 *data); + /* Returns true if running in low memory profile (kdump kernel) */ static inline bool mlx4_low_memory_profile(void) { return is_kdump_kernel(); } +/* ACCESS REG commands */ +enum mlx4_access_reg_method { + MLX4_ACCESS_REG_QUERY = 0x1, + MLX4_ACCESS_REG_WRITE = 0x2, +}; + +/* ACCESS PTYS Reg command */ +enum mlx4_ptys_proto { + MLX4_PTYS_IB = 1<<0, + MLX4_PTYS_EN = 1<<2, +}; + +struct mlx4_ptys_reg { + u8 resrvd1; + u8 local_port; + u8 resrvd2; + u8 proto_mask; + __be32 resrvd3[2]; + __be32 eth_proto_cap; + __be16 ib_width_cap; + __be16 ib_speed_cap; + __be32 resrvd4; + __be32 eth_proto_admin; + __be16 ib_width_admin; + __be16 ib_speed_admin; + __be32 resrvd5; + __be32 eth_proto_oper; + __be16 ib_width_oper; + __be16 ib_speed_oper; + __be32 resrvd6; + __be32 eth_proto_lp_adv; +} __packed; + +int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev, + enum mlx4_access_reg_method method, + struct mlx4_ptys_reg *ptys_reg); + #endif /* MLX4_DEVICE_H */ diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 5f4e36cf00910c7220b743fd06f5471cdd7dc441..467ccdf94c981f7a7f90b04b6d2855c4b4a56d18 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -120,13 +120,15 @@ enum { MLX4_RSS_QPC_FLAG_OFFSET = 13, }; +#define MLX4_EN_RSS_KEY_SIZE 40 + struct mlx4_rss_context { __be32 base_qpn; __be32 default_qpn; u16 reserved; u8 hash_fn; u8 flags; - __be32 rss_key[10]; + __be32 rss_key[MLX4_EN_RSS_KEY_SIZE / sizeof(__be32)]; __be32 base_qpn_udp; }; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 1d67fd32e71c4bd6f1622e5465dfbd2ba3e95b99..ea4f1c46f761127bf01c87b5d8933bd09819d1b7 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -219,23 +219,15 @@ enum { }; enum { - MLX5_DEV_CAP_FLAG_RC = 1LL << 0, - MLX5_DEV_CAP_FLAG_UC = 1LL << 1, - MLX5_DEV_CAP_FLAG_UD = 1LL << 2, MLX5_DEV_CAP_FLAG_XRC = 1LL << 3, - MLX5_DEV_CAP_FLAG_SRQ = 1LL << 6, MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, MLX5_DEV_CAP_FLAG_APM = 1LL << 17, MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18, MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23, - MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24, MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, - MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32, MLX5_DEV_CAP_FLAG_DCT = 1LL << 37, - MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38, - MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39, MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46, }; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 246310dc8bef9a8f8604941ee40353868ebd3965..b1bf41556b3245fc9970174f5be872202a58eec5 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -633,14 +633,6 @@ static inline void *mlx5_vzalloc(unsigned long size) return rtn; } -static inline void mlx5_vfree(const void *addr) -{ - if (addr && is_vmalloc_addr(addr)) - vfree(addr); - else - kfree(addr); -} - static inline u32 mlx5_base_mkey(const u32 key) { return key & 0xffffff00u; diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index dcfdecbfa0b711e52f3f629ec02ea3cbd70a6c1d..8e30685affeb3a40b00fffbdbdcb3edbb5b10ead 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -47,9 +47,9 @@ enum { NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */ NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... 
UDP TUNNEL with TSO */ NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */ - NETIF_F_GSO_MPLS_BIT, /* ... MPLS segmentation */ + NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */ /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ - NETIF_F_GSO_MPLS_BIT, + NETIF_F_GSO_TUNNEL_REMCSUM_BIT, NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ @@ -118,7 +118,7 @@ enum { #define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT) #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM) -#define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS) +#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) @@ -181,7 +181,6 @@ enum { NETIF_F_GSO_IPIP | \ NETIF_F_GSO_SIT | \ NETIF_F_GSO_UDP_TUNNEL | \ - NETIF_F_GSO_UDP_TUNNEL_CSUM | \ - NETIF_F_GSO_MPLS) + NETIF_F_GSO_UDP_TUNNEL_CSUM) #endif /* _LINUX_NETDEV_FEATURES_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 74fd5d37f15a472239c1c63cebe7ce9e59fc4c90..c31f74d76ebd3c595160a4b3b513594423f43240 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -57,6 +57,8 @@ struct device; struct phy_device; /* 802.11 specific */ struct wireless_dev; +/* 802.15.4 specific */ +struct wpan_dev; void netdev_set_default_ethtool_ops(struct net_device *dev, const struct ethtool_ops *ops); @@ -314,6 +316,7 @@ struct napi_struct { struct net_device *dev; struct sk_buff *gro_list; struct sk_buff *skb; + struct hrtimer timer; struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; @@ -386,6 +389,7 @@ typedef enum rx_handler_result rx_handler_result_t; typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); void __napi_schedule(struct napi_struct *n); +void __napi_schedule_irqoff(struct napi_struct *n); static inline bool napi_disable_pending(struct napi_struct *n) { @@ -420,6 +424,18 @@ static inline void napi_schedule(struct napi_struct *n) __napi_schedule(n); } +/** + * napi_schedule_irqoff - schedule NAPI poll + * @n: napi context + * + * Variant of napi_schedule(), assuming hard irqs are masked. + */ +static inline void napi_schedule_irqoff(struct napi_struct *n) +{ + if (napi_schedule_prep(n)) + __napi_schedule_irqoff(n); +} + /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ static inline bool napi_reschedule(struct napi_struct *napi) { @@ -430,14 +446,19 @@ static inline bool napi_reschedule(struct napi_struct *napi) return false; } +void __napi_complete(struct napi_struct *n); +void napi_complete_done(struct napi_struct *n, int work_done); /** * napi_complete - NAPI processing complete * @n: napi context * * Mark NAPI processing as complete. + * Consider using napi_complete_done() instead. */ -void __napi_complete(struct napi_struct *n); -void napi_complete(struct napi_struct *n); +static inline void napi_complete(struct napi_struct *n) +{ + return napi_complete_done(n, 0); +} /** * napi_by_id - lookup a NAPI by napi_id @@ -472,14 +493,7 @@ void napi_hash_del(struct napi_struct *napi); * Stop NAPI from being scheduled on this context. * Waits till any outstanding processing completes. 
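The intended pairing of the new NAPI entry points is napi_schedule_irqoff() in the hard irq handler and napi_complete_done() in the poll routine. A driver-side sketch (struct foo_priv and the foo_* helpers are invented for illustration):

	struct foo_priv {
		struct napi_struct napi;
		/* ... device state, rings ... */
	};

	static void foo_irq_disable(struct foo_priv *priv);	/* assumed */
	static void foo_irq_enable(struct foo_priv *priv);	/* assumed */
	static int foo_clean_rx(struct foo_priv *priv, int budget); /* assumed */

	static irqreturn_t foo_interrupt(int irq, void *dev_id)
	{
		struct foo_priv *priv = dev_id;

		foo_irq_disable(priv);
		/* Hard-irq context, so skip the redundant
		 * local_irq_save()/restore() that napi_schedule() does. */
		napi_schedule_irqoff(&priv->napi);
		return IRQ_HANDLED;
	}

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
		int work = foo_clean_rx(priv, budget);

		if (work < budget) {
			/* Report the real work count; napi_complete_done()
			 * feeds it into the gro_flush_timeout logic. */
			napi_complete_done(napi, work);
			foo_irq_enable(priv);
		}
		return work;
	}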
*/ -static inline void napi_disable(struct napi_struct *n) -{ - might_sleep(); - set_bit(NAPI_STATE_DISABLE, &n->state); - while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) - msleep(1); - clear_bit(NAPI_STATE_DISABLE, &n->state); -} +void napi_disable(struct napi_struct *n); /** * napi_enable - enable NAPI scheduling @@ -740,13 +754,13 @@ struct netdev_fcoe_hbainfo { }; #endif -#define MAX_PHYS_PORT_ID_LEN 32 +#define MAX_PHYS_ITEM_ID_LEN 32 -/* This structure holds a unique identifier to identify the - * physical port used by a netdevice. +/* This structure holds a unique identifier to identify some + * physical item (port for example) used by a netdevice. */ -struct netdev_phys_port_id { - unsigned char id[MAX_PHYS_PORT_ID_LEN]; +struct netdev_phys_item_id { + unsigned char id[MAX_PHYS_ITEM_ID_LEN]; unsigned char id_len; }; @@ -937,11 +951,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], * struct net_device *dev, - * const unsigned char *addr, u16 flags) + * const unsigned char *addr, u16 vid, u16 flags) * Adds an FDB entry to dev for addr. * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], * struct net_device *dev, - * const unsigned char *addr) + * const unsigned char *addr, u16 vid) * Deletes the FDB entry from dev corresponding to addr. * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, * struct net_device *dev, struct net_device *filter_dev, @@ -962,7 +976,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. * * int (*ndo_get_phys_port_id)(struct net_device *dev, - * struct netdev_phys_port_id *ppid); + * struct netdev_phys_item_id *ppid); * Called to get ID of physical port of this device. If driver does * not implement this, it is assumed that the hw is not able to have * multiple net devices on single physical port. @@ -1004,6 +1018,15 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * performing GSO on a packet. The device returns true if it is * able to GSO the packet, false otherwise. If the return value is * false the stack will do software GSO. + * + * int (*ndo_switch_parent_id_get)(struct net_device *dev, + * struct netdev_phys_item_id *psid); + * Called to get an ID of the switch chip this port is part of. + * If driver implements this, it indicates that it represents a port + * of a switch chip. + * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state); + * Called to notify switch device port of bridge port STP + * state change.
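An implementation of the two new CONFIG_NET_SWITCHDEV callbacks can stay very small. The sketch below assumes a foo_port private struct carrying a switch_id and a foo_hw_set_stp_state() helper; the BR_STATE_* values come from the bridge uapi header:

	static int foo_switch_parent_id_get(struct net_device *dev,
					    struct netdev_phys_item_id *psid)
	{
		struct foo_port *port = netdev_priv(dev);

		psid->id_len = sizeof(port->switch_id); /* <= MAX_PHYS_ITEM_ID_LEN */
		memcpy(psid->id, &port->switch_id, psid->id_len);
		return 0;
	}

	static int foo_switch_port_stp_update(struct net_device *dev, u8 state)
	{
		/* state is one of the BR_STATE_* values */
		return foo_hw_set_stp_state(netdev_priv(dev), state);
	}

	static const struct net_device_ops foo_netdev_ops = {
		/* ... the usual ndo_open/ndo_stop/ndo_start_xmit ... */
		.ndo_switch_parent_id_get	= foo_switch_parent_id_get,
		.ndo_switch_port_stp_update	= foo_switch_port_stp_update,
	};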
*/ struct net_device_ops { int (*ndo_init)(struct net_device *dev); @@ -1114,11 +1137,13 @@ struct net_device_ops { struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, + u16 vid, u16 flags); int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr); + const unsigned char *addr, + u16 vid); int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, @@ -1136,7 +1161,7 @@ struct net_device_ops { int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); int (*ndo_get_phys_port_id)(struct net_device *dev, - struct netdev_phys_port_id *ppid); + struct netdev_phys_item_id *ppid); void (*ndo_add_vxlan_port)(struct net_device *dev, sa_family_t sa_family, __be16 port); @@ -1155,6 +1180,12 @@ struct net_device_ops { int (*ndo_get_lock_subclass)(struct net_device *dev); bool (*ndo_gso_check) (struct sk_buff *skb, struct net_device *dev); +#ifdef CONFIG_NET_SWITCHDEV + int (*ndo_switch_parent_id_get)(struct net_device *dev, + struct netdev_phys_item_id *psid); + int (*ndo_switch_port_stp_update)(struct net_device *dev, + u8 state); +#endif }; /** @@ -1216,6 +1247,8 @@ enum netdev_priv_flags { IFF_LIVE_ADDR_CHANGE = 1<<20, IFF_MACVLAN = 1<<21, IFF_XMIT_DST_RELEASE_PERM = 1<<22, + IFF_IPVLAN_MASTER = 1<<23, + IFF_IPVLAN_SLAVE = 1<<24, }; #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN @@ -1241,6 +1274,8 @@ enum netdev_priv_flags { #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE #define IFF_MACVLAN IFF_MACVLAN #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM +#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER +#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE /** * struct net_device - The DEVICE structure. @@ -1572,6 +1607,7 @@ struct net_device { struct inet6_dev __rcu *ip6_ptr; void *ax25_ptr; struct wireless_dev *ieee80211_ptr; + struct wpan_dev *ieee802154_ptr; /* * Cache lines mostly used on receive path (including eth_type_trans()) @@ -1590,6 +1626,7 @@ struct net_device { #endif + unsigned long gro_flush_timeout; rx_handler_func_t __rcu *rx_handler; void __rcu *rx_handler_data; @@ -2316,10 +2353,7 @@ extern int netdev_flow_limit_table_len; * Incoming packets are placed on per-cpu queues */ struct softnet_data { - struct Qdisc *output_queue; - struct Qdisc **output_queue_tailp; struct list_head poll_list; - struct sk_buff *completion_queue; struct sk_buff_head process_queue; /* stats */ @@ -2327,10 +2361,17 @@ struct softnet_data { unsigned int time_squeeze; unsigned int cpu_collision; unsigned int received_rps; - #ifdef CONFIG_RPS struct softnet_data *rps_ipi_list; +#endif +#ifdef CONFIG_NET_FLOW_LIMIT + struct sd_flow_limit __rcu *flow_limit; +#endif + struct Qdisc *output_queue; + struct Qdisc **output_queue_tailp; + struct sk_buff *completion_queue; +#ifdef CONFIG_RPS /* Elements below can be accessed between CPUs for RPS */ struct call_single_data csd ____cacheline_aligned_in_smp; struct softnet_data *rps_ipi_next; @@ -2342,9 +2383,6 @@ struct softnet_data { struct sk_buff_head input_pkt_queue; struct napi_struct backlog; -#ifdef CONFIG_NET_FLOW_LIMIT - struct sd_flow_limit __rcu *flow_limit; -#endif }; static inline void input_queue_head_incr(struct softnet_data *sd) @@ -2748,23 +2786,6 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev, } #endif -static inline int netif_copy_real_num_queues(struct net_device *to_dev, - const struct net_device *from_dev) -{ - int err; - - err = netif_set_real_num_tx_queues(to_dev, - from_dev->real_num_tx_queues); - if (err) - 
return err; -#ifdef CONFIG_SYSFS - return netif_set_real_num_rx_queues(to_dev, - from_dev->real_num_rx_queues); -#else - return 0; -#endif -} - #ifdef CONFIG_SYSFS static inline unsigned int get_netdev_rx_queue_index( struct netdev_rx_queue *queue) @@ -2864,7 +2885,7 @@ void dev_set_group(struct net_device *, int); int dev_set_mac_address(struct net_device *, struct sockaddr *); int dev_change_carrier(struct net_device *, bool new_carrier); int dev_get_phys_port_id(struct net_device *dev, - struct netdev_phys_port_id *ppid); + struct netdev_phys_item_id *ppid); struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); @@ -3425,6 +3446,12 @@ void netdev_upper_dev_unlink(struct net_device *dev, void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); void *netdev_lower_dev_get_private(struct net_device *dev, struct net_device *lower_dev); + +/* RSS keys are 40 or 52 bytes long */ +#define NETDEV_RSS_KEY_LEN 52 +extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN]; +void netdev_rss_key_fill(void *buffer, size_t len); + int dev_get_nest_level(struct net_device *dev, bool (*type_check)(struct net_device *dev)); int skb_checksum_help(struct sk_buff *skb); @@ -3569,7 +3596,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); - BUILD_BUG_ON(SKB_GSO_MPLS != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); return (features & feature) == feature; } @@ -3614,6 +3641,21 @@ static inline bool netif_is_macvlan(struct net_device *dev) return dev->priv_flags & IFF_MACVLAN; } +static inline bool netif_is_macvlan_port(struct net_device *dev) +{ + return dev->priv_flags & IFF_MACVLAN_PORT; +} + +static inline bool netif_is_ipvlan(struct net_device *dev) +{ + return dev->priv_flags & IFF_IPVLAN_SLAVE; +} + +static inline bool netif_is_ipvlan_port(struct net_device *dev) +{ + return dev->priv_flags & IFF_IPVLAN_MASTER; +} + static inline bool netif_is_bond_master(struct net_device *dev) { return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h index 20163b9a0eae70cfdfba688bab5dc08eed5fcfdb..167342c2ce6b05d29d05dcc529023eac5cadb9ec 100644 --- a/include/linux/nl802154.h +++ b/include/linux/nl802154.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * */ #ifndef NL802154_H diff --git a/include/linux/phy.h b/include/linux/phy.h index d090cfcaa167fe33a4fc0e63562219a813f37c28..22af8f8f5802bfb7e82b8c837d994d52a87a2b08 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -433,6 +433,7 @@ struct phy_device { * by this PHY * flags: A bitfield defining certain other features this PHY * supports (like interrupts) + * driver_data: static driver data * * The drivers must implement config_aneg and read_status. All * other functions are optional. 
Note that none of these @@ -448,6 +449,7 @@ struct phy_driver { unsigned int phy_id_mask; u32 features; u32 flags; + const void *driver_data; /* * Called to issue a PHY software reset @@ -772,4 +774,28 @@ int __init mdio_bus_init(void); void mdio_bus_exit(void); extern struct bus_type mdio_bus_type; + +/** + * module_phy_driver() - Helper macro for registering PHY drivers + * @__phy_drivers: array of PHY drivers to register + * + * Helper macro for PHY drivers which do not do anything special in module + * init/exit. Each module may only use this macro once, and calling it + * replaces module_init() and module_exit(). + */ +#define phy_module_driver(__phy_drivers, __count) \ +static int __init phy_module_init(void) \ +{ \ + return phy_drivers_register(__phy_drivers, __count); \ +} \ +module_init(phy_module_init); \ +static void __exit phy_module_exit(void) \ +{ \ + phy_drivers_unregister(__phy_drivers, __count); \ +} \ +module_exit(phy_module_exit) + +#define module_phy_driver(__phy_drivers) \ + phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers)) + #endif /* __PHY_H */ diff --git a/include/linux/platform_data/bcmgenet.h b/include/linux/platform_data/bcmgenet.h new file mode 100644 index 0000000000000000000000000000000000000000..26af54321958e097f4a7b7e92a711d7c73cd0dce --- /dev/null +++ b/include/linux/platform_data/bcmgenet.h @@ -0,0 +1,18 @@ +#ifndef __LINUX_PLATFORM_DATA_BCMGENET_H__ +#define __LINUX_PLATFORM_DATA_BCMGENET_H__ + +#include +#include +#include + +struct bcmgenet_platform_data { + bool mdio_enabled; + phy_interface_t phy_interface; + int phy_address; + int phy_speed; + int phy_duplex; + u8 mac_address[ETH_ALEN]; + int genet_version; +}; + +#endif diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h index 1730312398ff24ab3d0f38a63b713fafd4236ed1..5087fff96d86a53d04443235aaa9351e5cc6b5ba 100644 --- a/include/linux/platform_data/st21nfca.h +++ b/include/linux/platform_data/st21nfca.h @@ -24,7 +24,6 @@ #define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci" struct st21nfca_nfc_platform_data { - unsigned int gpio_irq; unsigned int gpio_ena; unsigned int irq_polarity; }; diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st21nfcb.h index 2d11f1f5efaba8884b9e2c7a07ad7e26732b9d48..c3b432f5b63e7eea706738a951d64e2556033ee4 100644 --- a/include/linux/platform_data/st21nfcb.h +++ b/include/linux/platform_data/st21nfcb.h @@ -24,7 +24,6 @@ #define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci" struct st21nfcb_nfc_platform_data { - unsigned int gpio_irq; unsigned int gpio_reset; unsigned int irq_polarity; }; diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h index 18d75e795606c8563ffe4313c36892f6f72fa146..e1ab6e86cdb39491e9a1a311d1e6323bf0b2f9ad 100644 --- a/include/linux/pxa168_eth.h +++ b/include/linux/pxa168_eth.h @@ -4,6 +4,8 @@ #ifndef __LINUX_PXA168_ETH_H #define __LINUX_PXA168_ETH_H +#include + struct pxa168_eth_platform_data { int port_number; int phy_addr; @@ -13,6 +15,7 @@ struct pxa168_eth_platform_data { */ int speed; /* 0, SPEED_10, SPEED_100 */ int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ + phy_interface_t intf; /* * Override default RX/TX queue sizes if nonzero. 
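With the module_phy_driver() helper added to phy.h above, a PHY driver that does nothing special in module init/exit shrinks to its phy_driver array plus one macro invocation. A minimal sketch (the ID, mask and name are placeholders; the genphy_* callbacks are the existing generic implementations):

	static struct phy_driver example_phy_drivers[] = {
		{
			.phy_id		= 0x01410e60,
			.phy_id_mask	= 0xfffffff0,
			.name		= "Example PHY",
			.features	= PHY_BASIC_FEATURES,
			.config_aneg	= genphy_config_aneg,
			.read_status	= genphy_read_status,
		},
	};

	module_phy_driver(example_phy_drivers);

The macro expands to exactly the phy_drivers_register()/phy_drivers_unregister() pair spelled out in the header.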
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 372ad5e0dcb88df4af003686c7057af3e900d281..aa79b3c24f66fe7a2b2be25ed0222b4c5c712fbe 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -542,6 +542,15 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ typeof(*(pos)), member)) +/** + * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point + * @pos: the type * to use as a loop cursor. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_from_rcu(pos, member) \ + for (; pos; \ + pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ + typeof(*(pos)), member)) #endif /* __KERNEL__ */ #endif diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index fb298e9d6d3a8e75c280162f21e04c5112eebeac..b93fd89b2e5e086e23f054d33111c042317f5dbd 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -65,7 +65,10 @@ struct rhashtable_params { size_t new_size); bool (*shrink_decision)(const struct rhashtable *ht, size_t new_size); - int (*mutex_is_held)(void); +#ifdef CONFIG_PROVE_LOCKING + int (*mutex_is_held)(void *parent); + void *parent; +#endif }; /** @@ -96,16 +99,16 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params); u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len); u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr); -void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node, gfp_t); -bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node, gfp_t); +void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node); +bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node); void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, - struct rhash_head __rcu **pprev, gfp_t flags); + struct rhash_head __rcu **pprev); bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size); bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); -int rhashtable_expand(struct rhashtable *ht, gfp_t flags); -int rhashtable_shrink(struct rhashtable *ht, gfp_t flags); +int rhashtable_expand(struct rhashtable *ht); +int rhashtable_shrink(struct rhashtable *ht); void *rhashtable_lookup(const struct rhashtable *ht, const void *key); void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 6cacbce1a06c54700de7017a008163cb0d4ee9b6..5db76a32fcaba3d9656fa5de6d077780d1d07e46 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -17,6 +17,11 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, long expires, u32 error); void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); +struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, + unsigned change, gfp_t flags); +void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, + gfp_t flags); + /* RTNL is used as a global lock for all changes to network configuration */ extern void rtnl_lock(void); @@ -94,12 +99,15 @@ extern int ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, - u16 flags); + u16 vid, + u16 flags); extern int ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr); + const unsigned char *addr, + 
u16 vid); extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, u16 mode); + struct net_device *dev, u16 mode, + u32 flags, u32 mask); #endif /* __LINUX_RTNETLINK_H */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 6c8b6f604e7609f2d320a7aa572a8ec6a870f4b8..85ab7d72b54c2f269812015b19544674bc6dcd72 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -20,6 +20,8 @@ #include #include #include +#include +#include #include #include @@ -148,6 +150,8 @@ struct net_device; struct scatterlist; struct pipe_inode_info; +struct iov_iter; +struct napi_struct; #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) struct nf_conntrack { @@ -341,7 +345,6 @@ enum { SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */ SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */ SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */ - SKB_FCLONE_FREE, /* this companion fclone skb is available */ }; enum { @@ -370,8 +373,7 @@ enum { SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, - SKB_GSO_MPLS = 1 << 12, - + SKB_GSO_TUNNEL_REMCSUM = 1 << 12, }; #if BITS_PER_LONG > 32 @@ -440,6 +442,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, * @next: Next buffer in list * @prev: Previous buffer in list * @tstamp: Time we arrived/left + * @rbnode: RB tree node, alternative to next/prev for netem/tcp * @sk: Socket we are owned by * @dev: Device we arrived on/are leaving by * @cb: Control buffer. Free for use by every layer. Put private vars here @@ -504,15 +507,19 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, */ struct sk_buff { - /* These two members must be first. */ - struct sk_buff *next; - struct sk_buff *prev; - union { - ktime_t tstamp; - struct skb_mstamp skb_mstamp; + struct { + /* These two members must be first. */ + struct sk_buff *next; + struct sk_buff *prev; + + union { + ktime_t tstamp; + struct skb_mstamp skb_mstamp; + }; + }; + struct rb_node rbnode; /* used in netem & tcp stack */ }; - struct sock *sk; struct net_device *dev; @@ -597,7 +604,8 @@ struct sk_buff { #endif __u8 ipvs_property:1; __u8 inner_protocol_type:1; - /* 4 or 6 bit hole */ + __u8 remcsum_offload:1; + /* 3 or 5 bit hole */ #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ @@ -666,6 +674,7 @@ struct sk_buff { #define SKB_ALLOC_FCLONE 0x01 #define SKB_ALLOC_RX 0x02 +#define SKB_ALLOC_NAPI 0x04 /* Returns true if the skb was allocated from PFMEMALLOC reserves */ static inline bool skb_pfmemalloc(const struct sk_buff *skb) @@ -710,9 +719,6 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) skb->_skb_refdst = (unsigned long)dst; } -void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, - bool force); - /** * skb_dst_set_noref - sets skb dst, hopefully, without taking reference * @skb: buffer @@ -725,24 +731,8 @@ void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, */ static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) { - __skb_dst_set_noref(skb, dst, false); -} - -/** - * skb_dst_set_noref_force - sets skb dst, without taking reference - * @skb: buffer - * @dst: dst entry - * - * Sets skb dst, assuming a reference was not taken on dst. - * No reference is taken and no dst_release will be called. 
While for - * cached dsts deferred reclaim is a basic feature, for entries that are - * not cached it is caller's job to guarantee that last dst_release for - * provided dst happens when nobody uses it, eg. after a RCU grace period. - */ -static inline void skb_dst_set_noref_force(struct sk_buff *skb, - struct dst_entry *dst) -{ - __skb_dst_set_noref(skb, dst, true); + WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); + skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; } /** @@ -810,7 +800,7 @@ static inline bool skb_fclone_busy(const struct sock *sk, fclones = container_of(skb, struct sk_buff_fclones, skb1); return skb->fclone == SKB_FCLONE_ORIG && - fclones->skb2.fclone == SKB_FCLONE_CLONE && + atomic_read(&fclones->fclone_ref) > 1 && fclones->skb2.sk == sk; } @@ -2176,47 +2166,61 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); } +void *napi_alloc_frag(unsigned int fragsz); +struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, + unsigned int length, gfp_t gfp_mask); +static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi, + unsigned int length) +{ + return __napi_alloc_skb(napi, length, GFP_ATOMIC); +} + /** - * __skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data - * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX - * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used - * @order: size of the allocation + * __dev_alloc_pages - allocate page for network Rx + * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx + * @order: size of the allocation * - * Allocate a new page. + * Allocate a new page. * - * %NULL is returned if there is no free memory. + * %NULL is returned if there is no free memory. */ -static inline struct page *__skb_alloc_pages(gfp_t gfp_mask, - struct sk_buff *skb, - unsigned int order) -{ - struct page *page; - - gfp_mask |= __GFP_COLD; - - if (!(gfp_mask & __GFP_NOMEMALLOC)) - gfp_mask |= __GFP_MEMALLOC; +static inline struct page *__dev_alloc_pages(gfp_t gfp_mask, + unsigned int order) +{ + /* This piece of code contains several assumptions. + * 1. This is for device Rx, therefore a cold page is preferred. + * 2. The expectation is the user wants a compound page. + * 3. If requesting an order 0 page it will not be compound + * due to the check to see if order has a value in prep_new_page + * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to + * code in gfp_to_alloc_flags that should be enforcing this. + */ + gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC; - page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); - if (skb && page && page->pfmemalloc) - skb->pfmemalloc = true; + return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); +} - return page; +static inline struct page *dev_alloc_pages(unsigned int order) +{ + return __dev_alloc_pages(GFP_ATOMIC, order); } /** - * __skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data - * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX - * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used + * __dev_alloc_page - allocate a page for network Rx + * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx * - * Allocate a new page. + * Allocate a new page. * - * %NULL is returned if there is no free memory. + * %NULL is returned if there is no free memory.
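Call sites of the old __skb_alloc_pages(gfp, NULL, order) map directly onto the new helpers, and small-packet paths can draw from the per-NAPI cache via napi_alloc_skb(). A refill/copybreak sketch with an assumed ring structure:

	struct foo_ring {
		struct napi_struct *napi;
		unsigned int rx_page_order;
		/* ... descriptors ... */
	};

	static struct page *foo_refill_page(struct foo_ring *ring)
	{
		/* GFP_ATOMIC variant; __GFP_COLD, __GFP_COMP and
		 * __GFP_MEMALLOC are applied inside __dev_alloc_pages(). */
		return dev_alloc_pages(ring->rx_page_order);
	}

	static void foo_rx_copybreak(struct foo_ring *ring, const void *data,
				     unsigned int len)
	{
		struct sk_buff *skb = napi_alloc_skb(ring->napi, len);

		if (!skb)
			return;	/* drop; the ring still owns the page */
		memcpy(__skb_put(skb, len), data, len);
		napi_gro_receive(ring->napi, skb);
	}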
*/ -static inline struct page *__skb_alloc_page(gfp_t gfp_mask, - struct sk_buff *skb) +static inline struct page *__dev_alloc_page(gfp_t gfp_mask) +{ + return __dev_alloc_pages(gfp_mask, 0); +} + +static inline struct page *dev_alloc_page(void) { - return __skb_alloc_pages(gfp_mask, skb, 0); + return __dev_alloc_page(GFP_ATOMIC); } /** @@ -2448,7 +2452,6 @@ static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) * is untouched. Otherwise it is extended. Returns zero on * success. The skb is freed on error. */ - static inline int skb_padto(struct sk_buff *skb, unsigned int len) { unsigned int size = skb->len; @@ -2457,6 +2460,29 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) return skb_pad(skb, len - size); } +/** + * skb_put_padto - increase size and pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error. + */ +static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + + if (unlikely(size < len)) { + len -= size; + if (skb_pad(skb, len)) + return -ENOMEM; + __skb_put(skb, len); + } + return 0; +} + static inline int skb_add_data(struct sk_buff *skb, char __user *from, int copy) { @@ -2629,18 +2655,18 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err); unsigned int datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait); -int skb_copy_datagram_iovec(const struct sk_buff *from, int offset, - struct iovec *to, int size); -int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen, - struct iovec *iov); -int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, - const struct iovec *from, int from_offset, - int len); -int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm, - int offset, size_t count); -int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset, - const struct iovec *to, int to_offset, - int size); +int skb_copy_datagram_iter(const struct sk_buff *from, int offset, + struct iov_iter *to, int size); +static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset, + struct msghdr *msg, int size) +{ + return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size); +} +int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen, + struct msghdr *msg); +int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, + struct iov_iter *from, int len); +int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); void skb_free_datagram(struct sock *sk, struct sk_buff *skb); void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb); int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); @@ -2661,6 +2687,20 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet); unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); struct sk_buff *skb_vlan_untag(struct sk_buff *skb); +int skb_ensure_writable(struct sk_buff *skb, int write_len); +int skb_vlan_pop(struct sk_buff *skb); +int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); + +static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len) +{ + /* XXX: stripping const */ + return 
memcpy_fromiovec(data, (struct iovec *)msg->msg_iter.iov, len); +} + +static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len) +{ + return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT; +} struct skb_checksum_ops { __wsum (*update)(const void *mem, int len, __wsum wsum); diff --git a/include/linux/socket.h b/include/linux/socket.h index bb9b83640070f4a366a8ca67f56495f07aaa7830..6e49a14365dc1bea4bc442097dcaebb7bb0c08a9 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -47,16 +47,25 @@ struct linger { struct msghdr { void *msg_name; /* ptr to socket address structure */ int msg_namelen; /* size of socket address structure */ - struct iovec *msg_iov; /* scatter/gather array */ - __kernel_size_t msg_iovlen; /* # elements in msg_iov */ + struct iov_iter msg_iter; /* data */ void *msg_control; /* ancillary data */ __kernel_size_t msg_controllen; /* ancillary data buffer length */ unsigned int msg_flags; /* flags on received message */ }; + +struct user_msghdr { + void __user *msg_name; /* ptr to socket address structure */ + int msg_namelen; /* size of socket address structure */ + struct iovec __user *msg_iov; /* scatter/gather array */ + __kernel_size_t msg_iovlen; /* # elements in msg_iov */ + void __user *msg_control; /* ancillary data */ + __kernel_size_t msg_controllen; /* ancillary data buffer length */ + unsigned int msg_flags; /* flags on received message */ +}; /* For recvmmsg/sendmmsg */ struct mmsghdr { - struct msghdr msg_hdr; + struct user_msghdr msg_hdr; unsigned int msg_len; }; @@ -94,6 +103,10 @@ struct cmsghdr { (cmsg)->cmsg_len <= (unsigned long) \ ((mhdr)->msg_controllen - \ ((char *)(cmsg) - (char *)(mhdr)->msg_control))) +#define for_each_cmsghdr(cmsg, msg) \ + for (cmsg = CMSG_FIRSTHDR(msg); \ + cmsg; \ + cmsg = CMSG_NXTHDR(msg, cmsg)) /* * Get the next cmsg header @@ -312,15 +325,14 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata, extern unsigned long iov_pages(const struct iovec *iov, int offset, unsigned long nr_segs); -extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode); extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); struct timespec; /* The __sys_...msg variants allow MSG_CMSG_COMPAT */ -extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags); -extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); +extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags); +extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags); extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout); extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index bda9b81357ccf13ae68f2c951e9d38d86896cbad..c9afdc7a7f84db0eec47414cca25598abed52b89 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -25,7 +25,7 @@ struct linux_dirent64; struct list_head; struct mmap_arg_struct; struct msgbuf; -struct msghdr; +struct user_msghdr; struct mmsghdr; struct msqid_ds; struct new_utsname; @@ -601,13 +601,13 @@ asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *); asmlinkage long sys_send(int, void __user *, size_t, unsigned); asmlinkage long sys_sendto(int, void __user *, size_t, unsigned, 
struct sockaddr __user *, int); -asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); +asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags); asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, unsigned int vlen, unsigned flags); asmlinkage long sys_recv(int, void __user *, size_t, unsigned); asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *); -asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags); +asmlinkage long sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags); asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg, unsigned int vlen, unsigned flags, struct timespec __user *timeout); diff --git a/include/linux/tcp.h b/include/linux/tcp.h index c2dee7deefa8cb32af530d20e5aa32a61b10ce68..67309ece0772b9a28e054af553c9aabd76021699 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -130,7 +130,7 @@ struct tcp_sock { /* inet_connection_sock has to be the first member of tcp_sock */ struct inet_connection_sock inet_conn; u16 tcp_header_len; /* Bytes of tcp header to send */ - u16 xmit_size_goal_segs; /* Goal for segmenting output packets */ + u16 gso_segs; /* Max number of segs per GSO packet */ /* * Header prediction flags @@ -162,7 +162,7 @@ struct tcp_sock { struct { struct sk_buff_head prequeue; struct task_struct *task; - struct iovec *iov; + struct msghdr *msg; int memory; int len; } ucopy; @@ -204,10 +204,10 @@ struct tcp_sock { u16 urg_data; /* Saved octet of OOB data and control flags */ u8 ecn_flags; /* ECN status bits. */ - u8 reordering; /* Packet reordering metric. */ + u8 keepalive_probes; /* num of allowed keep alive probes */ + u32 reordering; /* Packet reordering metric. */ u32 snd_up; /* Urgent pointer */ - u8 keepalive_probes; /* num of allowed keep alive probes */ /* * Options received (usually on last packet, some only on SYN packets). 
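for_each_cmsghdr(), added to linux/socket.h above, replaces the open-coded CMSG_FIRSTHDR()/CMSG_NXTHDR() loops scattered through protocol code. A sketch of the resulting control-message walk (the dispatch body is elided):

	static int example_scan_cmsgs(struct msghdr *msg)
	{
		struct cmsghdr *cmsg;

		for_each_cmsghdr(cmsg, msg) {
			if (!CMSG_OK(msg, cmsg))
				return -EINVAL;
			if (cmsg->cmsg_level != SOL_SOCKET)
				continue;
			/* ... dispatch on cmsg->cmsg_type ... */
		}
		return 0;
	}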
*/ diff --git a/include/linux/uio.h b/include/linux/uio.h index bd8569a14c4acadd4df6f82d67d5347504c48e47..a41e252396c0a4d58910858db78a7c6e5e800973 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -131,7 +131,6 @@ size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); -int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len); int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, int offset, int len); int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h index 023430e265fe983f3e1192ccd8093985ee69e053..5691f752ce8f0b6fdd83f43502621f0ca06eebca 100644 --- a/include/linux/vmw_vmci_api.h +++ b/include/linux/vmw_vmci_api.h @@ -24,6 +24,7 @@ #define VMCI_KERNEL_API_VERSION_2 2 #define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_2 +struct msghdr; typedef void (vmci_device_shutdown_fn) (void *device_registration, void *user_data); @@ -75,8 +76,8 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size, ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, void *iov, size_t iov_size, int mode); ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, - void *iov, size_t iov_size, int mode); -ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, void *iov, size_t iov_size, + struct msghdr *msg, size_t iov_size, int mode); +ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size, int mode); #endif /* !__VMW_VMCI_API_H__ */ diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h index d184df1d0d412360a6de44024b9559afc9d13a07..dc03d77ad23bb2588869251a67c5a7143c874b21 100644 --- a/include/net/6lowpan.h +++ b/include/net/6lowpan.h @@ -372,12 +372,12 @@ lowpan_uncompress_size(const struct sk_buff *skb, u16 *dgram_offset) return skb->len + uncomp_header - ret; } -typedef int (*skb_delivery_cb)(struct sk_buff *skb, struct net_device *dev); - -int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, - const u8 *saddr, const u8 saddr_type, const u8 saddr_len, - const u8 *daddr, const u8 daddr_type, const u8 daddr_len, - u8 iphc0, u8 iphc1, skb_delivery_cb skb_deliver); +int +lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev, + const u8 *saddr, const u8 saddr_type, + const u8 saddr_len, const u8 *daddr, + const u8 daddr_type, const u8 daddr_len, + u8 iphc0, u8 iphc1); int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *_daddr, const void *_saddr, unsigned int len); diff --git a/include/net/af_ieee802154.h b/include/net/af_ieee802154.h index 085940f7eeec0e56d5f95226034b32b906bc3380..7d38e2ffd2566b3077b1e6450a00ef4b771d9839 100644 --- a/include/net/af_ieee802154.h +++ b/include/net/af_ieee802154.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- * * Written by: * Sergey Lapin * Dmitry Eremin-Solenikov diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index 4282778694006034bccca4ce3fbeba9be29537ec..0d87674fb7758736d9cdbf466fd717c5665f535e 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -103,14 +103,14 @@ struct vsock_transport { int (*dgram_dequeue)(struct kiocb *kiocb, struct vsock_sock *vsk, struct msghdr *msg, size_t len, int flags); int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *, - struct iovec *, size_t len); + struct msghdr *, size_t len); bool (*dgram_allow)(u32 cid, u32 port); /* STREAM. */ /* TODO: stream_bind() */ - ssize_t (*stream_dequeue)(struct vsock_sock *, struct iovec *, + ssize_t (*stream_dequeue)(struct vsock_sock *, struct msghdr *, size_t len, int flags); - ssize_t (*stream_enqueue)(struct vsock_sock *, struct iovec *, + ssize_t (*stream_enqueue)(struct vsock_sock *, struct msghdr *, size_t len); s64 (*stream_has_data)(struct vsock_sock *); s64 (*stream_has_space)(struct vsock_sock *); diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 6e8f249673089badfb3e512341478171ada9d174..40129b3838b2aa8d811756a61900de4ecf7e850c 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -129,6 +129,15 @@ enum { * during the hdev->setup vendor callback. */ HCI_QUIRK_INVALID_BDADDR, + + /* When this quirk is set, the duplicate filtering during + * scanning is based on Bluetooth devices addresses. To allow + * RSSI based updates, restart scanning if needed. + * + * This quirk can be set before hci_register_dev is called or + * during the hdev->setup vendor callback. + */ + HCI_QUIRK_STRICT_DUPLICATE_FILTER, }; /* HCI device flags */ @@ -154,6 +163,7 @@ enum { enum { HCI_DUT_MODE, HCI_FORCE_SC, + HCI_FORCE_LESC, HCI_FORCE_STATIC_ADDR, }; @@ -265,6 +275,7 @@ enum { /* Low Energy links do not have defined link type. 
Use invented one */ #define LE_LINK 0x80 #define AMP_LINK 0x81 +#define INVALID_LINK 0xff /* LMP features */ #define LMP_3SLOT 0x01 @@ -332,6 +343,7 @@ enum { #define HCI_LE_ENCRYPTION 0x01 #define HCI_LE_CONN_PARAM_REQ_PROC 0x02 #define HCI_LE_PING 0x10 +#define HCI_LE_EXT_SCAN_POLICY 0x80 /* Connection modes */ #define HCI_CM_ACTIVE 0x0000 @@ -401,6 +413,7 @@ enum { /* The core spec defines 127 as the "not available" value */ #define HCI_TX_POWER_INVALID 127 +#define HCI_RSSI_INVALID 127 #define HCI_ROLE_MASTER 0x00 #define HCI_ROLE_SLAVE 0x01 @@ -629,7 +642,7 @@ struct hci_cp_user_passkey_reply { struct hci_cp_remote_oob_data_reply { bdaddr_t bdaddr; __u8 hash[16]; - __u8 randomizer[16]; + __u8 rand[16]; } __packed; #define HCI_OP_REMOTE_OOB_DATA_NEG_REPLY 0x0433 @@ -721,9 +734,9 @@ struct hci_rp_set_csb { struct hci_cp_remote_oob_ext_data_reply { bdaddr_t bdaddr; __u8 hash192[16]; - __u8 randomizer192[16]; + __u8 rand192[16]; __u8 hash256[16]; - __u8 randomizer256[16]; + __u8 rand256[16]; } __packed; #define HCI_OP_SNIFF_MODE 0x0803 @@ -930,7 +943,7 @@ struct hci_cp_write_ssp_mode { struct hci_rp_read_local_oob_data { __u8 status; __u8 hash[16]; - __u8 randomizer[16]; + __u8 rand[16]; } __packed; #define HCI_OP_READ_INQ_RSP_TX_POWER 0x0c58 @@ -1014,9 +1027,9 @@ struct hci_cp_write_sc_support { struct hci_rp_read_local_oob_ext_data { __u8 status; __u8 hash192[16]; - __u8 randomizer192[16]; + __u8 rand192[16]; __u8 hash256[16]; - __u8 randomizer256[16]; + __u8 rand256[16]; } __packed; #define HCI_OP_READ_LOCAL_VERSION 0x1001 @@ -1463,6 +1476,11 @@ struct hci_ev_cmd_status { __le16 opcode; } __packed; +#define HCI_EV_HARDWARE_ERROR 0x10 +struct hci_ev_hardware_error { + __u8 code; +} __packed; + #define HCI_EV_ROLE_CHANGE 0x12 struct hci_ev_role_change { __u8 status; @@ -1734,6 +1752,25 @@ struct hci_ev_le_conn_complete { __u8 clk_accurancy; } __packed; +/* Advertising report event types */ +#define LE_ADV_IND 0x00 +#define LE_ADV_DIRECT_IND 0x01 +#define LE_ADV_SCAN_IND 0x02 +#define LE_ADV_NONCONN_IND 0x03 +#define LE_ADV_SCAN_RSP 0x04 + +#define ADDR_LE_DEV_PUBLIC 0x00 +#define ADDR_LE_DEV_RANDOM 0x01 + +#define HCI_EV_LE_ADVERTISING_REPORT 0x02 +struct hci_ev_le_advertising_info { + __u8 evt_type; + __u8 bdaddr_type; + bdaddr_t bdaddr; + __u8 length; + __u8 data[0]; +} __packed; + #define HCI_EV_LE_CONN_UPDATE_COMPLETE 0x03 struct hci_ev_le_conn_update_complete { __u8 status; @@ -1759,23 +1796,14 @@ struct hci_ev_le_remote_conn_param_req { __le16 timeout; } __packed; -/* Advertising report event types */ -#define LE_ADV_IND 0x00 -#define LE_ADV_DIRECT_IND 0x01 -#define LE_ADV_SCAN_IND 0x02 -#define LE_ADV_NONCONN_IND 0x03 -#define LE_ADV_SCAN_RSP 0x04 - -#define ADDR_LE_DEV_PUBLIC 0x00 -#define ADDR_LE_DEV_RANDOM 0x01 - -#define HCI_EV_LE_ADVERTISING_REPORT 0x02 -struct hci_ev_le_advertising_info { +#define HCI_EV_LE_DIRECT_ADV_REPORT 0x0B +struct hci_ev_le_direct_adv_info { __u8 evt_type; __u8 bdaddr_type; bdaddr_t bdaddr; - __u8 length; - __u8 data[0]; + __u8 direct_addr_type; + bdaddr_t direct_addr; + __s8 rssi; } __packed; /* Internal events generated by Bluetooth stack */ diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 37ff1aef0845e9c2ef0335b95c87b93b65e9c025..3c7827005c25fafdcad5962866003ba132cafaa6 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -75,6 +75,10 @@ struct discovery_state { u32 last_adv_flags; u8 last_adv_data[HCI_MAX_AD_LENGTH]; u8 last_adv_data_len; + bool report_invalid_rssi; + s8 
rssi; + u16 uuid_count; + u8 (*uuids)[16]; }; struct hci_conn_hash { @@ -108,6 +112,7 @@ struct smp_csrk { struct smp_ltk { struct list_head list; + struct rcu_head rcu; bdaddr_t bdaddr; u8 bdaddr_type; u8 authenticated; @@ -120,6 +125,7 @@ struct smp_ltk { struct smp_irk { struct list_head list; + struct rcu_head rcu; bdaddr_t rpa; bdaddr_t bdaddr; u8 addr_type; @@ -128,6 +134,7 @@ struct smp_irk { struct link_key { struct list_head list; + struct rcu_head rcu; bdaddr_t bdaddr; u8 type; u8 val[HCI_LINK_KEY_SIZE]; @@ -137,10 +144,11 @@ struct link_key { struct oob_data { struct list_head list; bdaddr_t bdaddr; + u8 bdaddr_type; u8 hash192[16]; - u8 randomizer192[16]; + u8 rand192[16]; u8 hash256[16]; - u8 randomizer256[16]; + u8 rand256[16]; }; #define HCI_MAX_SHORT_NAME_LENGTH 10 @@ -303,6 +311,7 @@ struct hci_dev { __u32 req_result; void *smp_data; + void *smp_bredr_data; struct discovery_state discovery; struct hci_conn_hash conn_hash; @@ -398,6 +407,8 @@ struct hci_conn { __u16 le_conn_interval; __u16 le_conn_latency; __u16 le_supv_timeout; + __u8 le_adv_data[HCI_MAX_AD_LENGTH]; + __u8 le_adv_data_len; __s8 rssi; __s8 tx_power; __s8 max_tx_power; @@ -496,6 +507,17 @@ static inline void discovery_init(struct hci_dev *hdev) INIT_LIST_HEAD(&hdev->discovery.all); INIT_LIST_HEAD(&hdev->discovery.unknown); INIT_LIST_HEAD(&hdev->discovery.resolve); + hdev->discovery.report_invalid_rssi = true; + hdev->discovery.rssi = HCI_RSSI_INVALID; +} + +static inline void hci_discovery_filter_clear(struct hci_dev *hdev) +{ + hdev->discovery.report_invalid_rssi = true; + hdev->discovery.rssi = HCI_RSSI_INVALID; + hdev->discovery.uuid_count = 0; + kfree(hdev->discovery.uuids); + hdev->discovery.uuids = NULL; } bool hci_discovery_active(struct hci_dev *hdev); @@ -553,6 +575,8 @@ enum { HCI_CONN_STK_ENCRYPT, HCI_CONN_AUTH_INITIATOR, HCI_CONN_DROP, + HCI_CONN_PARAM_REMOVAL_PEND, + HCI_CONN_NEW_LINK_KEY, }; static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) @@ -643,6 +667,26 @@ static inline unsigned int hci_conn_count(struct hci_dev *hdev) return c->acl_num + c->amp_num + c->sco_num + c->le_num; } +static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + __u8 type = INVALID_LINK; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->handle == handle) { + type = c->type; + break; + } + } + + rcu_read_unlock(); + + return type; +} + static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev, __u16 handle) { @@ -853,6 +897,7 @@ int hci_register_dev(struct hci_dev *hdev); void hci_unregister_dev(struct hci_dev *hdev); int hci_suspend_dev(struct hci_dev *hdev); int hci_resume_dev(struct hci_dev *hdev); +int hci_reset_dev(struct hci_dev *hdev); int hci_dev_open(__u16 dev); int hci_dev_close(__u16 dev); int hci_dev_reset(__u16 dev); @@ -894,13 +939,11 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr); struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len, bool *persistent); -struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand, - u8 role); struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type, u8 authenticated, u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand); -struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 addr_type, u8 role); +struct smp_ltk *hci_find_ltk(struct hci_dev 
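The uuid_count/uuids pair added to struct discovery_state carries the Start Service Discovery filter; matching a 128-bit UUID found in advertising data against it is a simple linear scan. A minimal sketch, with demo_uuid_match() as a hypothetical helper:

/* Illustrative only: test a UUID from advertising data against the
 * discovery filter set up by Start Service Discovery.
 */
static bool demo_uuid_match(struct hci_dev *hdev, const u8 uuid[16])
{
	u16 i;

	for (i = 0; i < hdev->discovery.uuid_count; i++)
		if (!memcmp(uuid, hdev->discovery.uuids[i], 16))
			return true;

	return false;
}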
*hdev, bdaddr_t *bdaddr, + u8 addr_type, u8 role); int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type); void hci_smp_ltks_clear(struct hci_dev *hdev); int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr); @@ -915,13 +958,12 @@ void hci_smp_irks_clear(struct hci_dev *hdev); void hci_remote_oob_data_clear(struct hci_dev *hdev); struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, - bdaddr_t *bdaddr); + bdaddr_t *bdaddr, u8 bdaddr_type); int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 *hash, u8 *randomizer); -int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 *hash192, u8 *randomizer192, - u8 *hash256, u8 *randomizer256); -int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr); + u8 bdaddr_type, u8 *hash192, u8 *rand192, + u8 *hash256, u8 *rand256); +int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 bdaddr_type); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); @@ -972,6 +1014,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \ !test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) +#define bredr_sc_enabled(dev) ((lmp_sc_capable(dev) || \ + test_bit(HCI_FORCE_SC, &(dev)->dbg_flags)) && \ + test_bit(HCI_SC_ENABLED, &(dev)->dev_flags)) /* ----- HCI protocols ----- */ #define HCI_PROTO_DEFER 0x01 @@ -1310,9 +1355,8 @@ int mgmt_update_adv_data(struct hci_dev *hdev); void mgmt_discoverable_timeout(struct hci_dev *hdev); void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent); -void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, - u8 addr_type, u32 flags, u8 *name, u8 name_len, - u8 *dev_class); +void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, + u32 flags, u8 *name, u8 name_len); void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 reason, bool mgmt_connected); @@ -1349,8 +1393,8 @@ void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, u8 status); void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status); void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192, - u8 *randomizer192, u8 *hash256, - u8 *randomizer256, u8 status); + u8 *rand192, u8 *hash256, u8 *rand256, + u8 status); void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len); diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index ead99f032f7ab381a0559cf5f4ee23600632d479..d1bb342d083f14efc11ba4992e8c7509a67deac1 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -28,6 +28,7 @@ #define __L2CAP_H #include +#include /* L2CAP defaults */ #define L2CAP_DEFAULT_MTU 672 @@ -140,6 +141,7 @@ struct l2cap_conninfo { #define L2CAP_FC_ATT 0x10 #define L2CAP_FC_SIG_LE 0x20 #define L2CAP_FC_SMP_LE 0x40 +#define L2CAP_FC_SMP_BREDR 0x80 /* L2CAP Control Field bit masks */ #define L2CAP_CTRL_SAR 0xC000 @@ -254,6 +256,7 @@ struct l2cap_conn_rsp { #define L2CAP_CID_ATT 0x0004 #define L2CAP_CID_LE_SIGNALING 0x0005 #define L2CAP_CID_SMP 0x0006 +#define L2CAP_CID_SMP_BREDR 0x0007 #define L2CAP_CID_DYN_START 0x0040 #define L2CAP_CID_DYN_END 0xffff #define L2CAP_CID_LE_DYN_END 0x007f @@ -481,6 +484,7 @@ struct l2cap_chan { struct hci_conn *hs_hcon; struct hci_chan *hs_hchan; struct kref kref; + 
atomic_t nesting; __u8 state; @@ -604,10 +608,6 @@ struct l2cap_ops { struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan, unsigned long hdr_len, unsigned long len, int nb); - int (*memcpy_fromiovec) (struct l2cap_chan *chan, - unsigned char *kdata, - struct iovec *iov, - int len); }; struct l2cap_conn { @@ -617,8 +617,8 @@ struct l2cap_conn { unsigned int mtu; __u32 feat_mask; - __u8 fixed_chan_mask; - bool hs_enabled; + __u8 remote_fixed_chan; + __u8 local_fixed_chan; __u8 info_state; __u8 info_ident; @@ -713,6 +713,17 @@ enum { FLAG_HOLD_HCI_CONN, }; +/* Lock nesting levels for L2CAP channels. We need these because lockdep + * otherwise considers all channels equal and will e.g. complain about a + * connection oriented channel triggering SMP procedures or a listening + * channel creating and locking a child channel. + */ +enum { + L2CAP_NESTING_SMP, + L2CAP_NESTING_NORMAL, + L2CAP_NESTING_PARENT, +}; + enum { L2CAP_TX_STATE_XMIT, L2CAP_TX_STATE_WAIT_F, @@ -778,7 +789,7 @@ void l2cap_chan_put(struct l2cap_chan *c); static inline void l2cap_chan_lock(struct l2cap_chan *chan) { - mutex_lock(&chan->lock); + mutex_lock_nested(&chan->lock, atomic_read(&chan->nesting)); } static inline void l2cap_chan_unlock(struct l2cap_chan *chan) @@ -890,31 +901,6 @@ static inline long l2cap_chan_no_get_sndtimeo(struct l2cap_chan *chan) return 0; } -static inline int l2cap_chan_no_memcpy_fromiovec(struct l2cap_chan *chan, - unsigned char *kdata, - struct iovec *iov, - int len) -{ - /* Following is safe since for compiler definitions of kvec and - * iovec are identical, yielding the same in-core layout and alignment - */ - struct kvec *vec = (struct kvec *)iov; - - while (len > 0) { - if (vec->iov_len) { - int copy = min_t(unsigned int, len, vec->iov_len); - memcpy(kdata, vec->iov_base, copy); - len -= copy; - kdata += copy; - vec->iov_base += copy; - vec->iov_len -= copy; - } - vec++; - } - - return 0; -} - extern bool disable_ertm; int l2cap_init_sockets(void); diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 414cd2f9a437f6fde742c5c05f1d901455b0b9bb..95c34d5180fa1ec656bf996ed034817674b34ecf 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -184,6 +184,9 @@ struct mgmt_cp_load_link_keys { #define MGMT_LTK_UNAUTHENTICATED 0x00 #define MGMT_LTK_AUTHENTICATED 0x01 +#define MGMT_LTK_P256_UNAUTH 0x02 +#define MGMT_LTK_P256_AUTH 0x03 +#define MGMT_LTK_P256_DEBUG 0x04 struct mgmt_ltk_info { struct mgmt_addr_info addr; @@ -299,28 +302,28 @@ struct mgmt_cp_user_passkey_neg_reply { #define MGMT_READ_LOCAL_OOB_DATA_SIZE 0 struct mgmt_rp_read_local_oob_data { __u8 hash[16]; - __u8 randomizer[16]; + __u8 rand[16]; } __packed; struct mgmt_rp_read_local_oob_ext_data { __u8 hash192[16]; - __u8 randomizer192[16]; + __u8 rand192[16]; __u8 hash256[16]; - __u8 randomizer256[16]; + __u8 rand256[16]; } __packed; #define MGMT_OP_ADD_REMOTE_OOB_DATA 0x0021 struct mgmt_cp_add_remote_oob_data { struct mgmt_addr_info addr; __u8 hash[16]; - __u8 randomizer[16]; + __u8 rand[16]; } __packed; #define MGMT_ADD_REMOTE_OOB_DATA_SIZE (MGMT_ADDR_INFO_SIZE + 32) struct mgmt_cp_add_remote_oob_ext_data { struct mgmt_addr_info addr; __u8 hash192[16]; - __u8 randomizer192[16]; + __u8 rand192[16]; __u8 hash256[16]; - __u8 randomizer256[16]; + __u8 rand256[16]; } __packed; #define MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE (MGMT_ADDR_INFO_SIZE + 64) @@ -495,6 +498,15 @@ struct mgmt_cp_set_public_address { } __packed; #define MGMT_SET_PUBLIC_ADDRESS_SIZE 6 +#define MGMT_OP_START_SERVICE_DISCOVERY 
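With per-channel nesting levels, l2cap_chan_lock() maps onto mutex_lock_nested(), so lockdep can tell a parent (listening) channel apart from the child it creates. A minimal sketch of how the levels might be used together; the demo function and the assignment points are illustrative:

/* Illustrative only: a listening channel is marked PARENT so holding
 * its lock while locking a freshly created child (NORMAL) is not
 * flagged by lockdep as recursive locking of the same class.
 */
static void demo_lock_pair(struct l2cap_chan *parent,
			   struct l2cap_chan *child)
{
	atomic_set(&parent->nesting, L2CAP_NESTING_PARENT);
	atomic_set(&child->nesting, L2CAP_NESTING_NORMAL);

	l2cap_chan_lock(parent);	/* subclass == PARENT */
	l2cap_chan_lock(child);		/* subclass == NORMAL, distinct */
	/* ... configure the child channel here ... */
	l2cap_chan_unlock(child);
	l2cap_chan_unlock(parent);
}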
0x003A +struct mgmt_cp_start_service_discovery { + __u8 type; + __s8 rssi; + __le16 uuid_count; + __u8 uuids[0][16]; +} __packed; +#define MGMT_START_SERVICE_DISCOVERY_SIZE 4 + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; diff --git a/drivers/net/bonding/bond_3ad.h b/include/net/bond_3ad.h similarity index 99% rename from drivers/net/bonding/bond_3ad.h rename to include/net/bond_3ad.h index c5f14ac63f3ee7b2b8b41b60939c6002ad9e7c1f..e01d903633eff269866dda0f23e65f60614adf11 100644 --- a/drivers/net/bonding/bond_3ad.h +++ b/include/net/bond_3ad.h @@ -20,8 +20,8 @@ * */ -#ifndef __BOND_3AD_H__ -#define __BOND_3AD_H__ +#ifndef _NET_BOND_3AD_H +#define _NET_BOND_3AD_H #include #include @@ -279,5 +279,5 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); int bond_3ad_set_carrier(struct bonding *bond); void bond_3ad_update_lacp_rate(struct bonding *bond); -#endif /* __BOND_3AD_H__ */ +#endif /* _NET_BOND_3AD_H */ diff --git a/drivers/net/bonding/bond_alb.h b/include/net/bond_alb.h similarity index 98% rename from drivers/net/bonding/bond_alb.h rename to include/net/bond_alb.h index 1ad473b4ade5b50f7a0dc734f55c60627f65a5cd..313a8d3b306963dd0a2c40fead61391d7ff5954c 100644 --- a/drivers/net/bonding/bond_alb.h +++ b/include/net/bond_alb.h @@ -19,8 +19,8 @@ * */ -#ifndef __BOND_ALB_H__ -#define __BOND_ALB_H__ +#ifndef _NET_BOND_ALB_H +#define _NET_BOND_ALB_H #include @@ -177,5 +177,5 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev); void bond_alb_monitor(struct work_struct *); int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr); void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id); -#endif /* __BOND_ALB_H__ */ +#endif /* _NET_BOND_ALB_H */ diff --git a/drivers/net/bonding/bond_options.h b/include/net/bond_options.h similarity index 97% rename from drivers/net/bonding/bond_options.h rename to include/net/bond_options.h index 17ded5b291761ca9e85e2fa6c82514a4613815b5..ea6546d2c946aa1caeabcb071a368e07cf89c3a3 100644 --- a/drivers/net/bonding/bond_options.h +++ b/include/net/bond_options.h @@ -8,8 +8,8 @@ * (at your option) any later version. 
*/ -#ifndef _BOND_OPTIONS_H -#define _BOND_OPTIONS_H +#ifndef _NET_BOND_OPTIONS_H +#define _NET_BOND_OPTIONS_H #define BOND_OPT_MAX_NAMELEN 32 #define BOND_OPT_VALID(opt) ((opt) < BOND_OPT_LAST) @@ -127,4 +127,4 @@ static inline void __bond_opt_init(struct bond_opt_value *optval, void bond_option_arp_ip_targets_clear(struct bonding *bond); -#endif /* _BOND_OPTIONS_H */ +#endif /* _NET_BOND_OPTIONS_H */ diff --git a/drivers/net/bonding/bonding.h b/include/net/bonding.h similarity index 98% rename from drivers/net/bonding/bonding.h rename to include/net/bonding.h index 10920f0686e2f0222203359751581d1b728c6617..983a94b86b954c90548df20fe6f574efb6359c70 100644 --- a/drivers/net/bonding/bonding.h +++ b/include/net/bonding.h @@ -12,8 +12,8 @@ * */ -#ifndef _LINUX_BONDING_H -#define _LINUX_BONDING_H +#ifndef _NET_BONDING_H +#define _NET_BONDING_H #include #include @@ -26,9 +26,9 @@ #include #include -#include "bond_3ad.h" -#include "bond_alb.h" -#include "bond_options.h" +#include <net/bond_3ad.h> +#include <net/bond_alb.h> +#include <net/bond_options.h> #define DRV_VERSION "3.7.1" #define DRV_RELDATE "April 27, 2011" @@ -645,4 +645,10 @@ extern struct bond_parm_tbl ad_select_tbl[]; /* exported from bond_netlink.c */ extern struct rtnl_link_ops bond_link_ops; -#endif /* _LINUX_BONDING_H */ +static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb) +{ + atomic_long_inc(&dev->tx_dropped); + dev_kfree_skb_any(skb); +} + +#endif /* _NET_BONDING_H */ diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index a2ddcf2398fdadf43bcb5ccc12891aba20f0c946..4ebb816241fa1ad76506d30265878cacb47b1aec 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -319,9 +319,12 @@ struct ieee80211_supported_band { /** * struct vif_params - describes virtual interface parameters * @use_4addr: use 4-address frames - * @macaddr: address to use for this virtual interface. This will only - * be used for non-netdevice interfaces. If this parameter is set - * to zero address the driver may determine the address as needed. + * @macaddr: address to use for this virtual interface. + * If this parameter is set to the zero address, the driver may + * determine the address as needed. + * This feature is only fully supported by drivers that enable the + * %NL80211_FEATURE_MAC_ON_CREATE flag. Others may support creating + * only p2p devices with a specified MAC. */ struct vif_params { int use_4addr; @@ -798,6 +801,22 @@ struct station_parameters { bool opmode_notif_used; }; +/** + * struct station_del_parameters - station deletion parameters + * + * Used to delete a station entry (or all stations).
+ * + * @mac: MAC address of the station to remove or NULL to remove all stations + * @subtype: Management frame subtype to use for indicating removal + * (10 = Disassociation, 12 = Deauthentication) + * @reason_code: Reason code for the Disassociation/Deauthentication frame + */ +struct station_del_parameters { + const u8 *mac; + u8 subtype; + u16 reason_code; +}; + /** * enum cfg80211_station_type - the type of station being modified * @CFG80211_STA_AP_CLIENT: client of an AP interface @@ -1339,6 +1358,16 @@ struct mesh_setup { u32 basic_rates; }; +/** + * struct ocb_setup - 802.11p OCB mode setup configuration + * @chandef: defines the channel to use + * + * These parameters are fixed when connecting to the network + */ +struct ocb_setup { + struct cfg80211_chan_def chandef; +}; + /** * struct ieee80211_txq_params - TX queue parameters * @ac: AC identifier @@ -1408,6 +1437,10 @@ struct cfg80211_ssid { * @aborted: (internal) scan request was notified as aborted * @notified: (internal) scan request was notified as done or aborted * @no_cck: used to send probe requests at non CCK rate in 2GHz band + * @mac_addr: MAC address used with randomisation + * @mac_addr_mask: MAC address mask used with randomisation, bits that + * are 0 in the mask should be randomised, bits that are 1 should + * be taken from the @mac_addr */ struct cfg80211_scan_request { struct cfg80211_ssid *ssids; @@ -1422,6 +1455,9 @@ struct cfg80211_scan_request { struct wireless_dev *wdev; + u8 mac_addr[ETH_ALEN] __aligned(2); + u8 mac_addr_mask[ETH_ALEN] __aligned(2); + /* internal */ struct wiphy *wiphy; unsigned long scan_start; @@ -1432,6 +1468,17 @@ struct cfg80211_scan_request { struct ieee80211_channel *channels[0]; }; +static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask) +{ + int i; + + get_random_bytes(buf, ETH_ALEN); + for (i = 0; i < ETH_ALEN; i++) { + buf[i] &= ~mask[i]; + buf[i] |= addr[i] & mask[i]; + } +} + /** * struct cfg80211_match_set - sets of attributes to match * @@ -1465,6 +1512,10 @@ struct cfg80211_match_set { * @channels: channels to scan * @min_rssi_thold: for drivers only supporting a single threshold, this * contains the minimum over all matchsets + * @mac_addr: MAC address used with randomisation + * @mac_addr_mask: MAC address mask used with randomisation, bits that + * are 0 in the mask should be randomised, bits that are 1 should + * be taken from the @mac_addr */ struct cfg80211_sched_scan_request { struct cfg80211_ssid *ssids; @@ -1479,6 +1530,9 @@ struct cfg80211_sched_scan_request { int n_match_sets; s32 min_rssi_thold; + u8 mac_addr[ETH_ALEN] __aligned(2); + u8 mac_addr_mask[ETH_ALEN] __aligned(2); + /* internal */ struct wiphy *wiphy; struct net_device *dev; @@ -1911,6 +1965,7 @@ struct cfg80211_wowlan_tcp { * @rfkill_release: wake up when rfkill is released * @tcp: TCP connection establishment/wakeup parameters, see nl80211.h. * NULL if not configured. + * @nd_config: configuration for the scan to be used for net detect wake. */ struct cfg80211_wowlan { bool any, disconnect, magic_pkt, gtk_rekey_failure, @@ -1919,6 +1974,7 @@ struct cfg80211_wowlan { struct cfg80211_pkt_pattern *patterns; struct cfg80211_wowlan_tcp *tcp; int n_patterns; + struct cfg80211_sched_scan_request *nd_config; }; /** @@ -1950,6 +2006,35 @@ struct cfg80211_coalesce { int n_rules; }; +/** + * struct cfg80211_wowlan_nd_match - information about the match + * + * @ssid: SSID of the match that triggered the wake up + * @n_channels: Number of channels where the match occurred. 
This + * value may be zero if the driver can't report the channels. + * @channels: center frequencies of the channels where a match + * occurred (in MHz) + */ +struct cfg80211_wowlan_nd_match { + struct cfg80211_ssid ssid; + int n_channels; + u32 channels[]; +}; + +/** + * struct cfg80211_wowlan_nd_info - net detect wake up information + * + * @n_matches: Number of match information instances provided in + * @matches. This value may be zero if the driver can't provide + * match information. + * @matches: Array of pointers to matches containing information about + * the matches that triggered the wake up. + */ +struct cfg80211_wowlan_nd_info { + int n_matches; + struct cfg80211_wowlan_nd_match *matches[]; +}; + /** * struct cfg80211_wowlan_wakeup - wakeup report * @disconnect: woke up by getting disconnected @@ -1969,6 +2054,7 @@ struct cfg80211_coalesce { * @tcp_match: TCP wakeup packet received * @tcp_connlost: TCP connection lost or failed to establish * @tcp_nomoretokens: TCP data ran out of tokens + * @net_detect: if not %NULL, woke up because of net detect */ struct cfg80211_wowlan_wakeup { bool disconnect, magic_pkt, gtk_rekey_failure, @@ -1978,6 +2064,7 @@ struct cfg80211_wowlan_wakeup { s32 pattern_idx; u32 packet_present_len, packet_len; const void *packet; + struct cfg80211_wowlan_nd_info *net_detect; }; /** @@ -2132,7 +2219,7 @@ struct cfg80211_qos_map { * @stop_ap: Stop being an AP, including stopping beaconing. * * @add_station: Add a new station. - * @del_station: Remove a station; @mac may be NULL to remove all stations. + * @del_station: Remove a station * @change_station: Modify a given station. Note that flags changes are not much * validated in cfg80211, in particular the auth/assoc/authorized flags * might come to the driver in invalid combinations -- make sure to check @@ -2146,6 +2233,8 @@ struct cfg80211_qos_map { * @change_mpath: change a given mesh path * @get_mpath: get a mesh path for the given parameters * @dump_mpath: dump mesh path callback -- resume dump at index @idx + * @get_mpp: get a mesh proxy path for the given parameters + * @dump_mpp: dump mesh proxy path callback -- resume dump at index @idx * @join_mesh: join the mesh network with the specified parameters * (invoked with the wireless_dev mutex held) * @leave_mesh: leave the current mesh network @@ -2331,6 +2420,17 @@ struct cfg80211_qos_map { * with the peer followed by immediate teardown when the addition is later * rejected) * @del_tx_ts: remove an existing TX TS + * + * @join_ocb: join the OCB network with the specified parameters + * (invoked with the wireless_dev mutex held) + * @leave_ocb: leave the current OCB network + * (invoked with the wireless_dev mutex held) + * + * @tdls_channel_switch: Start channel-switching with a TDLS peer. The driver + * is responsible for continually initiating channel-switching operations + * and returning to the base channel for communication with the AP. + * @tdls_cancel_channel_switch: Stop channel-switching with a TDLS peer. Both + * peers must be on the base channel when the call completes. 
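Bundling the address, frame subtype, and reason code into struct station_del_parameters changes the del_station callback contract shown in struct cfg80211_ops below. A minimal sketch of a driver-side handler; the demo_* names stand in for driver internals:

/* Illustrative only: the new callback signature. params->mac == NULL
 * still means "remove all stations".
 */
static int demo_del_station(struct wiphy *wiphy, struct net_device *dev,
			    struct station_del_parameters *params)
{
	if (!params->mac)
		return demo_flush_all_stations(dev);

	/* params->subtype selects Disassoc (10) vs Deauth (12) */
	return demo_remove_station(dev, params->mac, params->subtype,
				   params->reason_code);
}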
*/ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -2376,7 +2476,7 @@ struct cfg80211_ops { const u8 *mac, struct station_parameters *params); int (*del_station)(struct wiphy *wiphy, struct net_device *dev, - const u8 *mac); + struct station_del_parameters *params); int (*change_station)(struct wiphy *wiphy, struct net_device *dev, const u8 *mac, struct station_parameters *params); @@ -2396,6 +2496,11 @@ struct cfg80211_ops { int (*dump_mpath)(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *dst, u8 *next_hop, struct mpath_info *pinfo); + int (*get_mpp)(struct wiphy *wiphy, struct net_device *dev, + u8 *dst, u8 *mpp, struct mpath_info *pinfo); + int (*dump_mpp)(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *dst, u8 *mpp, + struct mpath_info *pinfo); int (*get_mesh_config)(struct wiphy *wiphy, struct net_device *dev, struct mesh_config *conf); @@ -2407,6 +2512,10 @@ struct cfg80211_ops { const struct mesh_setup *setup); int (*leave_mesh)(struct wiphy *wiphy, struct net_device *dev); + int (*join_ocb)(struct wiphy *wiphy, struct net_device *dev, + struct ocb_setup *setup); + int (*leave_ocb)(struct wiphy *wiphy, struct net_device *dev); + int (*change_bss)(struct wiphy *wiphy, struct net_device *dev, struct bss_parameters *params); @@ -2577,6 +2686,14 @@ struct cfg80211_ops { u16 admitted_time); int (*del_tx_ts)(struct wiphy *wiphy, struct net_device *dev, u8 tsid, const u8 *peer); + + int (*tdls_channel_switch)(struct wiphy *wiphy, + struct net_device *dev, + const u8 *addr, u8 oper_class, + struct cfg80211_chan_def *chandef); + void (*tdls_cancel_channel_switch)(struct wiphy *wiphy, + struct net_device *dev, + const u8 *addr); }; /* @@ -2623,13 +2740,9 @@ struct cfg80211_ops { * @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels. * @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in * beaconing mode (AP, IBSS, Mesh, ...). - * @WIPHY_FLAG_SUPPORTS_WMM_ADMISSION: the device supports setting up WMM - * TSPEC sessions (TID aka TSID 0-7) with the NL80211_CMD_ADD_TX_TS - * command. Standard IEEE 802.11 TSPEC setup is not yet supported, it - * needs to be able to handle Block-Ack agreements and other things. */ enum wiphy_flags { - WIPHY_FLAG_SUPPORTS_WMM_ADMISSION = BIT(0), + /* use hole at 0 */ /* use hole at 1 */ /* use hole at 2 */ WIPHY_FLAG_NETNS_OK = BIT(3), @@ -2755,6 +2868,7 @@ struct ieee80211_txrx_stypes { * @WIPHY_WOWLAN_EAP_IDENTITY_REQ: supports wakeup on EAP identity request * @WIPHY_WOWLAN_4WAY_HANDSHAKE: supports wakeup on 4-way handshake failure * @WIPHY_WOWLAN_RFKILL_RELEASE: supports wakeup on RF-kill release + * @WIPHY_WOWLAN_NET_DETECT: supports wakeup on network detection */ enum wiphy_wowlan_support_flags { WIPHY_WOWLAN_ANY = BIT(0), @@ -2765,6 +2879,7 @@ enum wiphy_wowlan_support_flags { WIPHY_WOWLAN_EAP_IDENTITY_REQ = BIT(5), WIPHY_WOWLAN_4WAY_HANDSHAKE = BIT(6), WIPHY_WOWLAN_RFKILL_RELEASE = BIT(7), + WIPHY_WOWLAN_NET_DETECT = BIT(8), }; struct wiphy_wowlan_tcp_support { @@ -2783,6 +2898,11 @@ struct wiphy_wowlan_tcp_support { * @pattern_max_len: maximum length of each pattern * @pattern_min_len: minimum length of each pattern * @max_pkt_offset: maximum Rx packet offset + * @max_nd_match_sets: maximum number of matchsets for net-detect, + * similar, but not necessarily identical, to max_match_sets for + * scheduled scans. + * See &struct cfg80211_sched_scan_request.@match_sets for more + * details. 
* @tcp: TCP wakeup support information */ struct wiphy_wowlan_support { @@ -2791,6 +2911,7 @@ struct wiphy_wowlan_support { int pattern_max_len; int pattern_min_len; int max_pkt_offset; + int max_nd_match_sets; const struct wiphy_wowlan_tcp_support *tcp; }; @@ -3165,6 +3286,23 @@ static inline const char *wiphy_name(const struct wiphy *wiphy) return dev_name(&wiphy->dev); } +/** + * wiphy_new_nm - create a new wiphy for use with cfg80211 + * + * @ops: The configuration operations for this device + * @sizeof_priv: The size of the private area to allocate + * @requested_name: Request a particular name. + * NULL is a valid value, and means use the default phy%d naming. + * + * Create a new wiphy and associate the given operations with it. + * @sizeof_priv bytes are allocated for private use. + * + * Return: A pointer to the new wiphy. This pointer must be + * assigned to each netdev's ieee80211_ptr for proper operation. + */ +struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, + const char *requested_name); + /** * wiphy_new - create a new wiphy for use with cfg80211 * @@ -3177,7 +3315,11 @@ static inline const char *wiphy_name(const struct wiphy *wiphy) * Return: A pointer to the new wiphy. This pointer must be * assigned to each netdev's ieee80211_ptr for proper operation. */ -struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv); +static inline struct wiphy *wiphy_new(const struct cfg80211_ops *ops, + int sizeof_priv) +{ + return wiphy_new_nm(ops, sizeof_priv, NULL); +} /** * wiphy_register - register a wiphy with cfg80211 @@ -4500,33 +4642,6 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev, enum nl80211_cqm_rssi_threshold_event rssi_event, gfp_t gfp); -/** - * cfg80211_radar_event - radar detection event - * @wiphy: the wiphy - * @chandef: chandef for the current channel - * @gfp: context flags - * - * This function is called when a radar is detected on the current chanenl. - */ -void cfg80211_radar_event(struct wiphy *wiphy, - struct cfg80211_chan_def *chandef, gfp_t gfp); - -/** - * cfg80211_cac_event - Channel availability check (CAC) event - * @netdev: network device - * @chandef: chandef for the current channel - * @event: type of event - * @gfp: context flags - * - * This function is called when a Channel availability check (CAC) is finished - * or aborted. This must be called to notify the completion of a CAC process, - * also by full-MAC drivers. - */ -void cfg80211_cac_event(struct net_device *netdev, - const struct cfg80211_chan_def *chandef, - enum nl80211_radar_event event, gfp_t gfp); - - /** * cfg80211_cqm_pktloss_notify - notify userspace about packetloss to peer * @dev: network device @@ -4554,6 +4669,42 @@ void cfg80211_cqm_pktloss_notify(struct net_device *dev, void cfg80211_cqm_txe_notify(struct net_device *dev, const u8 *peer, u32 num_packets, u32 rate, u32 intvl, gfp_t gfp); +/** + * cfg80211_cqm_beacon_loss_notify - beacon loss event + * @dev: network device + * @gfp: context flags + * + * Notify userspace about beacon loss from the connected AP. + */ +void cfg80211_cqm_beacon_loss_notify(struct net_device *dev, gfp_t gfp); + +/** + * cfg80211_radar_event - radar detection event + * @wiphy: the wiphy + * @chandef: chandef for the current channel + * @gfp: context flags + * + * This function is called when a radar is detected on the current channel.
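For the new net-detect wakeup, a driver fills struct cfg80211_wowlan_nd_info and hands it to the existing cfg80211_report_wowlan_wakeup() entry point. A pared-down sketch with allocation-failure handling omitted; the function name and SSID are illustrative:

/* Illustrative only: report a net-detect wakeup with one matched SSID
 * and no channel list (n_channels == 0).
 */
static void demo_report_nd_wakeup(struct wireless_dev *wdev)
{
	struct cfg80211_wowlan_wakeup wakeup = {};
	struct cfg80211_wowlan_nd_info *info;
	struct cfg80211_wowlan_nd_match *match;

	info = kzalloc(sizeof(*info) + sizeof(info->matches[0]),
		       GFP_KERNEL);
	match = kzalloc(sizeof(*match), GFP_KERNEL);

	match->ssid.ssid_len = 4;
	memcpy(match->ssid.ssid, "demo", 4);

	info->n_matches = 1;
	info->matches[0] = match;
	wakeup.net_detect = info;

	cfg80211_report_wowlan_wakeup(wdev, &wakeup, GFP_KERNEL);

	kfree(match);
	kfree(info);
}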
+ */ +void cfg80211_radar_event(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef, gfp_t gfp); + +/** + * cfg80211_cac_event - Channel availability check (CAC) event + * @netdev: network device + * @chandef: chandef for the current channel + * @event: type of event + * @gfp: context flags + * + * This function is called when a Channel availability check (CAC) is finished + * or aborted. This must be called to notify the completion of a CAC process, + * also by full-MAC drivers. + */ +void cfg80211_cac_event(struct net_device *netdev, + const struct cfg80211_chan_def *chandef, + enum nl80211_radar_event event, gfp_t gfp); + + /** * cfg80211_gtk_rekey_notify - notify userspace about driver rekeying * @dev: network device @@ -4657,6 +4808,20 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy, void cfg80211_ch_switch_notify(struct net_device *dev, struct cfg80211_chan_def *chandef); +/* + * cfg80211_ch_switch_started_notify - notify channel switch start + * @dev: the device on which the channel switch started + * @chandef: the future channel definition + * @count: the number of TBTTs until the channel switch happens + * + * Inform the userspace about the channel switch that has just + * started, so that it can take appropriate actions (eg. starting + * channel switch on other vifs), if necessary. + */ +void cfg80211_ch_switch_started_notify(struct net_device *dev, + struct cfg80211_chan_def *chandef, + u8 count); + /** * ieee80211_operating_class_to_band - convert operating class to band * diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h new file mode 100644 index 0000000000000000000000000000000000000000..7f713acfa106c9bd564b4c748a1d9b7b8978d8ae --- /dev/null +++ b/include/net/cfg802154.h @@ -0,0 +1,161 @@ +/* + * Copyright (C) 2007, 2008, 2009 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Written by: + * Dmitry Eremin-Solenikov + */ + +#ifndef __NET_CFG802154_H +#define __NET_CFG802154_H + +#include +#include +#include +#include + +#include + +struct wpan_phy; + +struct cfg802154_ops { + struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy, + const char *name, + int type); + void (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy, + struct net_device *dev); + int (*add_virtual_intf)(struct wpan_phy *wpan_phy, + const char *name, + enum nl802154_iftype type, + __le64 extended_addr); + int (*del_virtual_intf)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev); + int (*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel); + int (*set_pan_id)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, __le16 pan_id); + int (*set_short_addr)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, __le16 short_addr); + int (*set_backoff_exponent)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, u8 min_be, + u8 max_be); + int (*set_max_csma_backoffs)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + u8 max_csma_backoffs); + int (*set_max_frame_retries)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + s8 max_frame_retries); + int (*set_lbt_mode)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, bool mode); +}; + +struct wpan_phy { + struct mutex pib_lock; + + /* If multiple wpan_phys are registered and you're handed e.g. + * a regular netdev with assigned ieee802154_ptr, you won't + * know whether it points to a wpan_phy your driver has registered + * or not. Assign this to something global to your driver to + * help determine whether you own this wpan_phy or not. + */ + const void *privid; + + /* + * This is a PIB according to 802.15.4-2011. + * We do not provide timing-related variables, as they + * aren't used outside of driver + */ + u8 current_channel; + u8 current_page; + u32 channels_supported[IEEE802154_MAX_PAGE + 1]; + s8 transmit_power; + u8 cca_mode; + + __le64 perm_extended_addr; + + s32 cca_ed_level; + + /* PHY depended MAC PIB values */ + + /* 802.15.4 acronym: Tdsym in usec */ + u8 symbol_duration; + /* lifs and sifs periods timing */ + u16 lifs_period; + u16 sifs_period; + + struct device dev; + + char priv[0] __aligned(NETDEV_ALIGN); +}; + +struct wpan_dev { + struct wpan_phy *wpan_phy; + int iftype; + + /* the remainder of this struct should be private to cfg802154 */ + struct list_head list; + struct net_device *netdev; + + u32 identifier; + + /* MAC PIB */ + __le16 pan_id; + __le16 short_addr; + __le64 extended_addr; + + /* MAC BSN field */ + u8 bsn; + /* MAC DSN field */ + u8 dsn; + + u8 min_be; + u8 max_be; + u8 csma_retries; + s8 frame_retries; + + bool lbt; + + bool promiscuous_mode; +}; + +#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev) + +struct wpan_phy * +wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size); +static inline void wpan_phy_set_dev(struct wpan_phy *phy, struct device *dev) +{ + phy->dev.parent = dev; +} + +int wpan_phy_register(struct wpan_phy *phy); +void wpan_phy_unregister(struct wpan_phy *phy); +void wpan_phy_free(struct wpan_phy *phy); +/* Same semantics as for class_for_each_device */ +int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data), void *data); + +static inline void *wpan_phy_priv(struct wpan_phy *phy) +{ + BUG_ON(!phy); + return &phy->priv; +} + +struct wpan_phy *wpan_phy_find(const char *str); + +static inline void wpan_phy_put(struct wpan_phy *phy) +{ + put_device(&phy->dev); +} + +static inline const char 
*wpan_phy_name(struct wpan_phy *phy) +{ + return dev_name(&phy->dev); +} + +#endif /* __NET_CFG802154_H */ diff --git a/include/net/checksum.h b/include/net/checksum.h index 6465bae80a4f8ee451d175b3dc8022340d418709..e339a9513e2963ea9f5bfbf77a2851836d36cddc 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -151,4 +151,20 @@ static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, (__force __be32)to, pseudohdr); } +static inline __wsum remcsum_adjust(void *ptr, __wsum csum, + int start, int offset) +{ + __sum16 *psum = (__sum16 *)(ptr + offset); + __wsum delta; + + /* Subtract out checksum up to start */ + csum = csum_sub(csum, csum_partial(ptr, start, 0)); + + /* Set derived checksum in packet */ + delta = csum_sub(csum_fold(csum), *psum); + *psum = csum_fold(csum); + + return delta; +} + #endif diff --git a/include/net/compat.h b/include/net/compat.h index 3b603b199c01c5d554202323dde4253e01f7a4ab..42a9c8431177c295276068e18967c1bce12e648e 100644 --- a/include/net/compat.h +++ b/include/net/compat.h @@ -40,9 +40,8 @@ int compat_sock_get_timestampns(struct sock *, struct timespec __user *); #define compat_mmsghdr mmsghdr #endif /* defined(CONFIG_COMPAT) */ -int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *); -int verify_compat_iovec(struct msghdr *, struct iovec *, - struct sockaddr_storage *, int); +ssize_t get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *, + struct sockaddr __user **, struct iovec **); asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *, unsigned int); asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *, diff --git a/include/net/dsa.h b/include/net/dsa.h index b76559293535b3b388d0ec1b66105df6892fa754..ed3c34bbb67ab89f1570acf3daf3d93aaf06a34c 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -38,6 +38,9 @@ struct dsa_chip_data { struct device *host_dev; int sw_addr; + /* set to size of eeprom if supported by the switch */ + int eeprom_len; + /* Device tree node pointer for this specific switch chip * used during switch setup in case additional properties * and resources needs to be used @@ -139,6 +142,14 @@ struct dsa_switch { */ struct device *master_dev; +#ifdef CONFIG_NET_DSA_HWMON + /* + * Hardware monitoring information + */ + char hwmon_name[IFNAMSIZ + 8]; + struct device *hwmon_dev; +#endif + /* * Slave mii_bus and devices for the individual ports. */ @@ -242,6 +253,28 @@ struct dsa_switch_driver { struct ethtool_eee *e); int (*get_eee)(struct dsa_switch *ds, int port, struct ethtool_eee *e); + +#ifdef CONFIG_NET_DSA_HWMON + /* Hardware monitoring */ + int (*get_temp)(struct dsa_switch *ds, int *temp); + int (*get_temp_limit)(struct dsa_switch *ds, int *temp); + int (*set_temp_limit)(struct dsa_switch *ds, int temp); + int (*get_temp_alarm)(struct dsa_switch *ds, bool *alarm); +#endif + + /* EEPROM access */ + int (*get_eeprom_len)(struct dsa_switch *ds); + int (*get_eeprom)(struct dsa_switch *ds, + struct ethtool_eeprom *eeprom, u8 *data); + int (*set_eeprom)(struct dsa_switch *ds, + struct ethtool_eeprom *eeprom, u8 *data); + + /* + * Register access. 
+ */ + int (*get_regs_len)(struct dsa_switch *ds, int port); + void (*get_regs)(struct dsa_switch *ds, int port, + struct ethtool_regs *regs, void *p); }; void register_switch_driver(struct dsa_switch_driver *type); diff --git a/include/net/fou.h b/include/net/fou.h new file mode 100644 index 0000000000000000000000000000000000000000..19b8a0c62a9801a8401f1289a94c13c242bef6e7 --- /dev/null +++ b/include/net/fou.h @@ -0,0 +1,19 @@ +#ifndef __NET_FOU_H +#define __NET_FOU_H + +#include <linux/skbuff.h> + +#include <net/flow.h> +#include <net/gue.h> +#include <net/ip_tunnels.h> +#include <net/udp.h> + +size_t fou_encap_hlen(struct ip_tunnel_encap *e); +size_t gue_encap_hlen(struct ip_tunnel_encap *e); + +int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, + u8 *protocol, struct flowi4 *fl4); +int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, + u8 *protocol, struct flowi4 *fl4); + +#endif diff --git a/include/net/gue.h b/include/net/gue.h index b6c332788084b8457ac07193354fed96822beba0..3f28ec7f1c7f1ccd68bba98880f7f25da50bff95 100644 --- a/include/net/gue.h +++ b/include/net/gue.h @@ -1,23 +1,116 @@ #ifndef __NET_GUE_H #define __NET_GUE_H +/* Definitions for the GUE header, standard and private flags, and lengths + * of optional fields are below. + * + * Diagram of GUE header: + * + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |Ver|C| Hlen | Proto/ctype | Standard flags |P| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | + * ~ Fields (optional) ~ + * | | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Private flags (optional, P bit is set) | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | + * ~ Private fields (optional) ~ + * | | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * C bit indicates control message when set, data message when unset. + * For a control message, proto/ctype is interpreted as a type of + * control message. For data messages, proto/ctype is the IP protocol + * of the next header. + * + * P bit indicates private flags field is present. The private flags + * may refer to options placed after this field. + */ + struct guehdr { union { struct { #if defined(__LITTLE_ENDIAN_BITFIELD) - __u8 hlen:4, - version:4; + __u8 hlen:5, + control:1, + version:2; #elif defined (__BIG_ENDIAN_BITFIELD) - __u8 version:4, - hlen:4; + __u8 version:2, + control:1, + hlen:5; #else #error "Please fix <asm/byteorder.h>" #endif - __u8 next_hdr; + __u8 proto_ctype; __u16 flags; }; __u32 word; }; }; +/* Standard flags in GUE header */ + +#define GUE_FLAG_PRIV htons(1<<0) /* Private flags are in options */ +#define GUE_LEN_PRIV 4 + +#define GUE_FLAGS_ALL (GUE_FLAG_PRIV) + +/* Private flags in the private option extension */ + +#define GUE_PFLAG_REMCSUM htonl(1 << 31) +#define GUE_PLEN_REMCSUM 4 + +#define GUE_PFLAGS_ALL (GUE_PFLAG_REMCSUM) + +/* Functions to compute options length corresponding to flags. + * If we ever have a lot of flags this can potentially be + * converted to a more optimized algorithm (table lookup + * for instance). + */ +static inline size_t guehdr_flags_len(__be16 flags) +{ + return ((flags & GUE_FLAG_PRIV) ? GUE_LEN_PRIV : 0); +} + +static inline size_t guehdr_priv_flags_len(__be32 flags) +{ + return 0; +} + +/* Validate standard and private flags. Returns non-zero (meaning invalid) + * if there are unknown standard or private flags, or the options length for + * the flags exceeds the options length specified in hlen of the GUE header.
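Since hlen counts 32-bit words of optional fields, a receiver derives the option length as hlen << 2 and passes it to validate_gue_flags(), defined just below. A minimal sketch, with demo_gue_check() as a hypothetical caller:

/* Illustrative only: reject packets carrying unknown or oversized
 * flag extensions before parsing the options.
 */
static int demo_gue_check(struct guehdr *guehdr)
{
	size_t optlen = guehdr->hlen << 2;	/* words -> bytes */

	if (validate_gue_flags(guehdr, optlen))
		return -EINVAL;

	return 0;
}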
+ */ +static inline int validate_gue_flags(struct guehdr *guehdr, + size_t optlen) +{ + size_t len; + __be32 flags = guehdr->flags; + + if (flags & ~GUE_FLAGS_ALL) + return 1; + + len = guehdr_flags_len(flags); + if (len > optlen) + return 1; + + if (flags & GUE_FLAG_PRIV) { + /* Private flags are last four bytes accounted in + * guehdr_flags_len + */ + flags = *(__be32 *)((void *)&guehdr[1] + len - GUE_LEN_PRIV); + + if (flags & ~GUE_PFLAGS_ALL) + return 1; + + len += guehdr_priv_flags_len(flags); + if (len > optlen) + return 1; + } + + return 0; +} + #endif diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h index 3b53c8e405e48143f667c20850db1666c402d8fa..83bb8a73d23c0f056b10a8c9061997a7b98e02a9 100644 --- a/include/net/ieee802154_netdev.h +++ b/include/net/ieee802154_netdev.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * Written by: * Pavel Smolenskiy * Maxim Gorbachyov @@ -27,10 +23,10 @@ #ifndef IEEE802154_NETDEVICE_H #define IEEE802154_NETDEVICE_H -#include #include #include #include +#include struct ieee802154_sechdr { #if defined(__LITTLE_ENDIAN_BITFIELD) @@ -427,8 +423,6 @@ struct ieee802154_mlme_ops { /* The fields below are required. */ - struct wpan_phy *(*get_phy)(const struct net_device *dev); - /* * FIXME: these should become the part of PIB/MIB interface. * However we still don't have IB interface of any kind @@ -438,16 +432,6 @@ struct ieee802154_mlme_ops { u8 (*get_dsn)(const struct net_device *dev); }; -/* The IEEE 802.15.4 standard defines 2 type of the devices: - * - FFD - full functionality device - * - RFD - reduce functionality device - * - * So 2 sets of mlme operations are needed - */ -struct ieee802154_reduced_mlme_ops { - struct wpan_phy *(*get_phy)(const struct net_device *dev); -}; - static inline struct ieee802154_mlme_ops * ieee802154_mlme_ops(const struct net_device *dev) { diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index d1d272843b3bd609c5e93650d835f32055444d6a..9201afe083faf4f5b12e0f59376f27521050e628 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -99,4 +99,14 @@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, const struct in6_addr *daddr, const __be16 dport, const int dif); #endif /* IS_ENABLED(CONFIG_IPV6) */ + +#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \ + (((__sk)->sk_portpair == (__ports)) && \ + ((__sk)->sk_family == AF_INET6) && \ + ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \ + ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \ + (!(__sk)->sk_bound_dev_if || \ + ((__sk)->sk_bound_dev_if == (__dif))) && \ + net_eq(sock_net(__sk), (__net))) + #endif /* _INET6_HASHTABLES_H */ diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index a5593dab6af7fe0081700a2eaf5ff0e57d603ca7..9326c41c2d7f9e0512e9172569ba407ca59853bd 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -65,7 +65,8 @@ void ip6_tnl_dst_reset(struct ip6_tnl *t); void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst); int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, const struct in6_addr *raddr); -int ip6_tnl_xmit_ctl(struct ip6_tnl *t); +int ip6_tnl_xmit_ctl(struct 
ip6_tnl *t, const struct in6_addr *laddr, + const struct in6_addr *raddr); __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw); __u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr, const struct in6_addr *raddr); diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index dc9d2a27c3158115b927b3f4b93e5bd98799d749..09a819ee21519b36a3a841c3383267203444526b 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -201,8 +201,8 @@ void fib_free_table(struct fib_table *tb); #ifndef CONFIG_IP_MULTIPLE_TABLES -#define TABLE_LOCAL_INDEX 0 -#define TABLE_MAIN_INDEX 1 +#define TABLE_LOCAL_INDEX (RT_TABLE_LOCAL & (FIB_TABLE_HASHSZ - 1)) +#define TABLE_MAIN_INDEX (RT_TABLE_MAIN & (FIB_TABLE_HASHSZ - 1)) static inline struct fib_table *fib_get_table(struct net *net, u32 id) { diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 5bc6edeb7143398e41a5e7a185228d135534a5c8..25a59eb388a6493ea0b329a29ca4ccb7147018dd 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -117,6 +117,22 @@ struct ip_tunnel_net { struct hlist_head tunnels[IP_TNL_HASH_SIZE]; }; +struct ip_tunnel_encap_ops { + size_t (*encap_hlen)(struct ip_tunnel_encap *e); + int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e, + u8 *protocol, struct flowi4 *fl4); +}; + +#define MAX_IPTUN_ENCAP_OPS 8 + +extern const struct ip_tunnel_encap_ops __rcu * + iptun_encaps[MAX_IPTUN_ENCAP_OPS]; + +int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op, + unsigned int num); +int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op, + unsigned int num); + #ifdef CONFIG_INET int ip_tunnel_init(struct net_device *dev); diff --git a/include/net/ipx.h b/include/net/ipx.h index 0143180fecc983a01fd5c1c3027f984d0f0be177..e5cff6811b302edf4eea7d89cf94b073c2a8cd49 100644 --- a/include/net/ipx.h +++ b/include/net/ipx.h @@ -42,6 +42,9 @@ struct ipxhdr { struct ipx_address ipx_source __packed; }; +/* From af_ipx.c */ +extern int sysctl_ipx_pprop_broadcasting; + static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb) { return (struct ipxhdr *)skb_transport_header(skb); @@ -147,7 +150,7 @@ int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc, unsigned char *node); void ipxrtr_del_routes(struct ipx_interface *intrfc); int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx, - struct iovec *iov, size_t len, int noblock); + struct msghdr *msg, size_t len, int noblock); int ipxrtr_route_skb(struct sk_buff *skb); struct ipx_route *ipxrtr_lookup(__be32 net); int ipxrtr_ioctl(unsigned int cmd, void __user *arg); diff --git a/include/net/irda/irda.h b/include/net/irda/irda.h index a059465101ff3b5a35120d09782b6f10db9c98fa..92c8fb57521387ea3186a6e3fd4b5e65a857036a 100644 --- a/include/net/irda/irda.h +++ b/include/net/irda/irda.h @@ -55,16 +55,6 @@ typedef __u32 magic_t; #endif #ifdef CONFIG_IRDA_DEBUG - -extern unsigned int irda_debug; - -/* use 0 for production, 1 for verification, >2 for debug */ -#define IRDA_DEBUG_LEVEL 0 - -#define IRDA_DEBUG(n, args...) \ -do { if (irda_debug >= (n)) \ - printk(KERN_DEBUG args); \ -} while (0) #define IRDA_ASSERT(expr, func) \ do { if(!(expr)) { \ printk( "Assertion failed! %s:%s:%d %s\n", \ @@ -72,15 +62,10 @@ do { if(!(expr)) { \ func } } while (0) #define IRDA_ASSERT_LABEL(label) label #else -#define IRDA_DEBUG(n, args...) do { } while (0) #define IRDA_ASSERT(expr, func) do { (void)(expr); } while (0) #define IRDA_ASSERT_LABEL(label) #endif /* CONFIG_IRDA_DEBUG */ -#define IRDA_WARNING(args...) 
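Encapsulation modules plug into the new iptun_encaps table through ip_tunnel_encap_add_ops(), keyed by a fixed slot number below MAX_IPTUN_ENCAP_OPS. A minimal registration sketch; the demo_* names and the slot value are illustrative, not part of this patch:

/* Illustrative only: register a trivial encapsulation type. */
static size_t demo_hlen(struct ip_tunnel_encap *e)
{
	return 8;	/* e.g. a bare UDP header */
}

static int demo_build(struct sk_buff *skb, struct ip_tunnel_encap *e,
		      u8 *protocol, struct flowi4 *fl4)
{
	return 0;	/* a real module pushes its headers here */
}

static const struct ip_tunnel_encap_ops demo_encap_ops = {
	.encap_hlen	= demo_hlen,
	.build_header	= demo_build,
};

static int __init demo_encap_init(void)
{
	/* the slot number must be below MAX_IPTUN_ENCAP_OPS */
	return ip_tunnel_encap_add_ops(&demo_encap_ops, 1);
}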
diff --git a/include/net/ipx.h b/include/net/ipx.h index 0143180fecc983a01fd5c1c3027f984d0f0be177..e5cff6811b302edf4eea7d89cf94b073c2a8cd49 100644 --- a/include/net/ipx.h +++ b/include/net/ipx.h @@ -42,6 +42,9 @@ struct ipxhdr { struct ipx_address ipx_source __packed; }; +/* From af_ipx.c */ +extern int sysctl_ipx_pprop_broadcasting; + static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb) { return (struct ipxhdr *)skb_transport_header(skb); @@ -147,7 +150,7 @@ int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc, unsigned char *node); void ipxrtr_del_routes(struct ipx_interface *intrfc); int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx, - struct iovec *iov, size_t len, int noblock); + struct msghdr *msg, size_t len, int noblock); int ipxrtr_route_skb(struct sk_buff *skb); struct ipx_route *ipxrtr_lookup(__be32 net); int ipxrtr_ioctl(unsigned int cmd, void __user *arg); diff --git a/include/net/irda/irda.h b/include/net/irda/irda.h index a059465101ff3b5a35120d09782b6f10db9c98fa..92c8fb57521387ea3186a6e3fd4b5e65a857036a 100644 --- a/include/net/irda/irda.h +++ b/include/net/irda/irda.h @@ -55,16 +55,6 @@ typedef __u32 magic_t; #endif #ifdef CONFIG_IRDA_DEBUG - -extern unsigned int irda_debug; - -/* use 0 for production, 1 for verification, >2 for debug */ -#define IRDA_DEBUG_LEVEL 0 - -#define IRDA_DEBUG(n, args...) \ -do { if (irda_debug >= (n)) \ - printk(KERN_DEBUG args); \ -} while (0) #define IRDA_ASSERT(expr, func) \ do { if(!(expr)) { \ printk( "Assertion failed! %s:%s:%d %s\n", \ @@ -72,15 +62,10 @@ do { if(!(expr)) { \ func } } while (0) #define IRDA_ASSERT_LABEL(label) label #else -#define IRDA_DEBUG(n, args...) do { } while (0) #define IRDA_ASSERT(expr, func) do { (void)(expr); } while (0) #define IRDA_ASSERT_LABEL(label) #endif /* CONFIG_IRDA_DEBUG */ -#define IRDA_WARNING(args...) do { if (net_ratelimit()) printk(KERN_WARNING args); } while (0) -#define IRDA_MESSAGE(args...) do { if (net_ratelimit()) printk(KERN_INFO args); } while (0) -#define IRDA_ERROR(args...) do { if (net_ratelimit()) printk(KERN_ERR args); } while (0) - /* * Magic numbers used by Linux-IrDA. Random numbers which must be unique to * give the best protection diff --git a/include/net/irda/irlap.h b/include/net/irda/irlap.h index fb4b76d5d7f16b2983c6edf0c21ba0e6020a0dae..6f23e820618caa88b10f647a9781435abb21b4da 100644 --- a/include/net/irda/irlap.h +++ b/include/net/irda/irlap.h @@ -303,7 +303,7 @@ static inline void irlap_next_state(struct irlap_cb *self, IRLAP_STATE state) if (!self || self->magic != LAP_MAGIC) return; - IRDA_DEBUG(4, "next LAP state = %s\n", irlap_state[state]); + pr_debug("next LAP state = %s\n", irlap_state[state]); */ self->state = state; } diff --git a/include/net/irda/parameters.h b/include/net/irda/parameters.h index 42713c931d1fee497f000b242204c7b14416ae1d..2d9cd0007cba6ad984635278c95d57e61576142b 100644 --- a/include/net/irda/parameters.h +++ b/include/net/irda/parameters.h @@ -71,17 +71,17 @@ typedef int (*PV_HANDLER)(void *self, __u8 *buf, int len, __u8 pi, PV_TYPE type, PI_HANDLER func); typedef struct { - PI_HANDLER func; /* Handler for this parameter identifier */ + const PI_HANDLER func; /* Handler for this parameter identifier */ PV_TYPE type; /* Data type for this parameter */ } pi_minor_info_t; typedef struct { - pi_minor_info_t *pi_minor_call_table; + const pi_minor_info_t *pi_minor_call_table; int len; } pi_major_info_t; typedef struct { - pi_major_info_t *tables; + const pi_major_info_t *tables; int len; __u8 pi_mask; int pi_major_offset; diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h index 0e79cfba4b3bef44f98e069f57c227739ff4ff6e..48f3f891b2f96157b47a8e4bee054f9558c2aef3 100644 --- a/include/net/llc_c_st.h +++ b/include/net/llc_c_st.h @@ -35,8 +35,8 @@ struct llc_conn_state_trans { llc_conn_ev_t ev; u8 next_state; - llc_conn_ev_qfyr_t *ev_qualifiers; - llc_conn_action_t *ev_actions; + const llc_conn_ev_qfyr_t *ev_qualifiers; + const llc_conn_action_t *ev_actions; }; struct llc_conn_state { diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h index 567c681f1f3ebf0a8dada1527fd9513602505454..c4359e203013c097837f06ca826f3b887c21bfed 100644 --- a/include/net/llc_s_st.h +++ b/include/net/llc_s_st.h @@ -19,7 +19,7 @@ struct llc_sap_state_trans { llc_sap_ev_t ev; u8 next_state; - llc_sap_action_t *ev_actions; + const llc_sap_action_t *ev_actions; }; struct llc_sap_state { diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 0ad1f47d2dc77ee472656fbbfce1afb08d1e3a6f..58d719ddaa60c93d2c6424765e96abf254cb4dc6 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -263,6 +263,7 @@ struct ieee80211_vif_chanctx_switch { * @BSS_CHANGED_BANDWIDTH: The bandwidth used by this interface changed, * note that this is only called when it changes after the channel * context had been assigned. 
+ * @BSS_CHANGED_OCB: OCB join status changed */ enum ieee80211_bss_change { BSS_CHANGED_ASSOC = 1<<0, @@ -287,6 +288,7 @@ enum ieee80211_bss_change { BSS_CHANGED_P2P_PS = 1<<19, BSS_CHANGED_BEACON_INFO = 1<<20, BSS_CHANGED_BANDWIDTH = 1<<21, + BSS_CHANGED_OCB = 1<<22, /* when adding here, make sure to change ieee80211_reconfig */ }; @@ -739,7 +741,8 @@ struct ieee80211_tx_info { u8 ampdu_ack_len; u8 ampdu_len; u8 antenna; - void *status_driver_data[21 / sizeof(void *)]; + u16 tx_time; + void *status_driver_data[19 / sizeof(void *)]; } status; struct { struct ieee80211_tx_rate driver_rates[ @@ -879,6 +882,9 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * subframes share the same sequence number. Reported subframes can be * either regular MSDU or singly A-MSDUs. Subframes must not be * interleaved with other frames. + * @RX_FLAG_RADIOTAP_VENDOR_DATA: This frame contains vendor-specific + * radiotap data in the skb->data (before the frame) as described by + * the &struct ieee80211_vendor_radiotap. */ enum mac80211_rx_flags { RX_FLAG_MMIC_ERROR = BIT(0), @@ -908,6 +914,7 @@ enum mac80211_rx_flags { RX_FLAG_10MHZ = BIT(28), RX_FLAG_5MHZ = BIT(29), RX_FLAG_AMSDU_MORE = BIT(30), + RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(31), }; #define RX_FLAG_STBC_SHIFT 26 @@ -978,6 +985,39 @@ struct ieee80211_rx_status { u8 ampdu_delimiter_crc; }; +/** + * struct ieee80211_vendor_radiotap - vendor radiotap data information + * @present: presence bitmap for this vendor namespace + * (this could be extended in the future if any vendor needs more + * bits; the radiotap spec does allow for that) + * @align: radiotap vendor namespace alignment. This defines the needed + * alignment for the @data field below, not for the vendor namespace + * description itself (which has a fixed 2-byte alignment) + * Must be a power of two, and be set to at least 1! + * @oui: radiotap vendor namespace OUI + * @subns: radiotap vendor sub namespace + * @len: radiotap vendor sub namespace skip length; if alignment is done + * then that's added to this, i.e. this is only the length of the + * @data field. + * @pad: number of bytes of padding after the @data; this exists so that + * the skb data alignment can be preserved even if the data has odd + * length + * @data: the actual vendor namespace data + * + * This struct, including the vendor data, goes into the skb->data before + * the 802.11 header. It's split up in mac80211 using the align/oui/subns + * data. + */ +struct ieee80211_vendor_radiotap { + u32 present; + u8 align; + u8 oui[3]; + u8 subns; + u8 pad; + u16 len; + u8 data[]; +} __packed; +
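To make the documented layout concrete, here is a hedged sketch of an rx path handing four bytes of vendor data to mac80211 ahead of the 802.11 frame; the OUI, the data values, and foo_report_rx() are made up, and the driver is assumed to have reserved enough skb headroom:

static void foo_report_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	static const u8 vendor_data[4] = { 0x11, 0x22, 0x33, 0x44 };
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_vendor_radiotap *rtap;

	/* the vendor blob goes in front of the 802.11 header */
	rtap = (void *)skb_push(skb, sizeof(*rtap) + sizeof(vendor_data));
	memset(rtap, 0, sizeof(*rtap));
	rtap->present = BIT(0);		/* first bit of the vendor namespace */
	rtap->align = 1;		/* byte-aligned data */
	rtap->oui[0] = 0x00; rtap->oui[1] = 0x11; rtap->oui[2] = 0x22;
	rtap->len = sizeof(vendor_data);
	memcpy(rtap->data, vendor_data, sizeof(vendor_data));

	status->flag |= RX_FLAG_RADIOTAP_VENDOR_DATA;
	ieee80211_rx_irqsafe(hw, skb);
}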
/** * enum ieee80211_conf_flags - configuration flags * @@ -1117,6 +1157,8 @@ struct ieee80211_conf { * Function (TSF) timer when the frame containing the channel switch * announcement was received. This is simply the rx.mactime parameter * the driver passed into mac80211. + * @device_timestamp: arbitrary timestamp for the device; this is the + * rx.device_timestamp parameter the driver passed to mac80211. * @block_tx: Indicates whether transmission must be blocked before the * scheduled channel switch, as indicated by the AP. * @chandef: the new channel to switch to @@ -1124,6 +1166,7 @@ */ struct ieee80211_channel_switch { u64 timestamp; + u32 device_timestamp; bool block_tx; struct cfg80211_chan_def chandef; u8 count; @@ -1423,6 +1466,8 @@ struct ieee80211_sta_rates { * @smps_mode: current SMPS mode (off, static or dynamic) * @rates: rate control selection table * @tdls: indicates whether the STA is a TDLS peer + * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only + * valid if the STA is a TDLS peer in the first place. */ struct ieee80211_sta { u32 supp_rates[IEEE80211_NUM_BANDS]; @@ -1438,6 +1483,7 @@ struct ieee80211_sta { enum ieee80211_smps_mode smps_mode; struct ieee80211_sta_rates __rcu *rates; bool tdls; + bool tdls_initiator; /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); @@ -1576,6 +1622,10 @@ struct ieee80211_tx_control { * a virtual monitor interface when monitor interfaces are the only * active interfaces. + * + * @IEEE80211_HW_NO_AUTO_VIF: The driver would like no default (wlanX) + * interface to be created. It is expected that user-space will create + * vifs as desired (and thus have them named as desired). + * * @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface * queue mapping in order to use different queues (not just one per AC) * for different virtual interfaces. See the doc section on HW queue @@ -1622,7 +1672,8 @@ enum ieee80211_hw_flags { IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12, IEEE80211_HW_MFP_CAPABLE = 1<<13, IEEE80211_HW_WANT_MONITOR_VIF = 1<<14, - /* free slots */ + IEEE80211_HW_NO_AUTO_VIF = 1<<15, + /* free slot */ IEEE80211_HW_SUPPORTS_UAPSD = 1<<17, IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18, IEEE80211_HW_CONNECTION_MONITOR = 1<<19, @@ -1775,6 +1826,31 @@ struct ieee80211_scan_request { struct cfg80211_scan_request req; }; +/** + * struct ieee80211_tdls_ch_sw_params - TDLS channel switch parameters + * + * @sta: peer this TDLS channel-switch request/response came from + * @chandef: channel referenced in a TDLS channel-switch request + * @action_code: see &enum ieee80211_tdls_actioncode + * @status: channel-switch response status + * @timestamp: time at which the frame was received + * @switch_time: switch-timing parameter received in the frame + * @switch_timeout: switch-timing parameter received in the frame + * @tmpl_skb: TDLS switch-channel response template + * @ch_sw_tm_ie: offset of the channel-switch timing IE inside @tmpl_skb + */ +struct ieee80211_tdls_ch_sw_params { + struct ieee80211_sta *sta; + struct cfg80211_chan_def *chandef; + u8 action_code; + u32 status; + u32 timestamp; + u16 switch_time; + u16 switch_timeout; + struct sk_buff *tmpl_skb; + u32 ch_sw_tm_ie; +}; + /** * wiphy_to_ieee80211_hw - return a mac80211 driver hw struct from a wiphy * @@ -2374,6 +2450,22 @@ enum ieee80211_roc_type { IEEE80211_ROC_TYPE_MGMT_TX, }; +/** + * enum ieee80211_reconfig_type - reconfig type + * + * This enum is used by the reconfig_complete() callback to indicate what + * reconfiguration type was completed. 
+ * + * @IEEE80211_RECONFIG_TYPE_RESTART: hw restart type + * (also due to resume() callback returning 1) + * @IEEE80211_RECONFIG_TYPE_SUSPEND: suspend type (regardless + * of wowlan configuration) + */ +enum ieee80211_reconfig_type { + IEEE80211_RECONFIG_TYPE_RESTART, + IEEE80211_RECONFIG_TYPE_SUSPEND, +}; + /** * struct ieee80211_ops - callbacks from mac80211 to the driver * @@ -2530,7 +2622,9 @@ enum ieee80211_roc_type { * * @sw_scan_start: Notifier function that is called just before a software scan * is started. Can be NULL, if the driver doesn't need this notification. - * The callback can sleep. + * The mac_addr parameter allows supporting NL80211_SCAN_FLAG_RANDOM_ADDR; + * the driver may set the NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR flag if it + * can use this parameter. The callback can sleep. * * @sw_scan_complete: Notifier function that is called just after a * software scan finished. Can be NULL, if the driver doesn't need @@ -2601,6 +2695,9 @@ enum ieee80211_roc_type { * uses hardware rate control (%IEEE80211_HW_HAS_RATE_CONTROL) since * otherwise the rate control algorithm is notified directly. * Must be atomic. + * @sta_rate_tbl_update: Notifies the driver that the rate table changed. This + * is only used if the configured rate control algorithm actually uses + * the new rate table API, and is therefore optional. Must be atomic. * * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max), * bursting) for a hardware TX queue. @@ -2809,11 +2906,11 @@ enum ieee80211_roc_type { * disabled/enabled via @bss_info_changed. * @stop_ap: Stop operation on the AP interface. * - * @restart_complete: Called after a call to ieee80211_restart_hw(), when the - * reconfiguration has completed. This can help the driver implement the - * reconfiguration step. Also called when reconfiguring because the - * driver's resume function returned 1, as this is just like an "inline" - * hardware restart. This callback may sleep. + * @reconfig_complete: Called after a call to ieee80211_restart_hw() and + * during resume, when the reconfiguration has completed. + * This can help the driver implement the reconfiguration step (and + * indicate mac80211 is ready to receive frames). + * This callback may sleep. * * @ipv6_addr_change: IPv6 address assignment on the given interface changed. * Currently, this is only called for managed or P2P client interfaces. @@ -2829,6 +2926,13 @@ enum ieee80211_roc_type { * transmitted and then call ieee80211_csa_finish(). * If the CSA count starts as zero or 1, this function will not be called, * since there won't be any time to beacon before the switch anyway. + * @pre_channel_switch: This is an optional callback that is called + * before a channel switch procedure is started (i.e. when a STA + * gets a CSA or a userspace-initiated channel-switch), allowing + * the driver to prepare for the channel switch. + * @post_channel_switch: This is an optional callback that is called + * after a channel switch procedure is completed, allowing the + * driver to go back to a normal configuration. * * @join_ibss: Join an IBSS (on an IBSS interface); this is called after all * information in bss_conf is set up and the beacon can be retrieved. A @@ -2838,6 +2942,26 @@ enum ieee80211_roc_type { * @get_expected_throughput: extract the expected throughput towards the * specified station. The returned value is expressed in Kbps. It returns 0 * if the RC algorithm does not have proper data to provide. 
+ * + * @get_txpower: get current maximum tx power (in dBm) based on configuration + * and hardware limits. + * + * @tdls_channel_switch: Start channel-switching with a TDLS peer. The driver + * is responsible for continually initiating channel-switching operations + * and returning to the base channel for communication with the AP. The + * driver receives a channel-switch request template and the location of + * the switch-timing IE within the template as part of the invocation. + * The template is valid only within the call, and the driver can + * optionally copy the skb for further re-use. + * @tdls_cancel_channel_switch: Stop channel-switching with a TDLS peer. Both + * peers must be on the base channel when the call completes. + * @tdls_recv_channel_switch: a TDLS channel-switch related frame (request or + * response) has been received from a remote peer. The driver gets + * parameters parsed from the incoming frame and may use them to continue + * an ongoing channel-switch operation. In addition, a channel-switch + * response template is provided, together with the location of the + * switch-timing IE within the template. The skb can only be used within + * the function call. */ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, @@ -2897,8 +3021,11 @@ struct ieee80211_ops { struct ieee80211_scan_ies *ies); int (*sched_scan_stop)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); - void (*sw_scan_start)(struct ieee80211_hw *hw); - void (*sw_scan_complete)(struct ieee80211_hw *hw); + void (*sw_scan_start)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *mac_addr); + void (*sw_scan_complete)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); int (*get_stats)(struct ieee80211_hw *hw, struct ieee80211_low_level_stats *stats); void (*get_tkip_seq)(struct ieee80211_hw *hw, u8 hw_key_idx, @@ -2932,6 +3059,9 @@ struct ieee80211_ops { struct ieee80211_vif *vif, struct ieee80211_sta *sta, u32 changed); + void (*sta_rate_tbl_update)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); int (*conf_tx)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 ac, const struct ieee80211_tx_queue_params *params); @@ -2959,6 +3089,7 @@ struct ieee80211_ops { void (*flush)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop); void (*channel_switch)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, struct ieee80211_channel_switch *ch_switch); int (*set_antenna)(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant); int (*get_antenna)(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant); @@ -3025,7 +3156,8 @@ struct ieee80211_ops { int n_vifs, enum ieee80211_chanctx_switch_mode mode); - void (*restart_complete)(struct ieee80211_hw *hw); + void (*reconfig_complete)(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type); #if IS_ENABLED(CONFIG_IPV6) void (*ipv6_addr_change)(struct ieee80211_hw *hw, @@ -3035,14 +3167,54 @@ struct ieee80211_ops { void (*channel_switch_beacon)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_chan_def *chandef); + int (*pre_channel_switch)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *ch_switch); + + int (*post_channel_switch)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); u32 (*get_expected_throughput)(struct ieee80211_sta *sta); + int (*get_txpower)(struct ieee80211_hw *hw, 
struct ieee80211_vif *vif, + int *dbm); + + int (*tdls_channel_switch)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u8 oper_class, + struct cfg80211_chan_def *chandef, + struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie); + void (*tdls_cancel_channel_switch)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + void (*tdls_recv_channel_switch)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_tdls_ch_sw_params *params); }; /** - * ieee80211_alloc_hw - Allocate a new hardware device + * ieee80211_alloc_hw_nm - Allocate a new hardware device + * + * This must be called once for each hardware device. The returned pointer + * must be used to refer to this device when calling other functions. + * mac80211 allocates a private data area for the driver pointed to by + * @priv in &struct ieee80211_hw; the size of this area is given as + * @priv_data_len. + * + * @priv_data_len: length of private data + * @ops: callbacks for this device + * @requested_name: Requested name for this device. + * NULL is a valid value, and means use the default naming (phy%d) + * + * Return: A pointer to the new hardware device, or %NULL on error. + */ +struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, + const struct ieee80211_ops *ops, + const char *requested_name); + +/** + * ieee80211_alloc_hw - Allocate a new hardware device * * This must be called once for each hardware device. The returned pointer * must be used to refer to this device when calling other functions. @@ -3055,8 +3227,12 @@ struct ieee80211_ops { * * Return: A pointer to the new hardware device, or %NULL on error. */ +static inline struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, - const struct ieee80211_ops *ops); + const struct ieee80211_ops *ops) +{ + return ieee80211_alloc_hw_nm(priv_data_len, ops, NULL); +}
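The split between ieee80211_alloc_hw_nm() and the backwards-compatible inline wrapper is easiest to see from the caller's side. A minimal sketch, assuming hypothetical my_ops/struct my_priv driver names:

static int my_probe(void)
{
	struct ieee80211_hw *hw;

	/* default naming (phy%d) via the wrapper */
	hw = ieee80211_alloc_hw(sizeof(struct my_priv), &my_ops);
	if (!hw)
		return -ENOMEM;

	/* or request a specific name at allocation time:
	 * hw = ieee80211_alloc_hw_nm(sizeof(struct my_priv), &my_ops, "myphy");
	 */
	return 0;
}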
/** * ieee80211_register_hw - Register hardware device @@ -3442,6 +3618,26 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif, void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb); +/** + * ieee80211_tx_status_noskb - transmit status callback without skb + * + * This function can be used as a replacement for ieee80211_tx_status + * in drivers that cannot reliably map tx status information back to + * specific skbs. + * + * Calls to this function for a single hardware must be synchronized + * against each other. Calls to this function, ieee80211_tx_status_ni() + * and ieee80211_tx_status_irqsafe() may not be mixed for a single hardware. + * + * @hw: the hardware the frame was transmitted by + * @sta: the receiver station to which this packet is sent + * (NULL for multicast packets) + * @info: tx status information + */ +void ieee80211_tx_status_noskb(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct ieee80211_tx_info *info); + /** * ieee80211_tx_status_ni - transmit status callback (in process context) * @@ -3655,7 +3851,7 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, /** * ieee80211_probereq_get - retrieve a Probe Request template * @hw: pointer obtained from ieee80211_alloc_hw(). - * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @src_addr: source MAC address * @ssid: SSID buffer * @ssid_len: length of SSID * @tailroom: tailroom to reserve at end of SKB for IEs @@ -3666,7 +3862,7 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, * Return: The Probe Request template. %NULL on error. */ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, + const u8 *src_addr, const u8 *ssid, size_t ssid_len, size_t tailroom); @@ -4171,6 +4367,22 @@ void ieee80211_iterate_active_interfaces_rtnl(struct ieee80211_hw *hw, struct ieee80211_vif *vif), void *data); +/** + * ieee80211_iterate_stations_atomic - iterate stations + * + * This function iterates over all stations associated with a given + * hardware that are currently uploaded to the driver and calls the callback + * function for them. + * This function requires the iterator callback function to be atomic. + * + * @hw: the hardware struct whose stations should be iterated over + * @iterator: the iterator function to call, cannot sleep + * @data: first argument of the iterator function + */ +void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw, + void (*iterator)(void *data, + struct ieee80211_sta *sta), + void *data);
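A typical caller of the new station iterator is a driver reprogramming per-station state after, e.g., a firmware reconfiguration. A hedged sketch under that assumption — foo_priv and foo_reprogram_sta() are hypothetical driver-side names:

static void foo_restore_sta(void *data, struct ieee80211_sta *sta)
{
	struct foo_priv *priv = data;

	/* runs in atomic context: no sleeping calls allowed here */
	foo_reprogram_sta(priv, sta);
}

static void foo_restore_all_stas(struct foo_priv *priv)
{
	ieee80211_iterate_stations_atomic(priv->hw, foo_restore_sta, priv);
}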
/** * ieee80211_queue_work - add work onto the mac80211 workqueue * @@ -4479,6 +4691,14 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, enum nl80211_cqm_rssi_threshold_event rssi_event, gfp_t gfp); +/** + * ieee80211_cqm_beacon_loss_notify - inform CQM of beacon loss + * + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @gfp: context flags + */ +void ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp); + /** * ieee80211_radar_detected - inform that a radar was detected * @@ -4637,6 +4857,10 @@ struct rate_control_ops { void (*free_sta)(void *priv, struct ieee80211_sta *sta, void *priv_sta); + void (*tx_status_noskb)(void *priv, + struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct ieee80211_tx_info *info); void (*tx_status)(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta, struct sk_buff *skb); @@ -4888,4 +5112,69 @@ void ieee80211_update_p2p_noa(struct ieee80211_noa_data *data, u32 tsf); void ieee80211_tdls_oper_request(struct ieee80211_vif *vif, const u8 *peer, enum nl80211_tdls_operation oper, u16 reason_code, gfp_t gfp); + +/** + * ieee80211_reserve_tid - request to reserve a specific TID + * + * There is sometimes a need (such as in TDLS) for blocking the driver from + * using a specific TID so that the FW can use it for certain operations such + * as sending PTI requests. To make sure that the driver doesn't use that TID, + * this function must be called as it flushes out packets on this TID and marks + * it as blocked, so that any transmit for the station on this TID will be + * redirected to the alternative TID in the same AC. + * + * Note that this function blocks and may call back into the driver, so it + * should be called without driver locks held. Also note this function should + * only be called from the driver's @sta_state callback. + * + * @sta: the station to reserve the TID for + * @tid: the TID to reserve + * + * Returns: 0 on success; a negative error code on failure + */ +int ieee80211_reserve_tid(struct ieee80211_sta *sta, u8 tid); + +/** + * ieee80211_unreserve_tid - request to unreserve a specific TID + * + * Once there is no longer any need for reserving a certain TID, this function + * should be called; packets will then no longer have their TID modified to + * prevent use of this TID in the driver. + * + * Note that this function blocks and acquires a lock, so it should be called + * without driver locks held. Also note this function should only be called + * from the driver's @sta_state callback. + * + * @sta: the station + * @tid: the TID to unreserve + */ +void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid); + +/** + * ieee80211_ie_split - split an IE buffer according to ordering + * + * @ies: the IE buffer + * @ielen: the length of the IE buffer + * @ids: an array with element IDs that are allowed before + * the split + * @n_ids: the size of the element ID array + * @offset: offset where to start splitting in the buffer + * + * This function splits an IE buffer by updating the @offset + * variable to point to the location where the buffer should be + * split. + * + * It assumes that the given IE buffer is well-formed; this + * has to be guaranteed by the caller! + * + * It also assumes that the IEs in the buffer are ordered + * correctly; if not, the result of using this function will not + * be ordered correctly either, i.e. it does no reordering. + * + * The function returns the offset where the next part of the + * buffer starts, which may be @ielen if the entire (remainder) + * of the buffer should be used. + */ +size_t ieee80211_ie_split(const u8 *ies, size_t ielen, + const u8 *ids, int n_ids, size_t offset); #endif /* MAC80211_H */
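ieee80211_ie_split() is easiest to see with a concrete call. A hedged sketch, assuming a caller that wants to insert its own IE after the SSID and Supported Rates elements (foo_ie_insert_offset() is hypothetical):

static size_t foo_ie_insert_offset(const u8 *ies, size_t ielen)
{
	/* element IDs that must stay in front of the inserted IE */
	static const u8 before_ids[] = { WLAN_EID_SSID, WLAN_EID_SUPP_RATES };

	/* ies[0..ret) go before the new IE, ies[ret..ielen) after it */
	return ieee80211_ie_split(ies, ielen, before_ids,
				  ARRAY_SIZE(before_ids), 0);
}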
diff --git a/include/net/mac802154.h b/include/net/mac802154.h index 2e67cdd19cdc6d34c322056c52dc72fedf2c416b..c823d910b46ca7da89866226ba5e1be7c7c93529 100644 --- a/include/net/mac802154.h +++ b/include/net/mac802154.h @@ -12,14 +12,12 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef NET_MAC802154_H #define NET_MAC802154_H #include +#include #include /* General MAC frame format: * @@ -35,13 +33,13 @@ */ /* indicates that the Short Address changed */ -#define IEEE802515_AFILT_SADDR_CHANGED 0x00000001 +#define IEEE802154_AFILT_SADDR_CHANGED 0x00000001 /* indicates that the IEEE Address changed */ -#define IEEE802515_AFILT_IEEEADDR_CHANGED 0x00000002 +#define IEEE802154_AFILT_IEEEADDR_CHANGED 0x00000002 /* indicates that the PAN ID changed */ -#define IEEE802515_AFILT_PANID_CHANGED 0x00000004 +#define IEEE802154_AFILT_PANID_CHANGED 0x00000004 /* indicates that PAN Coordinator status changed */ -#define IEEE802515_AFILT_PANC_CHANGED 0x00000008 +#define IEEE802154_AFILT_PANC_CHANGED 0x00000008 struct ieee802154_hw_addr_filt { __le16 pan_id; /* Each independent PAN selects a unique @@ -55,7 +53,14 @@ struct ieee802154_hw_addr_filt { u8 pan_coord; }; -struct ieee802154_dev { +struct ieee802154_vif { + int type; + + /* must be last */ + u8 drv_priv[0] __aligned(sizeof(void *)); +}; + +struct ieee802154_hw { /* filled by the driver */ int extra_tx_headroom; u32 flags; @@ -65,6 +70,7 @@ struct ieee802154_dev { struct ieee802154_hw_addr_filt hw_filt; void *priv; struct wpan_phy *phy; + size_t vif_data_size; }; /* Checksum is in hardware and is omitted from a packet * however, so you are advised to review these flags carefully. */ -/* Indicates that receiver omits FCS and xmitter will add FCS on it's own. */ -#define IEEE802154_HW_OMIT_CKSUM 0x00000001 +/* Indicates that xmitter will add FCS on its own. */ +#define IEEE802154_HW_TX_OMIT_CKSUM 0x00000001 /* Indicates that receiver will autorespond with ACK frames. */ -#define IEEE802154_HW_AACK 0x00000002 +#define IEEE802154_HW_AACK 0x00000002 /* Indicates that transceiver will support transmit power setting. */ -#define IEEE802154_HW_TXPOWER 0x00000004 +#define IEEE802154_HW_TXPOWER 0x00000004 /* Indicates that transceiver will support listen before transmit. */ -#define IEEE802154_HW_LBT 0x00000008 +#define IEEE802154_HW_LBT 0x00000008 /* Indicates that transceiver will support cca mode setting. */ -#define IEEE802154_HW_CCA_MODE 0x00000010 +#define IEEE802154_HW_CCA_MODE 0x00000010 /* Indicates that transceiver will support cca ed level setting. */ -#define IEEE802154_HW_CCA_ED_LEVEL 0x00000020 +#define IEEE802154_HW_CCA_ED_LEVEL 0x00000020 /* Indicates that transceiver will support csma (max_be, min_be, csma retries) * settings. */ -#define IEEE802154_HW_CSMA_PARAMS 0x00000040 +#define IEEE802154_HW_CSMA_PARAMS 0x00000040 /* Indicates that transceiver will support ARET frame retries setting. */ -#define IEEE802154_HW_FRAME_RETRIES 0x00000080 +#define IEEE802154_HW_FRAME_RETRIES 0x00000080 +/* Indicates that transceiver will support hardware address filter setting. */ +#define IEEE802154_HW_AFILT 0x00000100 +/* Indicates that transceiver will support promiscuous mode setting. */ +#define IEEE802154_HW_PROMISCUOUS 0x00000200 +/* Indicates that receiver omits FCS. */ +#define IEEE802154_HW_RX_OMIT_CKSUM 0x00000400 +/* Indicates that receiver will not filter frames with bad checksum. */ +#define IEEE802154_HW_RX_DROP_BAD_CKSUM 0x00000800 + +/* Indicates that receiver omits FCS and xmitter will add FCS on its own. */ +#define IEEE802154_HW_OMIT_CKSUM (IEEE802154_HW_TX_OMIT_CKSUM | \ + IEEE802154_HW_RX_OMIT_CKSUM) /* This groups the most common CSMA support fields into one. */ #define IEEE802154_HW_CSMA (IEEE802154_HW_CCA_MODE | \ IEEE802154_HW_CCA_ED_LEVEL | \ - IEEE802154_HW_CSMA_PARAMS | \ + IEEE802154_HW_CSMA_PARAMS) + +/* This groups the most common ARET support fields into one. */ +#define IEEE802154_HW_ARET (IEEE802154_HW_CSMA | \ IEEE802154_HW_FRAME_RETRIES) /* struct ieee802154_ops - callbacks from mac802154 to the driver @@ -112,12 +133,24 @@ struct ieee802154_dev { * stop: Handler that 802.15.4 module calls for device cleanup. * This function is called after the last interface is removed. * - * xmit: Handler that 802.15.4 module calls for each transmitted frame. + * xmit_sync: + * Handler that 802.15.4 module calls for each transmitted frame. + * skb contains the buffer starting from the IEEE 802.15.4 header. + * The low-level driver should send the frame based on available + * configuration. This is called by a workqueue and is useful for + * synchronous 802.15.4 drivers. + * This function should return zero or negative errno. + * + * WARNING: + * This will be deprecated soon. We no longer accept drivers with + * synchronous xmit callbacks. + * + * xmit_async: + * Handler that 802.15.4 module calls for each transmitted frame. * skb cntains the buffer starting from the IEEE 802.15.4 header. * The low-level driver should send the frame based on available * configuration. - This function should return zero or negative errno. Called with - pib_lock held. + * This function should return zero or negative errno. * * ed: Handler that 802.15.4 module calls for Energy Detection. * This function should place the value for detected energy @@ -159,40 +192,75 @@ struct ieee802154_dev { * set_frame_retries * Sets the retransmission attempt limit. Called with pib_lock held. * Returns either zero, or negative errno. 
+ * + * set_promiscuous_mode + * Enables or disables promiscuous mode. */ struct ieee802154_ops { struct module *owner; - int (*start)(struct ieee802154_dev *dev); - void (*stop)(struct ieee802154_dev *dev); - int (*xmit)(struct ieee802154_dev *dev, - struct sk_buff *skb); - int (*ed)(struct ieee802154_dev *dev, u8 *level); - int (*set_channel)(struct ieee802154_dev *dev, - int page, - int channel); - int (*set_hw_addr_filt)(struct ieee802154_dev *dev, - struct ieee802154_hw_addr_filt *filt, + int (*start)(struct ieee802154_hw *hw); + void (*stop)(struct ieee802154_hw *hw); + int (*xmit_sync)(struct ieee802154_hw *hw, + struct sk_buff *skb); + int (*xmit_async)(struct ieee802154_hw *hw, + struct sk_buff *skb); + int (*ed)(struct ieee802154_hw *hw, u8 *level); + int (*set_channel)(struct ieee802154_hw *hw, u8 page, + u8 channel); + int (*set_hw_addr_filt)(struct ieee802154_hw *hw, + struct ieee802154_hw_addr_filt *filt, unsigned long changed); - int (*ieee_addr)(struct ieee802154_dev *dev, __le64 addr); - int (*set_txpower)(struct ieee802154_dev *dev, int db); - int (*set_lbt)(struct ieee802154_dev *dev, bool on); - int (*set_cca_mode)(struct ieee802154_dev *dev, u8 mode); - int (*set_cca_ed_level)(struct ieee802154_dev *dev, + int (*set_txpower)(struct ieee802154_hw *hw, int db); + int (*set_lbt)(struct ieee802154_hw *hw, bool on); + int (*set_cca_mode)(struct ieee802154_hw *hw, u8 mode); + int (*set_cca_ed_level)(struct ieee802154_hw *hw, s32 level); - int (*set_csma_params)(struct ieee802154_dev *dev, + int (*set_csma_params)(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries); - int (*set_frame_retries)(struct ieee802154_dev *dev, + int (*set_frame_retries)(struct ieee802154_hw *hw, s8 retries); + int (*set_promiscuous_mode)(struct ieee802154_hw *hw, + const bool on); }; -/* Basic interface to register ieee802154 device */ -struct ieee802154_dev * -ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops); -void ieee802154_free_device(struct ieee802154_dev *dev); -int ieee802154_register_device(struct ieee802154_dev *dev); -void ieee802154_unregister_device(struct ieee802154_dev *dev); +/** + * ieee802154_be64_to_le64 - copies and converts be64 to le64 + * @le64_dst: le64 destination pointer + * @be64_src: be64 source pointer + */ +static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src) +{ + __le64 tmp = (__force __le64)swab64p(be64_src); + + memcpy(le64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN); +} -void ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb, +/** + * ieee802154_le64_to_be64 - copies and converts le64 to be64 + * @be64_dst: be64 destination pointer + * @le64_src: le64 source pointer + */ +static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src) +{ + __be64 tmp = (__force __be64)swab64p(le64_src); + + memcpy(be64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN); +} + +/* Basic interface to register ieee802154 hardware */ +struct ieee802154_hw * +ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops); +void ieee802154_free_hw(struct ieee802154_hw *hw); +int ieee802154_register_hw(struct ieee802154_hw *hw); +void ieee802154_unregister_hw(struct ieee802154_hw *hw); + +void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb); +void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi); +void ieee802154_wake_queue(struct ieee802154_hw *hw); +void ieee802154_stop_queue(struct ieee802154_hw *hw); +void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb, + bool ifs_handling); +
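The renamed registration entry points mirror mac80211's alloc/register pattern. A minimal sketch of a driver probe path under these assumptions — foo_ops, struct foo_priv, and an SPI transport are hypothetical:

static int foo_probe(struct spi_device *spi)
{
	struct ieee802154_hw *hw;
	struct foo_priv *priv;
	int ret;

	hw = ieee802154_alloc_hw(sizeof(*priv), &foo_ops);
	if (!hw)
		return -ENOMEM;

	priv = hw->priv;	/* driver-private area, sized above */
	priv->hw = hw;
	hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AACK;

	ret = ieee802154_register_hw(hw);
	if (ret)
		ieee802154_free_hw(hw);
	return ret;
}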
#endif /* NET_MAC802154_H */ diff --git a/include/net/mpls.h b/include/net/mpls.h new file mode 100644 index 0000000000000000000000000000000000000000..5b3b5addfb080a37baf43ec7bde328f3c579b869 --- /dev/null +++ b/include/net/mpls.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2014 Nicira, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef _NET_MPLS_H +#define _NET_MPLS_H 1 + +#include +#include + +#define MPLS_HLEN 4 + +static inline bool eth_p_mpls(__be16 eth_type) +{ + return eth_type == htons(ETH_P_MPLS_UC) || + eth_type == htons(ETH_P_MPLS_MC); +} + +/* + * For non-MPLS skbs this will correspond to the network header. + * For MPLS skbs it will be before the network_header as the MPLS + * label stack lies between the end of the mac header and the network + * header. That is, for MPLS skbs the end of the mac header + * is the top of the MPLS label stack. + */ +static inline unsigned char *skb_mpls_header(struct sk_buff *skb) +{ + return skb_mac_header(skb) + skb->mac_len; +} +#endif
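Since skb_mpls_header() points at the top of the label stack, decoding the top label stack entry is a straightforward read at that offset. A hedged sketch, assuming the RFC 3032 LSE layout (foo_top_mpls_label() is hypothetical):

static u32 foo_top_mpls_label(struct sk_buff *skb)
{
	__be32 lse;

	if (!eth_p_mpls(skb->protocol))
		return 0;	/* not an MPLS frame */

	/* top label stack entry sits right after the mac header */
	memcpy(&lse, skb_mpls_header(skb), MPLS_HLEN);
	return (ntohl(lse) >> 12) & 0xfffff;	/* 20-bit label field */
}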
diff --git a/include/net/neighbour.h b/include/net/neighbour.h index f60558d0254ca1a482ab4eb2924ecbaf8d6c169c..eb070b3674a1ba346c2c779889a90b636a8d3d7a 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -69,7 +69,7 @@ struct neigh_parms { struct net *net; #endif struct net_device *dev; - struct neigh_parms *next; + struct list_head list; int (*neigh_setup)(struct neighbour *); void (*neigh_cleanup)(struct neighbour *); struct neigh_table *tbl; @@ -203,6 +203,7 @@ struct neigh_table { void (*proxy_redo)(struct sk_buff *skb); char *id; struct neigh_parms parms; + struct list_head parms_list; int gc_interval; int gc_thresh1; int gc_thresh2; @@ -219,6 +220,13 @@ struct neigh_table { struct pneigh_entry **phash_buckets; }; +enum { + NEIGH_ARP_TABLE = 0, + NEIGH_ND_TABLE = 1, + NEIGH_DN_TABLE = 2, + NEIGH_NR_TABLES, +}; + static inline int neigh_parms_family(struct neigh_parms *p) { return p->tbl->family; @@ -239,8 +247,8 @@ static inline void *neighbour_priv(const struct neighbour *n) #define NEIGH_UPDATE_F_ISROUTER 0x40000000 #define NEIGH_UPDATE_F_ADMIN 0x80000000 -void neigh_table_init(struct neigh_table *tbl); -int neigh_table_clear(struct neigh_table *tbl); +void neigh_table_init(int index, struct neigh_table *tbl); +int neigh_table_clear(int index, struct neigh_table *tbl); struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev); struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net, diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index c8a7db605e038007a90cd52cf503a171f36f2f6d..f0daed2b54d139146649b44f3973969ebee57e60 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -92,12 +92,18 @@ struct nf_conn { /* Have we seen traffic both ways yet? (bitset) */ unsigned long status; - /* If we were expected by an expectation, this will be it */ - struct nf_conn *master; - /* Timer function; drops refcnt when it goes off. */ struct timer_list timeout; +#ifdef CONFIG_NET_NS + struct net *ct_net; +#endif + /* all members below initialized via memset */ + u8 __nfct_init_offset[0]; + + /* If we were expected by an expectation, this will be it */ + struct nf_conn *master; + #if defined(CONFIG_NF_CONNTRACK_MARK) u_int32_t mark; #endif @@ -108,9 +114,6 @@ struct nf_conn { /* Extensions */ struct nf_ct_ext *ext; -#ifdef CONFIG_NET_NS - struct net *ct_net; -#endif /* Storage reserved for other modules, must be the last member */ union nf_conntrack_proto proto; diff --git a/include/net/netfilter/nf_nat_redirect.h b/include/net/netfilter/nf_nat_redirect.h new file mode 100644 index 0000000000000000000000000000000000000000..73b729543309d5660b30cd0955b54349d640c17c --- /dev/null +++ b/include/net/netfilter/nf_nat_redirect.h @@ -0,0 +1,12 @@ +#ifndef _NF_NAT_REDIRECT_H_ +#define _NF_NAT_REDIRECT_H_ + +unsigned int +nf_nat_redirect_ipv4(struct sk_buff *skb, + const struct nf_nat_ipv4_multi_range_compat *mr, + unsigned int hooknum); +unsigned int +nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, + unsigned int hooknum); + +#endif /* _NF_NAT_REDIRECT_H_ */ diff --git a/include/net/netfilter/nf_tables_bridge.h b/include/net/netfilter/nf_tables_bridge.h new file mode 100644 index 0000000000000000000000000000000000000000..511fb79f6dadfe02bd7b3753d489f76fdb4085cd --- /dev/null +++ b/include/net/netfilter/nf_tables_bridge.h @@ -0,0 +1,7 @@ +#ifndef _NET_NF_TABLES_BRIDGE_H +#define _NET_NF_TABLES_BRIDGE_H + +int nft_bridge_iphdr_validate(struct sk_buff *skb); +int nft_bridge_ip6hdr_validate(struct sk_buff *skb); + +#endif /* _NET_NF_TABLES_BRIDGE_H */ diff --git a/include/net/netfilter/nft_redir.h b/include/net/netfilter/nft_redir.h new file mode 100644 index 0000000000000000000000000000000000000000..a2d67546afab0d7dbd1d7c7b825e04d8a929dea0 --- /dev/null +++ b/include/net/netfilter/nft_redir.h @@ -0,0 +1,21 @@ +#ifndef _NFT_REDIR_H_ +#define _NFT_REDIR_H_ + +struct nft_redir { + enum nft_registers sreg_proto_min:8; + enum nft_registers sreg_proto_max:8; + u16 flags; +}; + +extern const struct nla_policy nft_redir_policy[]; + +int nft_redir_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]); + +int nft_redir_dump(struct sk_buff *skb, const struct nft_expr *expr); + +int nft_redir_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, + const struct nft_data **data); + +#endif /* _NFT_REDIR_H_ */ diff --git a/include/net/netlink.h b/include/net/netlink.h index 7b903e1bdbbb0e38d71a7b7cb9d8e8911918226e..64158353ecb2750a3165dc07e915755ccb801522 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -1185,4 +1185,14 @@ static inline int nla_validate_nested(const struct nlattr *start, int maxtype, #define nla_for_each_nested(pos, nla, rem) \ nla_for_each_attr(pos, nla_data(nla), nla_len(nla), rem) +/** + * nla_is_last - Test if attribute is last in stream + * @nla: attribute to test + * @rem: bytes remaining in stream + */ +static inline bool nla_is_last(const struct nlattr *nla, int rem) +{ + return nla->nla_len == rem; +} + #endif
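nla_is_last() is meant for attribute walks that treat the final attribute specially. A small sketch in the style of such a walk — foo_walk_attrs() and the special-casing are hypothetical:

static int foo_walk_attrs(const struct nlattr *head, int len)
{
	const struct nlattr *nla;
	int rem;

	nla_for_each_attr(nla, head, len, rem) {
		if (nla_is_last(nla, rem)) {
			/* no attribute follows this one; it may, e.g.,
			 * be consumed in place instead of copied */
		}
	}
	return 0;
}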
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index 9da798256f0e4dff710273ce96fa108d862a9457..730d82ad6ee521beddc3af344d8e99a339f74dd5 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -50,8 +50,8 @@ struct netns_xfrm { struct list_head policy_all; struct hlist_head *policy_byidx; unsigned int policy_idx_hmask; - struct hlist_head policy_inexact[XFRM_POLICY_MAX * 2]; - struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX * 2]; + struct hlist_head policy_inexact[XFRM_POLICY_MAX]; + struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX]; unsigned int policy_count[XFRM_POLICY_MAX * 2]; struct work_struct policy_hash_work; struct xfrm_policy_hthresh policy_hthresh; diff --git a/include/net/nfc/digital.h b/include/net/nfc/digital.h index d9a5cf7ac1c4750c8418ce867366089e1f133bdc..0ae101eef0f44a89271368d858aae83f030b2e26 100644 --- a/include/net/nfc/digital.h +++ b/include/net/nfc/digital.h @@ -225,6 +225,19 @@ struct nfc_digital_dev { u8 curr_protocol; u8 curr_rf_tech; u8 curr_nfc_dep_pni; + u8 did; + + u8 local_payload_max; + u8 remote_payload_max; + + struct sk_buff *chaining_skb; + struct digital_data_exch *data_exch; + + int atn_count; + int nack_count; + + struct sk_buff *saved_skb; + unsigned int saved_skb_len; u16 target_fsc; diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h index 7ee8f4cc610bebecb119ce28566604e1cd6c3f3c..14bd0e1c47fae31ba0ae70669dde92741a81cc64 100644 --- a/include/net/nfc/hci.h +++ b/include/net/nfc/hci.h @@ -57,10 +57,14 @@ struct nfc_hci_ops { int (*discover_se)(struct nfc_hci_dev *dev); int (*enable_se)(struct nfc_hci_dev *dev, u32 se_idx); int (*disable_se)(struct nfc_hci_dev *dev, u32 se_idx); + int (*se_io)(struct nfc_hci_dev *dev, u32 se_idx, + u8 *apdu, size_t apdu_length, + se_io_cb_t cb, void *cb_context); }; /* Pipes */ #define NFC_HCI_INVALID_PIPE 0x80 +#define NFC_HCI_DO_NOT_CREATE_PIPE 0x81 #define NFC_HCI_LINK_MGMT_PIPE 0x00 #define NFC_HCI_ADMIN_PIPE 0x01 diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h index 9eca9ae2280c57245a4d40b3a17164a98859ccf0..e7257a4653b40a9c4a1c15d938a0ede07ac94fcd 100644 --- a/include/net/nfc/nci.h +++ b/include/net/nfc/nci.h @@ -28,6 +28,8 @@ #ifndef __NCI_H #define __NCI_H +#include + /* NCI constants */ #define NCI_MAX_NUM_MAPPING_CONFIGS 10 #define NCI_MAX_NUM_RF_CONFIGS 10 @@ -73,6 +75,8 @@ #define NCI_NFC_A_ACTIVE_LISTEN_MODE 0x83 #define NCI_NFC_F_ACTIVE_LISTEN_MODE 0x85 +#define NCI_RF_TECH_MODE_LISTEN_MASK 0x80 + /* NCI RF Technologies */ #define NCI_NFC_RF_TECHNOLOGY_A 0x00 #define NCI_NFC_RF_TECHNOLOGY_B 0x01 @@ -106,6 +110,17 @@ /* NCI Configuration Parameter Tags */ #define NCI_PN_ATR_REQ_GEN_BYTES 0x29 +#define NCI_LN_ATR_RES_GEN_BYTES 0x61 +#define NCI_LA_SEL_INFO 0x32 +#define NCI_LF_PROTOCOL_TYPE 0x50 +#define NCI_LF_CON_BITR_F 0x54 + +/* NCI Configuration Parameters masks */ +#define NCI_LA_SEL_INFO_ISO_DEP_MASK 0x20 +#define NCI_LA_SEL_INFO_NFC_DEP_MASK 0x40 +#define NCI_LF_PROTOCOL_TYPE_NFC_DEP_MASK 0x02 +#define NCI_LF_CON_BITR_F_212 0x02 +#define NCI_LF_CON_BITR_F_424 0x04 /* NCI Reset types */ #define NCI_RESET_TYPE_KEEP_CONFIG 0x00 @@ -314,26 +329,31 @@ struct nci_core_intf_error_ntf { struct rf_tech_specific_params_nfca_poll { __u16 sens_res; __u8 nfcid1_len; /* 0, 4, 7, or 10 Bytes */ - __u8 nfcid1[10]; + __u8 nfcid1[NFC_NFCID1_MAXSIZE]; __u8 sel_res_len; /* 0 or 1 Bytes */ __u8 sel_res; } __packed; struct rf_tech_specific_params_nfcb_poll { __u8 sensb_res_len; - __u8 sensb_res[12]; /* 11 or 12 Bytes */ + __u8 sensb_res[NFC_SENSB_RES_MAXSIZE]; /* 11 or 12 Bytes */ } __packed; struct rf_tech_specific_params_nfcf_poll { __u8 bit_rate; __u8 sensf_res_len; - __u8 sensf_res[18]; /* 16 or 18 Bytes */ + __u8 sensf_res[NFC_SENSF_RES_MAXSIZE]; /* 16 or 18 Bytes */ } __packed; struct rf_tech_specific_params_nfcv_poll { __u8 res_flags; __u8 dsfid; - __u8 uid[8]; /* 8 Bytes */ + __u8 uid[NFC_ISO15693_UID_MAXSIZE]; /* 8 Bytes */ +} __packed; + +struct 
rf_tech_specific_params_nfcf_listen { + __u8 local_nfcid2_len; + __u8 local_nfcid2[NFC_NFCID2_MAXSIZE]; /* 0 or 8 Bytes */ } __packed; struct nci_rf_discover_ntf { @@ -365,7 +385,12 @@ struct activation_params_nfcb_poll_iso_dep { struct activation_params_poll_nfc_dep { __u8 atr_res_len; - __u8 atr_res[63]; + __u8 atr_res[NFC_ATR_RES_MAXSIZE - 2]; /* ATR_RES from byte 3 */ +}; + +struct activation_params_listen_nfc_dep { + __u8 atr_req_len; + __u8 atr_req[NFC_ATR_REQ_MAXSIZE - 2]; /* ATR_REQ from byte 3 */ }; struct nci_rf_intf_activated_ntf { @@ -382,6 +407,7 @@ struct nci_rf_intf_activated_ntf { struct rf_tech_specific_params_nfcb_poll nfcb_poll; struct rf_tech_specific_params_nfcf_poll nfcf_poll; struct rf_tech_specific_params_nfcv_poll nfcv_poll; + struct rf_tech_specific_params_nfcf_listen nfcf_listen; } rf_tech_specific_params; __u8 data_exch_rf_tech_and_mode; @@ -393,6 +419,7 @@ struct nci_rf_intf_activated_ntf { struct activation_params_nfca_poll_iso_dep nfca_poll_iso_dep; struct activation_params_nfcb_poll_iso_dep nfcb_poll_iso_dep; struct activation_params_poll_nfc_dep poll_nfc_dep; + struct activation_params_listen_nfc_dep listen_nfc_dep; } activation_params; } __packed; diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index 75d10e625c4969b0ddc806e5588815347ad9e73d..9e51bb4d841ea4915c2e70f0d7187b656ae27cab 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h @@ -4,6 +4,7 @@ * * Copyright (C) 2011 Texas Instruments, Inc. * Copyright (C) 2013 Intel Corporation. All rights reserved. + * Copyright (C) 2014 Marvell International Ltd. * * Written by Ilan Elias * @@ -49,6 +50,8 @@ enum nci_state { NCI_W4_ALL_DISCOVERIES, NCI_W4_HOST_SELECT, NCI_POLL_ACTIVE, + NCI_LISTEN_ACTIVE, + NCI_LISTEN_SLEEP, }; /* NCI timeouts */ @@ -69,6 +72,12 @@ struct nci_ops { int (*send)(struct nci_dev *ndev, struct sk_buff *skb); int (*setup)(struct nci_dev *ndev); __u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol); + int (*discover_se)(struct nci_dev *ndev); + int (*disable_se)(struct nci_dev *ndev, u32 se_idx); + int (*enable_se)(struct nci_dev *ndev, u32 se_idx); + int (*se_io)(struct nci_dev *ndev, u32 se_idx, + u8 *apdu, size_t apdu_length, + se_io_cb_t cb, void *cb_context); }; #define NCI_MAX_SUPPORTED_RF_INTERFACES 4 diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 6c583e244de2198d41effbf8a21086296a2c7e40..12adb817c27a3f157f351915e38ecc131fd52e82 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -1,5 +1,6 @@ /* * Copyright (C) 2011 Instituto Nokia de Tecnologia + * Copyright (C) 2014 Marvell International Ltd. * * Authors: * Lauro Ramos Venancio @@ -87,6 +88,7 @@ struct nfc_ops { #define NFC_TARGET_IDX_ANY -1 #define NFC_MAX_GT_LEN 48 #define NFC_ATR_RES_GT_OFFSET 15 +#define NFC_ATR_REQ_GT_OFFSET 14 /** * struct nfc_target - NFC target descriptiom diff --git a/include/net/nl802154.h b/include/net/nl802154.h index b23548e0409848530b1c7f94fb805601c46e583c..6dbd406ca41b94d9f4c7242b8b1d94e33bf570b8 100644 --- a/include/net/nl802154.h +++ b/include/net/nl802154.h @@ -1,126 +1,122 @@ +#ifndef __NL802154_H +#define __NL802154_H /* - * nl802154.h + * 802.15.4 netlink interface public header * - * Copyright (C) 2007, 2008, 2009 Siemens AG + * Copyright 2014 Alexander Aring * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. 
+ * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * */ -#ifndef IEEE802154_NL_H -#define IEEE802154_NL_H +#define NL802154_GENL_NAME "nl802154" -struct net_device; -struct ieee802154_addr; +enum nl802154_commands { +/* don't change the order or add anything between, this is ABI! */ +/* currently we don't ship this file via uapi, so ignore the note above */ + NL802154_CMD_UNSPEC, -/** - * ieee802154_nl_assoc_indic - Notify userland of an association request. - * @dev: The network device on which this association request was - * received. - * @addr: The address of the device requesting association. - * @cap: The capability information field from the device. - * - * This informs a userland coordinator of a device requesting to - * associate with the PAN controlled by the coordinator. - * - * Note: This is in section 7.3.1 of the IEEE 802.15.4-2006 document. - */ -int ieee802154_nl_assoc_indic(struct net_device *dev, - struct ieee802154_addr *addr, u8 cap); - -/** - * ieee802154_nl_assoc_confirm - Notify userland of association. - * @dev: The device which has completed association. - * @short_addr: The short address assigned to the device. - * @status: The status of the association. - * - * Inform userland of the result of an association request. If the - * association request included asking the coordinator to allocate - * a short address then it is returned in @short_addr. - * - * Note: This is in section 7.3.2 of the IEEE 802.15.4 document. - */ -int ieee802154_nl_assoc_confirm(struct net_device *dev, - __le16 short_addr, u8 status); - -/** - * ieee802154_nl_disassoc_indic - Notify userland of disassociation. - * @dev: The device on which disassociation was indicated. - * @addr: The device which is disassociating. - * @reason: The reason for the disassociation. - * - * Inform userland that a device has disassociated from the network. - * - * Note: This is in section 7.3.3 of the IEEE 802.15.4 document. - */ -int ieee802154_nl_disassoc_indic(struct net_device *dev, - struct ieee802154_addr *addr, u8 reason); - -/** - * ieee802154_nl_disassoc_confirm - Notify userland of disassociation - * completion. - * @dev: The device on which disassociation was ordered. - * @status: The result of the disassociation. 
- * - * Inform userland of the result of requesting that a device - * disassociate, or the result of requesting that we disassociate from - * a PAN managed by another coordinator. - * - * Note: This is in section 7.1.4.3 of the IEEE 802.15.4 document. - */ -int ieee802154_nl_disassoc_confirm(struct net_device *dev, - u8 status); - -/** - * ieee802154_nl_scan_confirm - Notify userland of completion of scan. - * @dev: The device which was instructed to scan. - * @status: The status of the scan operation. - * @scan_type: What type of scan was performed. - * @unscanned: Any channels that the device was unable to scan. - * @edl: The energy levels (if a passive scan). - * - * - * Note: This is in section 7.1.11 of the IEEE 802.15.4 document. - * Note: This API does not permit the return of an active scan result. - */ -int ieee802154_nl_scan_confirm(struct net_device *dev, - u8 status, u8 scan_type, u32 unscanned, u8 page, - u8 *edl/*, struct list_head *pan_desc_list */); - -/** - * ieee802154_nl_beacon_indic - Notify userland of a received beacon. - * @dev: The device on which a beacon was received. - * @panid: The PAN of the coordinator. - * @coord_addr: The short address of the coordinator on that PAN. - * - * Note: This is in section 7.1.5 of the IEEE 802.15.4 document. - * Note: This API does not provide extended information such as what - * channel the PAN is on or what the LQI of the beacon frame was on - * receipt. - * Note: This API cannot indicate a beacon frame for a coordinator - * operating in long addressing mode. - */ -int ieee802154_nl_beacon_indic(struct net_device *dev, __le16 panid, - __le16 coord_addr); + NL802154_CMD_GET_WPAN_PHY, /* can dump */ + NL802154_CMD_SET_WPAN_PHY, + NL802154_CMD_NEW_WPAN_PHY, + NL802154_CMD_DEL_WPAN_PHY, -/** - * ieee802154_nl_start_confirm - Notify userland of completion of start. - * @dev: The device which was instructed to scan. - * @status: The status of the scan operation. - * - * Note: This is in section 7.1.14 of the IEEE 802.15.4 document. - */ -int ieee802154_nl_start_confirm(struct net_device *dev, u8 status); + NL802154_CMD_GET_INTERFACE, /* can dump */ + NL802154_CMD_SET_INTERFACE, + NL802154_CMD_NEW_INTERFACE, + NL802154_CMD_DEL_INTERFACE, + + NL802154_CMD_SET_CHANNEL, + + NL802154_CMD_SET_PAN_ID, + NL802154_CMD_SET_SHORT_ADDR, + + NL802154_CMD_SET_TX_POWER, + NL802154_CMD_SET_CCA_MODE, + NL802154_CMD_SET_CCA_ED_LEVEL, + + NL802154_CMD_SET_MAX_FRAME_RETRIES, + + NL802154_CMD_SET_BACKOFF_EXPONENT, + NL802154_CMD_SET_MAX_CSMA_BACKOFFS, + + NL802154_CMD_SET_LBT_MODE, + + /* add new commands above here */ + + /* used to define NL802154_CMD_MAX below */ + __NL802154_CMD_AFTER_LAST, + NL802154_CMD_MAX = __NL802154_CMD_AFTER_LAST - 1 +}; + +enum nl802154_attrs { +/* don't change the order or add anything between, this is ABI! 
*/ +/* currently we don't ship this file via uapi, so ignore the note above */ + NL802154_ATTR_UNSPEC, + + NL802154_ATTR_WPAN_PHY, + NL802154_ATTR_WPAN_PHY_NAME, + + NL802154_ATTR_IFINDEX, + NL802154_ATTR_IFNAME, + NL802154_ATTR_IFTYPE, + + NL802154_ATTR_WPAN_DEV, + + NL802154_ATTR_PAGE, + NL802154_ATTR_CHANNEL, + + NL802154_ATTR_PAN_ID, + NL802154_ATTR_SHORT_ADDR, + + NL802154_ATTR_TX_POWER, + + NL802154_ATTR_CCA_MODE, + NL802154_ATTR_CCA_MODE3_AND, + NL802154_ATTR_CCA_ED_LEVEL, + + NL802154_ATTR_MAX_FRAME_RETRIES, + + NL802154_ATTR_MAX_BE, + NL802154_ATTR_MIN_BE, + NL802154_ATTR_MAX_CSMA_BACKOFFS, + + NL802154_ATTR_LBT_MODE, + + NL802154_ATTR_GENERATION, + + NL802154_ATTR_CHANNELS_SUPPORTED, + NL802154_ATTR_SUPPORTED_CHANNEL, + + NL802154_ATTR_EXTENDED_ADDR, + + /* add attributes here, update the policy in nl802154.c */ + + __NL802154_ATTR_AFTER_LAST, + NL802154_ATTR_MAX = __NL802154_ATTR_AFTER_LAST - 1 +}; + +enum nl802154_iftype { + /* for backwards compatibility TODO */ + NL802154_IFTYPE_UNSPEC = -1, + + NL802154_IFTYPE_NODE, + NL802154_IFTYPE_MONITOR, + NL802154_IFTYPE_COORD, + + /* keep last */ + NUM_NL802154_IFTYPES, + NL802154_IFTYPE_MAX = NUM_NL802154_IFTYPES - 1 +}; -#endif +#endif /* __NL802154_H */ diff --git a/include/net/ping.h b/include/net/ping.h index 026479b61a2d2eb713195564f4eb7fca8ed6c159..f074060bc5de763a3488054db67a67e7a0a9ab00 100644 --- a/include/net/ping.h +++ b/include/net/ping.h @@ -82,7 +82,7 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len, int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len); int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); -void ping_rcv(struct sk_buff *skb); +bool ping_rcv(struct sk_buff *skb); #ifdef CONFIG_PROC_FS struct ping_seq_afinfo { diff --git a/include/net/regulatory.h b/include/net/regulatory.h index dad7ab20a8cb204f08b2ecda724d5c1db5138ec8..b776d72d84be8f03fde9650c4b90d1b17940d543 100644 --- a/include/net/regulatory.h +++ b/include/net/regulatory.h @@ -136,6 +136,17 @@ struct regulatory_request { * otherwise initiating radiation is not allowed. This will enable the * relaxations enabled under the CFG80211_REG_RELAX_NO_IR configuration * option + * @REGULATORY_IGNORE_STALE_KICKOFF: the regulatory core will _not_ make sure + * all interfaces on this wiphy reside on allowed channels. If this flag + * is not set, upon a regdomain change, the interfaces are given a grace + * period (currently 60 seconds) to disconnect or move to an allowed + * channel. Interfaces on forbidden channels are forcibly disconnected. + * Currently these types of interfaces are supported for enforcement: + * NL80211_IFTYPE_ADHOC, NL80211_IFTYPE_STATION, NL80211_IFTYPE_AP, + * NL80211_IFTYPE_AP_VLAN, NL80211_IFTYPE_MONITOR, + * NL80211_IFTYPE_P2P_CLIENT, NL80211_IFTYPE_P2P_GO, + * NL80211_IFTYPE_P2P_DEVICE. The flag will be set by default if a device + * includes any modes unsupported for enforcement checking. 
*/ enum ieee80211_regulatory_flags { REGULATORY_CUSTOM_REG = BIT(0), @@ -144,6 +155,7 @@ enum ieee80211_regulatory_flags { REGULATORY_COUNTRY_IE_FOLLOW_POWER = BIT(3), REGULATORY_COUNTRY_IE_IGNORE = BIT(4), REGULATORY_ENABLE_RELAX_NO_IR = BIT(5), + REGULATORY_IGNORE_STALE_KICKOFF = BIT(6), }; struct ieee80211_freq_range { diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index d17ed6fb2f7043d84011fbf471b06e10bdf06db4..3d282cbb66bf1015d28f140ed3bc031ac43afaed 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -219,7 +219,6 @@ struct tcf_proto_ops { void (*destroy)(struct tcf_proto*); unsigned long (*get)(struct tcf_proto*, u32 handle); - void (*put)(struct tcf_proto*, unsigned long); int (*change)(struct net *net, struct sk_buff *, struct tcf_proto*, unsigned long, u32 handle, struct nlattr **, diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 72a31db47ded2837f967247cc17840aed90197ab..487ef34bbd63ff1cfe511c7ee8b1501593a14de3 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -219,7 +219,7 @@ struct sctp_chunk *sctp_make_abort_no_data(const struct sctp_association *, const struct sctp_chunk *, __u32 tsn); struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *, - const struct msghdr *, size_t msg_len); + struct msghdr *, size_t msg_len); struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *, const struct sctp_chunk *, const __u8 *, diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 4ff3f67be62caa2676b396fe1ff35076929415c1..2bb2fcf5b11f0387c81b860ad2d3a6607da19a7d 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -531,7 +531,7 @@ struct sctp_datamsg { struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *, struct sctp_sndrcvinfo *, - struct msghdr *, int len); + struct iov_iter *); void sctp_datamsg_free(struct sctp_datamsg *); void sctp_datamsg_put(struct sctp_datamsg *); void sctp_chunk_fail(struct sctp_chunk *, int error); @@ -647,8 +647,8 @@ struct sctp_chunk { void sctp_chunk_hold(struct sctp_chunk *); void sctp_chunk_put(struct sctp_chunk *); -int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len, - struct iovec *data); +int sctp_user_addto_chunk(struct sctp_chunk *chunk, int len, + struct iov_iter *from); void sctp_chunk_free(struct sctp_chunk *); void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data); struct sctp_chunk *sctp_chunkify(struct sk_buff *, @@ -1116,7 +1116,6 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len, sctp_scope_t sctp_scope(const union sctp_addr *); int sctp_in_scope(struct net *net, const union sctp_addr *addr, const sctp_scope_t scope); int sctp_is_any(struct sock *sk, const union sctp_addr *addr); -int sctp_addr_is_valid(const union sctp_addr *addr); int sctp_is_ep_boundall(struct sock *sk); diff --git a/include/net/sock.h b/include/net/sock.h index 7ff44e062a386646126855bd8793bd51e4a54793..c3e83c9a8ab82f7dda0e3ed0e0f701bf9d6df362 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -273,6 +273,7 @@ struct cg_proto; * @sk_rcvtimeo: %SO_RCVTIMEO setting * @sk_sndtimeo: %SO_SNDTIMEO setting * @sk_rxhash: flow hash received from netif layer + * @sk_incoming_cpu: record cpu processing incoming packets * @sk_txhash: computed flow hash for use on transmit * @sk_filter: socket filtering instructions * @sk_protinfo: private area, net family specific, when not using slab @@ -350,6 +351,12 @@ struct sock { #ifdef CONFIG_RPS __u32 
sk_rxhash; #endif + u16 sk_incoming_cpu; + /* 16bit hole + * Warning: sk_incoming_cpu can be set from softirq, + * Do not use this hole without fully understanding possible issues. + */ + __u32 sk_txhash; #ifdef CONFIG_NET_RX_BUSY_POLL unsigned int sk_napi_id; @@ -833,6 +840,11 @@ static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) return sk->sk_backlog_rcv(sk, skb); } +static inline void sk_incoming_cpu_update(struct sock *sk) +{ + sk->sk_incoming_cpu = raw_smp_processor_id(); +} + static inline void sock_rps_record_flow_hash(__u32 hash) { #ifdef CONFIG_RPS @@ -1865,29 +1877,6 @@ static inline int skb_copy_to_page_nocache(struct sock *sk, char __user *from, return 0; } -static inline int skb_copy_to_page(struct sock *sk, char __user *from, - struct sk_buff *skb, struct page *page, - int off, int copy) -{ - if (skb->ip_summed == CHECKSUM_NONE) { - int err = 0; - __wsum csum = csum_and_copy_from_user(from, - page_address(page) + off, - copy, 0, &err); - if (err) - return err; - skb->csum = csum_block_add(skb->csum, csum, skb->len); - } else if (copy_from_user(page_address(page) + off, from, copy)) - return -EFAULT; - - skb->len += copy; - skb->data_len += copy; - skb->truesize += copy; - sk->sk_wmem_queued += copy; - sk_mem_charge(sk, copy); - return 0; -} - /** * sk_wmem_alloc_get - returns write allocations * @sk: socket @@ -2269,16 +2258,6 @@ bool sk_ns_capable(const struct sock *sk, bool sk_capable(const struct sock *sk, int cap); bool sk_net_capable(const struct sock *sk, int cap); -/* - * Enable debug/info messages - */ -extern int net_msg_warn; -#define NETDEBUG(fmt, args...) \ - do { if (net_msg_warn) printk(fmt,##args); } while (0) - -#define LIMIT_NETDEBUG(fmt, args...) \ - do { if (net_msg_warn && net_ratelimit()) printk(fmt,##args); } while(0) - extern __u32 sysctl_wmem_max; extern __u32 sysctl_rmem_max; diff --git a/include/net/switchdev.h b/include/net/switchdev.h new file mode 100644 index 0000000000000000000000000000000000000000..8a6d1641fd9bb8d1c3ddc053c479a9f0531b5b18 --- /dev/null +++ b/include/net/switchdev.h @@ -0,0 +1,37 @@ +/* + * include/net/switchdev.h - Switch device API + * Copyright (c) 2014 Jiri Pirko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version.
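The new sk_incoming_cpu field is exposed to userspace through the SO_INCOMING_CPU socket option added to asm-generic/socket.h later in this patch. A minimal userspace sketch of reading it (helper name, fallback define and error handling are illustrative only; at this point the option is read-only):

    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef SO_INCOMING_CPU
    #define SO_INCOMING_CPU 49      /* matches asm-generic/socket.h below */
    #endif

    /* print the CPU that last processed incoming packets for this socket */
    static void print_incoming_cpu(int fd)
    {
            int cpu = -1;
            socklen_t len = sizeof(cpu);

            if (getsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, &len) == 0)
                    printf("last rx cpu: %d\n", cpu);
    }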
 + */ +#ifndef _LINUX_SWITCHDEV_H_ +#define _LINUX_SWITCHDEV_H_ + +#include <linux/netdevice.h> + +#ifdef CONFIG_NET_SWITCHDEV + +int netdev_switch_parent_id_get(struct net_device *dev, + struct netdev_phys_item_id *psid); +int netdev_switch_port_stp_update(struct net_device *dev, u8 state); + +#else + +static inline int netdev_switch_parent_id_get(struct net_device *dev, + struct netdev_phys_item_id *psid) +{ + return -EOPNOTSUPP; +} + +static inline int netdev_switch_port_stp_update(struct net_device *dev, + u8 state) +{ + return -EOPNOTSUPP; +} + +#endif + +#endif /* _LINUX_SWITCHDEV_H_ */ diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h new file mode 100644 index 0000000000000000000000000000000000000000..93b70ade1ff3fb9a66ec1178d652fe3083cd338c --- /dev/null +++ b/include/net/tc_act/tc_vlan.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2014 Jiri Pirko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __NET_TC_VLAN_H +#define __NET_TC_VLAN_H + +#include <net/act_api.h> + +#define VLAN_F_POP 0x1 +#define VLAN_F_PUSH 0x2 + +struct tcf_vlan { + struct tcf_common common; + int tcfv_action; + u16 tcfv_push_vid; + __be16 tcfv_push_proto; +}; +#define to_vlan(a) \ + container_of(a->priv, struct tcf_vlan, common) + +#endif /* __NET_TC_VLAN_H */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 4062b4f0d121934a98f0c8f8f32dae012c68f2b4..f50f29faf76f1fbcc5237de63c76f7c8200f4a6b 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -55,9 +55,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #define MAX_TCP_HEADER (128 + MAX_HEADER) #define MAX_TCP_OPTION_SPACE 40 -/* +/* * Never offer a window over 32767 without using window scaling. Some - * poor stacks do signed 16bit maths! + * poor stacks do signed 16bit maths! */ #define MAX_TCP_WINDOW 32767U @@ -70,9 +70,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); /* After receiving this amount of duplicate ACKs fast retransmit starts. */ #define TCP_FASTRETRANS_THRESH 3 -/* Maximal reordering. */ -#define TCP_MAX_REORDERING 127 - /* Maximal number of ACKs sent quickly to accelerate slow-start.
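As a sketch of how the switchdev API just added might be called from kernel code, assuming a port netdev whose driver implements the new ndo (the caller and the hex dump are hypothetical; only the two declarations above are from this patch):

    #include <linux/netdevice.h>
    #include <linux/printk.h>
    #include <net/switchdev.h>

    /* print the physical switch ID of the switch this port belongs to */
    static void show_switch_id(struct net_device *dev)
    {
            struct netdev_phys_item_id psid;

            if (netdev_switch_parent_id_get(dev, &psid) == 0)
                    print_hex_dump_bytes("switch id: ", DUMP_PREFIX_NONE,
                                         psid.id, psid.id_len);
    }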
*/ #define TCP_MAX_QUICKACKS 16U @@ -167,7 +164,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); /* * TCP option */ - + #define TCPOPT_NOP 1 /* Padding */ #define TCPOPT_EOL 0 /* End of options */ #define TCPOPT_MSS 2 /* Segment size negotiating */ @@ -252,6 +249,7 @@ extern int sysctl_tcp_abort_on_overflow; extern int sysctl_tcp_max_orphans; extern int sysctl_tcp_fack; extern int sysctl_tcp_reordering; +extern int sysctl_tcp_max_reordering; extern int sysctl_tcp_dsack; extern long sysctl_tcp_mem[3]; extern int sysctl_tcp_wmem[3]; @@ -492,17 +490,16 @@ u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, u16 *mssp); __u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb, __u16 *mss); -#endif - __u32 cookie_init_timestamp(struct request_sock *req); -bool cookie_check_timestamp(struct tcp_options_received *opt, struct net *net, - bool *ecn_ok); +bool cookie_timestamp_decode(struct tcp_options_received *opt); +bool cookie_ecn_ok(const struct tcp_options_received *opt, + const struct net *net, const struct dst_entry *dst); /* From net/ipv6/syncookies.c */ int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th, u32 cookie); struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb); -#ifdef CONFIG_SYN_COOKIES + u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph, const struct tcphdr *th, u16 *mssp); __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, @@ -1104,16 +1101,16 @@ static inline int tcp_win_from_space(int space) space - (space>>sysctl_tcp_adv_win_scale); } -/* Note: caller must be prepared to deal with negative returns */ +/* Note: caller must be prepared to deal with negative returns */ static inline int tcp_space(const struct sock *sk) { return tcp_win_from_space(sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)); -} +} static inline int tcp_full_space(const struct sock *sk) { - return tcp_win_from_space(sk->sk_rcvbuf); + return tcp_win_from_space(sk->sk_rcvbuf); } static inline void tcp_openreq_init(struct request_sock *req, diff --git a/include/net/udplite.h b/include/net/udplite.h index 2caadabcd07baf6552098082fcb78672f772dcb5..ae7c8d1fbcadbccd635edc56a54dba6d94563170 100644 --- a/include/net/udplite.h +++ b/include/net/udplite.h @@ -19,7 +19,9 @@ extern struct udp_table udplite_table; static __inline__ int udplite_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) { - return memcpy_fromiovecend(to, (struct iovec *) from, offset, len); + struct msghdr *msg = from; + /* XXX: stripping const */ + return memcpy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len); } /* Designate sk as UDP-Lite socket */ @@ -40,7 +42,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) * checksum. UDP-Lite (like IPv6) mandates checksums, hence packets * with a zero checksum field are illegal. */ if (uh->check == 0) { - LIMIT_NETDEBUG(KERN_DEBUG "UDPLite: zeroed checksum field\n"); + net_dbg_ratelimited("UDPLite: zeroed checksum field\n"); return 1; } @@ -52,8 +54,8 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) /* * Coverage length violates RFC 3828: log and discard silently. 
*/ - LIMIT_NETDEBUG(KERN_DEBUG "UDPLite: bad csum coverage %d/%d\n", - cscov, skb->len); + net_dbg_ratelimited("UDPLite: bad csum coverage %d/%d\n", + cscov, skb->len); return 1; } else if (cscov < skb->len) { diff --git a/include/net/wpan-phy.h b/include/net/wpan-phy.h deleted file mode 100644 index 10ab0fc6d4f79a8a608d0ce642aae294636dcecc..0000000000000000000000000000000000000000 --- a/include/net/wpan-phy.h +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright (C) 2007, 2008, 2009 Siemens AG - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Written by: - * Dmitry Eremin-Solenikov - */ - -#ifndef WPAN_PHY_H -#define WPAN_PHY_H - -#include -#include -#include - -/* According to the IEEE 802.15.4 stadard the upper most significant bits of - * the 32-bit channel bitmaps shall be used as an integer value to specify 32 - * possible channel pages. The lower 27 bits of the channel bit map shall be - * used as a bit mask to specify channel numbers within a channel page. - */ -#define WPAN_NUM_CHANNELS 27 -#define WPAN_NUM_PAGES 32 - -struct wpan_phy { - struct mutex pib_lock; - - /* - * This is a PIB according to 802.15.4-2011. - * We do not provide timing-related variables, as they - * aren't used outside of driver - */ - u8 current_channel; - u8 current_page; - u32 channels_supported[32]; - s8 transmit_power; - u8 cca_mode; - u8 min_be; - u8 max_be; - u8 csma_retries; - s8 frame_retries; - - bool lbt; - s32 cca_ed_level; - - struct device dev; - int idx; - - struct net_device *(*add_iface)(struct wpan_phy *phy, - const char *name, int type); - void (*del_iface)(struct wpan_phy *phy, struct net_device *dev); - - int (*set_txpower)(struct wpan_phy *phy, int db); - int (*set_lbt)(struct wpan_phy *phy, bool on); - int (*set_cca_mode)(struct wpan_phy *phy, u8 cca_mode); - int (*set_cca_ed_level)(struct wpan_phy *phy, int level); - int (*set_csma_params)(struct wpan_phy *phy, u8 min_be, u8 max_be, - u8 retries); - int (*set_frame_retries)(struct wpan_phy *phy, s8 retries); - - char priv[0] __attribute__((__aligned__(NETDEV_ALIGN))); -}; - -#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev) - -struct wpan_phy *wpan_phy_alloc(size_t priv_size); -static inline void wpan_phy_set_dev(struct wpan_phy *phy, struct device *dev) -{ - phy->dev.parent = dev; -} -int wpan_phy_register(struct wpan_phy *phy); -void wpan_phy_unregister(struct wpan_phy *phy); -void wpan_phy_free(struct wpan_phy *phy); -/* Same semantics as for class_for_each_device */ -int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data), void *data); - -static inline void *wpan_phy_priv(struct wpan_phy *phy) -{ - BUG_ON(!phy); - return &phy->priv; -} - -struct wpan_phy *wpan_phy_find(const char *str); - -static inline void wpan_phy_put(struct wpan_phy *phy) -{ - put_device(&phy->dev); -} - -static inline const char *wpan_phy_name(struct wpan_phy *phy) -{ - return dev_name(&phy->dev); -} -#endif diff --git 
a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index ea0796bdcf88404ef0f127eb6e64ba00c16ea856..5c15c2a5c1235157e75741cdcef290c61709f3a8 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -82,4 +82,9 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* __ASM_GENERIC_SOCKET_H */ diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index c54fcb5993c31a5eb3eb24074e3d2bf8b6d42334..d8e1716707baed8db48ccae88a299fc9106544ba 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -37,27 +37,27 @@ header-y += aio_abi.h header-y += apm_bios.h header-y += arcfb.h header-y += atalk.h -header-y += atm.h -header-y += atm_eni.h -header-y += atm_he.h -header-y += atm_idt77105.h -header-y += atm_nicstar.h -header-y += atm_tcp.h -header-y += atm_zatm.h header-y += atmapi.h header-y += atmarp.h header-y += atmbr2684.h header-y += atmclip.h header-y += atmdev.h +header-y += atm_eni.h +header-y += atm.h +header-y += atm_he.h +header-y += atm_idt77105.h header-y += atmioc.h header-y += atmlec.h header-y += atmmpc.h +header-y += atm_nicstar.h header-y += atmppp.h header-y += atmsap.h header-y += atmsvc.h +header-y += atm_tcp.h +header-y += atm_zatm.h header-y += audit.h -header-y += auto_fs.h header-y += auto_fs4.h +header-y += auto_fs.h header-y += auxvec.h header-y += ax25.h header-y += b1lli.h @@ -67,8 +67,8 @@ header-y += bfs_fs.h header-y += binfmts.h header-y += blkpg.h header-y += blktrace_api.h -header-y += bpf.h header-y += bpf_common.h +header-y += bpf.h header-y += bpqether.h header-y += bsg.h header-y += btrfs.h @@ -93,21 +93,21 @@ header-y += cyclades.h header-y += cycx_cfm.h header-y += dcbnl.h header-y += dccp.h -header-y += dlm.h +header-y += dlmconstants.h header-y += dlm_device.h +header-y += dlm.h header-y += dlm_netlink.h header-y += dlm_plock.h -header-y += dlmconstants.h header-y += dm-ioctl.h header-y += dm-log-userspace.h header-y += dn.h header-y += dqblk_xfs.h header-y += edd.h header-y += efs_fs_sb.h +header-y += elfcore.h header-y += elf-em.h header-y += elf-fdpic.h header-y += elf.h -header-y += elfcore.h header-y += errno.h header-y += errqueue.h header-y += ethtool.h @@ -131,15 +131,15 @@ header-y += fsl_hypervisor.h header-y += fuse.h header-y += futex.h header-y += gameport.h -header-y += gen_stats.h header-y += genetlink.h +header-y += gen_stats.h header-y += gfs2_ondisk.h header-y += gigaset_dev.h -header-y += hdlc.h header-y += hdlcdrv.h +header-y += hdlc.h header-y += hdreg.h -header-y += hid.h header-y += hiddev.h +header-y += hid.h header-y += hidraw.h header-y += hpet.h header-y += hsr_netlink.h @@ -151,7 +151,6 @@ header-y += i2o-dev.h header-y += i8k.h header-y += icmp.h header-y += icmpv6.h -header-y += if.h header-y += if_addr.h header-y += if_addrlabel.h header-y += if_alg.h @@ -165,6 +164,7 @@ header-y += if_ether.h header-y += if_fc.h header-y += if_fddi.h header-y += if_frad.h +header-y += if.h header-y += if_hippi.h header-y += if_infiniband.h header-y += if_link.h @@ -182,40 +182,40 @@ header-y += if_tunnel.h header-y += if_vlan.h header-y += if_x25.h header-y += igmp.h -header-y += in.h header-y += in6.h -header-y += in_route.h header-y += inet_diag.h +header-y += in.h header-y += inotify.h header-y += input.h +header-y += in_route.h header-y += ioctl.h -header-y += ip.h header-y += ip6_tunnel.h -header-y += ip_vs.h header-y += ipc.h +header-y += ip.h header-y += ipmi.h header-y += 
ipmi_msgdefs.h header-y += ipsec.h header-y += ipv6.h header-y += ipv6_route.h +header-y += ip_vs.h header-y += ipx.h header-y += irda.h header-y += irqnr.h -header-y += isdn.h header-y += isdn_divertif.h -header-y += isdn_ppp.h +header-y += isdn.h header-y += isdnif.h +header-y += isdn_ppp.h header-y += iso_fs.h -header-y += ivtv.h header-y += ivtvfb.h +header-y += ivtv.h header-y += ixjuser.h header-y += jffs2.h header-y += joystick.h -header-y += kd.h header-y += kdev_t.h -header-y += kernel-page-flags.h -header-y += kernel.h +header-y += kd.h header-y += kernelcapi.h +header-y += kernel.h +header-y += kernel-page-flags.h header-y += kexec.h header-y += keyboard.h header-y += keyctl.h @@ -231,6 +231,7 @@ ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h \ header-y += kvm_para.h endif +header-y += hw_breakpoint.h header-y += l2tp.h header-y += libc-compat.h header-y += limits.h @@ -256,43 +257,43 @@ header-y += mman.h header-y += mmtimer.h header-y += mpls.h header-y += mqueue.h -header-y += mroute.h header-y += mroute6.h +header-y += mroute.h header-y += msdos_fs.h header-y += msg.h header-y += mtio.h -header-y += n_r3964.h header-y += nbd.h -header-y += ncp.h header-y += ncp_fs.h +header-y += ncp.h header-y += ncp_mount.h header-y += ncp_no.h header-y += neighbour.h -header-y += net.h -header-y += net_dropmon.h -header-y += net_tstamp.h header-y += netconf.h header-y += netdevice.h -header-y += netlink_diag.h -header-y += netfilter.h +header-y += net_dropmon.h header-y += netfilter_arp.h header-y += netfilter_bridge.h header-y += netfilter_decnet.h +header-y += netfilter.h header-y += netfilter_ipv4.h header-y += netfilter_ipv6.h +header-y += net.h +header-y += netlink_diag.h header-y += netlink.h header-y += netrom.h +header-y += net_tstamp.h header-y += nfc.h -header-y += nfs.h header-y += nfs2.h header-y += nfs3.h header-y += nfs4.h header-y += nfs4_mount.h +header-y += nfsacl.h header-y += nfs_fs.h +header-y += nfs.h header-y += nfs_idmap.h header-y += nfs_mount.h -header-y += nfsacl.h header-y += nl80211.h +header-y += n_r3964.h header-y += nubus.h header-y += nvme.h header-y += nvram.h @@ -312,16 +313,16 @@ header-y += pfkeyv2.h header-y += pg.h header-y += phantom.h header-y += phonet.h +header-y += pktcdvd.h header-y += pkt_cls.h header-y += pkt_sched.h -header-y += pktcdvd.h header-y += pmu.h header-y += poll.h header-y += posix_types.h header-y += ppdev.h header-y += ppp-comp.h -header-y += ppp-ioctl.h header-y += ppp_defs.h +header-y += ppp-ioctl.h header-y += pps.h header-y += prctl.h header-y += psci.h @@ -353,13 +354,13 @@ header-y += seccomp.h header-y += securebits.h header-y += selinux_netlink.h header-y += sem.h -header-y += serial.h header-y += serial_core.h +header-y += serial.h header-y += serial_reg.h header-y += serio.h header-y += shm.h -header-y += signal.h header-y += signalfd.h +header-y += signal.h header-y += smiapp.h header-y += snmp.h header-y += sock_diag.h @@ -368,8 +369,8 @@ header-y += sockios.h header-y += som.h header-y += sonet.h header-y += sonypi.h -header-y += sound.h header-y += soundcard.h +header-y += sound.h header-y += stat.h header-y += stddef.h header-y += string.h @@ -388,11 +389,12 @@ header-y += time.h header-y += times.h header-y += timex.h header-y += tiocl.h -header-y += tipc.h header-y += tipc_config.h +header-y += tipc_netlink.h +header-y += tipc.h header-y += toshiba.h -header-y += tty.h header-y += tty_flags.h +header-y += tty.h header-y += types.h header-y += udf_fs_i.h header-y += udp.h @@ -424,12 
+426,12 @@ header-y += virtio_blk.h header-y += virtio_config.h header-y += virtio_console.h header-y += virtio_ids.h -header-y += virtio_types.h header-y += virtio_net.h header-y += virtio_pci.h header-y += virtio_ring.h header-y += virtio_rng.h header-y += virtio_scsi.h +header-y += virtio_types.h header-y += vm_sockets.h header-y += vt.h header-y += wait.h @@ -440,6 +442,5 @@ header-y += wireless.h header-y += x25.h header-y += xattr.h header-y += xfrm.h -header-y += hw_breakpoint.h header-y += zorro.h header-y += zorro_ids.h diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index d18316f9e9c488b07f66a3dee1493336d9970d98..45da7ec7d2742235e05b2ea53ab19d80af64794f 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -82,7 +82,7 @@ enum bpf_cmd { /* create or update key/value pair in a given map * err = bpf(BPF_MAP_UPDATE_ELEM, union bpf_attr *attr, u32 size) - * Using attr->map_fd, attr->key, attr->value + * Using attr->map_fd, attr->key, attr->value, attr->flags * returns zero or negative error */ BPF_MAP_UPDATE_ELEM, @@ -111,12 +111,20 @@ enum bpf_map_type { BPF_MAP_TYPE_UNSPEC, + BPF_MAP_TYPE_HASH, + BPF_MAP_TYPE_ARRAY, }; enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC, + BPF_PROG_TYPE_SOCKET_FILTER, }; +/* flags for BPF_MAP_UPDATE_ELEM command */ +#define BPF_ANY 0 /* create new element or update existing */ +#define BPF_NOEXIST 1 /* create new element if it didn't exist */ +#define BPF_EXIST 2 /* update existing element */ + union bpf_attr { struct { /* anonymous struct used by BPF_MAP_CREATE command */ __u32 map_type; /* one of enum bpf_map_type */ @@ -132,6 +140,7 @@ union bpf_attr { __aligned_u64 value; __aligned_u64 next_key; }; + __u64 flags; }; struct { /* anonymous struct used by BPF_PROG_LOAD command */ @@ -150,6 +159,9 @@ union bpf_attr { */ enum bpf_func_id { BPF_FUNC_unspec, + BPF_FUNC_map_lookup_elem, /* void *map_lookup_elem(&map, &key) */ + BPF_FUNC_map_update_elem, /* int map_update_elem(&map, &key, &value, flags) */ + BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */ __BPF_FUNC_MAX_ID, }; diff --git a/include/uapi/linux/can/error.h b/include/uapi/linux/can/error.h index c247446ab25a4e564a068ae97e154cf7972f0a1d..1c508be9687fc593dc4a5baee0e0e443ff4e8e71 100644 --- a/include/uapi/linux/can/error.h +++ b/include/uapi/linux/can/error.h @@ -71,6 +71,7 @@ #define CAN_ERR_CRTL_TX_PASSIVE 0x20 /* reached error passive status TX */ /* (at least one error counter exceeds */ /* the protocol-defined level of 127) */ +#define CAN_ERR_CRTL_ACTIVE 0x40 /* recovered to error active state */ /* error in CAN protocol (type) / data[2] */ #define CAN_ERR_PROT_UNSPEC 0x00 /* unspecified */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 99b43056a6feb3db4617143df6d7753bbf124ed2..5f66d9c2889d0a50017665579ff0699197970cb4 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -534,6 +534,7 @@ struct ethtool_pauseparam { * @ETH_SS_NTUPLE_FILTERS: Previously used with %ETHTOOL_GRXNTUPLE; * now deprecated * @ETH_SS_FEATURES: Device feature names + * @ETH_SS_RSS_HASH_FUNCS: RSS hash function names */ enum ethtool_stringset { ETH_SS_TEST = 0, @@ -541,6 +542,7 @@ enum ethtool_stringset { ETH_SS_PRIV_FLAGS, ETH_SS_NTUPLE_FILTERS, ETH_SS_FEATURES, + ETH_SS_RSS_HASH_FUNCS, }; /** @@ -884,6 +886,8 @@ struct ethtool_rxfh_indir { * @key_size: On entry, the array size of the user buffer for the hash key, * which may be zero. On return from %ETHTOOL_GRSSH, the size of the * hardware hash key.
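For the new BPF_MAP_UPDATE_ELEM flags defined in the bpf.h hunk above, a minimal userspace sketch against the raw bpf(2) syscall, assuming headers that provide __NR_bpf and the extended union bpf_attr (the wrapper name is ours, not part of the ABI):

    #include <linux/bpf.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* illustrative wrapper: insert only if the key is not already present */
    static int map_insert_new(int map_fd, const void *key, const void *value)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.map_fd = map_fd;
            attr.key = (__u64)(unsigned long)key;
            attr.value = (__u64)(unsigned long)value;
            attr.flags = BPF_NOEXIST;       /* fails with EEXIST if key exists */

            return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
    }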
 + * @hfunc: Defines the RSS hash function currently in use by the HW, or the + * one to be set with %ETHTOOL_SRSSH. Valid values are one of the %ETH_RSS_HASH_*. * @rsvd: Reserved for future extensions. * @rss_config: RX ring/queue index for each hash value i.e., indirection table * of @indir_size __u32 elements, followed by hash key of @key_size @@ -893,14 +897,16 @@ struct ethtool_rxfh_indir { * size should be returned. For %ETHTOOL_SRSSH, an @indir_size of * %ETH_RXFH_INDIR_NO_CHANGE means that indir table setting is not requested * and a @indir_size of zero means the indir table should be reset to default - * values. + * values. A @hfunc of zero means that hash function setting is not requested. */ struct ethtool_rxfh { __u32 cmd; __u32 rss_context; __u32 indir_size; __u32 key_size; - __u32 rsvd[2]; + __u8 hfunc; + __u8 rsvd8[3]; + __u32 rsvd32; __u32 rss_config[0]; }; #define ETH_RXFH_INDIR_NO_CHANGE 0xffffffff @@ -1213,6 +1219,10 @@ enum ethtool_sfeatures_retval_bits { #define SUPPORTED_40000baseCR4_Full (1 << 24) #define SUPPORTED_40000baseSR4_Full (1 << 25) #define SUPPORTED_40000baseLR4_Full (1 << 26) +#define SUPPORTED_56000baseKR4_Full (1 << 27) +#define SUPPORTED_56000baseCR4_Full (1 << 28) +#define SUPPORTED_56000baseSR4_Full (1 << 29) +#define SUPPORTED_56000baseLR4_Full (1 << 30) #define ADVERTISED_10baseT_Half (1 << 0) #define ADVERTISED_10baseT_Full (1 << 1) @@ -1241,6 +1251,10 @@ enum ethtool_sfeatures_retval_bits { #define ADVERTISED_40000baseCR4_Full (1 << 24) #define ADVERTISED_40000baseSR4_Full (1 << 25) #define ADVERTISED_40000baseLR4_Full (1 << 26) +#define ADVERTISED_56000baseKR4_Full (1 << 27) +#define ADVERTISED_56000baseCR4_Full (1 << 28) +#define ADVERTISED_56000baseSR4_Full (1 << 29) +#define ADVERTISED_56000baseLR4_Full (1 << 30) /* The following are all involved in forcing a particular link * mode for the device for setting things. When getting the @@ -1248,12 +1262,16 @@ enum ethtool_sfeatures_retval_bits { * it was forced up into this mode or autonegotiated. */ -/* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */ +/* The forced speed, 10Mb, 100Mb, gigabit, [2.5|10|20|40|56]GbE. */ #define SPEED_10 10 #define SPEED_100 100 #define SPEED_1000 1000 #define SPEED_2500 2500 #define SPEED_10000 10000 +#define SPEED_20000 20000 +#define SPEED_40000 40000 +#define SPEED_56000 56000 + #define SPEED_UNKNOWN -1 /* Duplex, half or full.
*/ @@ -1343,6 +1361,10 @@ enum ethtool_sfeatures_retval_bits { #define ETH_MODULE_SFF_8079_LEN 256 #define ETH_MODULE_SFF_8472 0x2 #define ETH_MODULE_SFF_8472_LEN 512 +#define ETH_MODULE_SFF_8636 0x3 +#define ETH_MODULE_SFF_8636_LEN 256 +#define ETH_MODULE_SFF_8436 0x4 +#define ETH_MODULE_SFF_8436_LEN 256 /* Reset flags */ /* The reset() operation must clear the flags for the components which diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index da17e456908d2d16d1500499858296a042b3186c..b03ee8f62d3c2a019924e4aabf02fdc462c75ffb 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -105,6 +105,7 @@ struct __fdb_entry { #define BRIDGE_MODE_VEB 0 /* Default loopback mode */ #define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ +#define BRIDGE_MODE_UNDEF 0xFFFF /* mode undefined */ /* Bridge management nested attributes * [IFLA_AF_SPEC] = { diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 0bdb77e16875342ad86fe165727a1034a395b731..f7d0d2d7173aea9840ffe2a60d43021a9fed742a 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -145,6 +145,7 @@ enum { IFLA_CARRIER, IFLA_PHYS_PORT_ID, IFLA_CARRIER_CHANGES, + IFLA_PHYS_SWITCH_ID, __IFLA_MAX }; @@ -243,6 +244,8 @@ enum { IFLA_BRPORT_FAST_LEAVE, /* multicast fast leave */ IFLA_BRPORT_LEARNING, /* mac learning */ IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */ + IFLA_BRPORT_PROXYARP, /* proxy ARP */ + IFLA_BRPORT_LEARNING_SYNC, /* mac learning sync from device */ __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) @@ -329,6 +332,21 @@ enum macvlan_macaddr_mode { #define MACVLAN_FLAG_NOPROMISC 1 +/* IPVLAN section */ +enum { + IFLA_IPVLAN_UNSPEC, + IFLA_IPVLAN_MODE, + __IFLA_IPVLAN_MAX +}; + +#define IFLA_IPVLAN_MAX (__IFLA_IPVLAN_MAX - 1) + +enum ipvlan_mode { + IPVLAN_MODE_L2 = 0, + IPVLAN_MODE_L3, + IPVLAN_MODE_MAX +}; + /* VXLAN section */ enum { IFLA_VXLAN_UNSPEC, diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h index 280d9e092283673c4ac6ef37c3b5ecec2cb406e6..bd3cc11a431f9e5bb01f18d84d3f388915043367 100644 --- a/include/uapi/linux/if_tunnel.h +++ b/include/uapi/linux/if_tunnel.h @@ -69,6 +69,7 @@ enum tunnel_encap_types { #define TUNNEL_ENCAP_FLAG_CSUM (1<<0) #define TUNNEL_ENCAP_FLAG_CSUM6 (1<<1) +#define TUNNEL_ENCAP_FLAG_REMCSUM (1<<2) /* SIT-mode i_flags */ #define SIT_ISATAP 0x0001 diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index efa2666f4b8af10ca72c26206f82e527d7adc8e6..e863d088b9a5a54cfd18d1e507c6c6e758ba5516 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -164,6 +164,7 @@ enum { DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL, DEVCONF_SUPPRESS_FRAG_NDISC, DEVCONF_ACCEPT_RA_FROM_LOCAL, + DEVCONF_USE_OPTIMISTIC, DEVCONF_MAX }; diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index 4a1d7e96dfe3d7ccf8669428a943ed53af37828e..f3d77f9f1e0bb582b1f1ca02fd8bd99ff60729dd 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -35,11 +35,11 @@ enum { */ #define NTF_USE 0x01 -#define NTF_PROXY 0x08 /* == ATF_PUBL */ -#define NTF_ROUTER 0x80 - #define NTF_SELF 0x02 #define NTF_MASTER 0x04 +#define NTF_PROXY 0x08 /* == ATF_PUBL */ +#define NTF_EXT_LEARNED 0x10 +#define NTF_ROUTER 0x80 /* * Neighbor Cache Entry States. 
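Tying back to the ethtool_rxfh @hfunc field introduced above: userspace can fetch the RSS hash function with a sizes-only %ETHTOOL_GRSSH request. A minimal sketch, assuming headers with the new field (whether a driver fills @hfunc on a zero-sized query is driver-dependent; error handling is illustrative):

    #include <linux/ethtool.h>
    #include <linux/sockios.h>
    #include <net/if.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* sizes-only GRSSH query: indir_size/key_size stay 0, but the
     * driver may still report the hash function byte */
    static int get_rss_hfunc(int fd, const char *ifname, __u8 *hfunc)
    {
            struct ethtool_rxfh rxfh;
            struct ifreq ifr;

            memset(&rxfh, 0, sizeof(rxfh));
            rxfh.cmd = ETHTOOL_GRSSH;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (char *)&rxfh;

            if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                    return -1;

            *hfunc = rxfh.hfunc;    /* one of the ETH_RSS_HASH_* values */
            return 0;
    }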
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h index ff354021bb691c5e52dde58ec5010061f3ab37b8..edbc888ceb51c1ffa66b11c1f58eea0343b80faf 100644 --- a/include/uapi/linux/net_tstamp.h +++ b/include/uapi/linux/net_tstamp.h @@ -23,8 +23,9 @@ enum { SOF_TIMESTAMPING_OPT_ID = (1<<7), SOF_TIMESTAMPING_TX_SCHED = (1<<8), SOF_TIMESTAMPING_TX_ACK = (1<<9), + SOF_TIMESTAMPING_OPT_CMSG = (1<<10), - SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_TX_ACK, + SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_CMSG, SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) | SOF_TIMESTAMPING_LAST }; diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h index ca03119111a2182dd5ff9b4f930153cf066e4ad3..5ab4e60894cf3269e176831cfbcee1c1751b8281 100644 --- a/include/uapi/linux/netfilter/ipset/ip_set.h +++ b/include/uapi/linux/netfilter/ipset/ip_set.h @@ -256,11 +256,17 @@ enum { IPSET_COUNTER_GT, }; -struct ip_set_counter_match { +/* Backward compatibility for set match v3 */ +struct ip_set_counter_match0 { __u8 op; __u64 value; }; +struct ip_set_counter_match { + __aligned_u64 value; + __u8 op; +}; + /* Interface to iptables/ip6tables */ #define SO_IP_SET 83 diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index f31fe7b660a555f8d646a415cec7f0b35ea57f9c..832bc46db78bc3498e8ab18b6e7a50ba12c86acd 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -579,6 +579,7 @@ enum nft_exthdr_attributes { * @NFT_META_CPU: cpu id through smp_processor_id() * @NFT_META_IIFGROUP: packet input interface group * @NFT_META_OIFGROUP: packet output interface group + * @NFT_META_CGROUP: socket control group (skb->sk->sk_classid) */ enum nft_meta_keys { NFT_META_LEN, @@ -604,6 +605,7 @@ enum nft_meta_keys { NFT_META_CPU, NFT_META_IIFGROUP, NFT_META_OIFGROUP, + NFT_META_CGROUP, }; /** @@ -837,6 +839,22 @@ enum nft_masq_attributes { }; #define NFTA_MASQ_MAX (__NFTA_MASQ_MAX - 1) +/** + * enum nft_redir_attributes - nf_tables redirect expression netlink attributes + * + * @NFTA_REDIR_REG_PROTO_MIN: source register of proto range start (NLA_U32: nft_registers) + * @NFTA_REDIR_REG_PROTO_MAX: source register of proto range end (NLA_U32: nft_registers) + * @NFTA_REDIR_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32) + */ +enum nft_redir_attributes { + NFTA_REDIR_UNSPEC, + NFTA_REDIR_REG_PROTO_MIN, + NFTA_REDIR_REG_PROTO_MAX, + NFTA_REDIR_FLAGS, + __NFTA_REDIR_MAX +}; +#define NFTA_REDIR_MAX (__NFTA_REDIR_MAX - 1) + /** * enum nft_gen_attributes - nf_tables ruleset generation attributes * diff --git a/include/uapi/linux/netfilter/xt_set.h b/include/uapi/linux/netfilter/xt_set.h index d6a1df1f2947ba6aa0cc8680a2efb8f61a0ae178..d4e02348384c6386163535ff5f1dffdf32ab257d 100644 --- a/include/uapi/linux/netfilter/xt_set.h +++ b/include/uapi/linux/netfilter/xt_set.h @@ -66,8 +66,8 @@ struct xt_set_info_target_v2 { struct xt_set_info_match_v3 { struct xt_set_info match_set; - struct ip_set_counter_match packets; - struct ip_set_counter_match bytes; + struct ip_set_counter_match0 packets; + struct ip_set_counter_match0 bytes; __u32 flags; }; @@ -81,4 +81,13 @@ struct xt_set_info_target_v3 { __u32 timeout; }; +/* Revision 4 match */ + +struct xt_set_info_match_v4 { + struct xt_set_info match_set; + struct ip_set_counter_match packets; + struct ip_set_counter_match bytes; + __u32 flags; +}; + #endif /*_XT_SET_H*/ diff --git a/include/uapi/linux/nfc.h 
b/include/uapi/linux/nfc.h index 9b19b44619286616b04f1aa9f04d026878730744..8119255feae4b44df8bac40415f4681a9ccf3c97 100644 --- a/include/uapi/linux/nfc.h +++ b/include/uapi/linux/nfc.h @@ -116,6 +116,7 @@ enum nfc_commands { NFC_EVENT_SE_TRANSACTION, NFC_CMD_GET_SE, NFC_CMD_SE_IO, + NFC_CMD_ACTIVATE_TARGET, /* private: internal use only */ __NFC_CMD_AFTER_LAST }; @@ -196,15 +197,19 @@ enum nfc_sdp_attr { }; #define NFC_SDP_ATTR_MAX (__NFC_SDP_ATTR_AFTER_LAST - 1) -#define NFC_DEVICE_NAME_MAXSIZE 8 -#define NFC_NFCID1_MAXSIZE 10 -#define NFC_NFCID2_MAXSIZE 8 -#define NFC_NFCID3_MAXSIZE 10 -#define NFC_SENSB_RES_MAXSIZE 12 -#define NFC_SENSF_RES_MAXSIZE 18 -#define NFC_GB_MAXSIZE 48 -#define NFC_FIRMWARE_NAME_MAXSIZE 32 -#define NFC_ISO15693_UID_MAXSIZE 8 +#define NFC_DEVICE_NAME_MAXSIZE 8 +#define NFC_NFCID1_MAXSIZE 10 +#define NFC_NFCID2_MAXSIZE 8 +#define NFC_NFCID3_MAXSIZE 10 +#define NFC_SENSB_RES_MAXSIZE 12 +#define NFC_SENSF_RES_MAXSIZE 18 +#define NFC_ATR_REQ_MAXSIZE 64 +#define NFC_ATR_RES_MAXSIZE 64 +#define NFC_ATR_REQ_GB_MAXSIZE 48 +#define NFC_ATR_RES_GB_MAXSIZE 47 +#define NFC_GB_MAXSIZE 48 +#define NFC_FIRMWARE_NAME_MAXSIZE 32 +#define NFC_ISO15693_UID_MAXSIZE 8 /* NFC protocols */ #define NFC_PROTO_JEWEL 1 diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 4b28dc07bcb1fd3ea358b12c68432bf82fb27b23..b37bd5a1cb8253cd334ea05c3da920f0cf30f6e0 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -227,7 +227,11 @@ * the interface identified by %NL80211_ATTR_IFINDEX. * @NL80211_CMD_DEL_STATION: Remove a station identified by %NL80211_ATTR_MAC * or, if no MAC address given, all stations, on the interface identified - * by %NL80211_ATTR_IFINDEX. + * by %NL80211_ATTR_IFINDEX. %NL80211_ATTR_MGMT_SUBTYPE and + * %NL80211_ATTR_REASON_CODE can optionally be used to specify which type + * of disconnection indication should be sent to the station + * (Deauthentication or Disassociation frame and reason code for that + * frame). * * @NL80211_CMD_GET_MPATH: Get mesh path attributes for mesh path to * destination %NL80211_ATTR_MAC on the interface identified by @@ -639,7 +643,18 @@ * @NL80211_CMD_CH_SWITCH_NOTIFY: An AP or GO may decide to switch channels * independently of the userspace SME, send this event indicating * %NL80211_ATTR_IFINDEX is now on %NL80211_ATTR_WIPHY_FREQ and the - * attributes determining channel width. + * attributes determining channel width. This indication may also be + * sent when a remotely-initiated switch (e.g., when a STA receives a CSA + * from the remote AP) is completed. + * + * @NL80211_CMD_CH_SWITCH_STARTED_NOTIFY: Notify that a channel switch + * has been started on an interface, regardless of the initiator + * (i.e. whether it was requested from a remote device or + * initiated on our own). It indicates that + * %NL80211_ATTR_IFINDEX will be on %NL80211_ATTR_WIPHY_FREQ + * after %NL80211_ATTR_CH_SWITCH_COUNT TBTT's. Userspace may + * decide to react to this indication by requesting other + * interfaces to change channel as well. * * @NL80211_CMD_START_P2P_DEVICE: Start the given P2P Device, identified by * its %NL80211_ATTR_WDEV identifier. It must have been created with @@ -738,6 +753,27 @@ * before removing a station entry entirely, or before disassociating * or similar, cleanup will happen in the driver/device in this case. * + * @NL80211_CMD_GET_MPP: Get mesh path attributes for mesh proxy path to + * destination %NL80211_ATTR_MAC on the interface identified by + * %NL80211_ATTR_IFINDEX.
+ * + * @NL80211_CMD_JOIN_OCB: Join the OCB network. The center frequency and + * bandwidth of a channel must be given. + * @NL80211_CMD_LEAVE_OCB: Leave the OCB network -- no special arguments, the + * network is determined by the network interface. + * + * @NL80211_CMD_TDLS_CHANNEL_SWITCH: Start channel-switching with a TDLS peer, + * identified by the %NL80211_ATTR_MAC parameter. A target channel is + * provided via %NL80211_ATTR_WIPHY_FREQ and other attributes determining + * channel width/type. The target operating class is given via + * %NL80211_ATTR_OPER_CLASS. + * The driver is responsible for continually initiating channel-switching + * operations and returning to the base channel for communication with the + * AP. + * @NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH: Stop channel-switching with a TDLS + * peer given by %NL80211_ATTR_MAC. Both peers must be on the base channel + * when this command completes. + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -912,6 +948,16 @@ enum nl80211_commands { NL80211_CMD_ADD_TX_TS, NL80211_CMD_DEL_TX_TS, + NL80211_CMD_GET_MPP, + + NL80211_CMD_JOIN_OCB, + NL80211_CMD_LEAVE_OCB, + + NL80211_CMD_CH_SWITCH_STARTED_NOTIFY, + + NL80211_CMD_TDLS_CHANNEL_SWITCH, + NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -1606,9 +1652,9 @@ enum nl80211_commands { * @NL80211_ATTR_TDLS_PEER_CAPABILITY: flags for TDLS peer capabilities, u32. * As specified in the &enum nl80211_tdls_peer_capability. * - * @NL80211_ATTR_IFACE_SOCKET_OWNER: flag attribute, if set during interface + * @NL80211_ATTR_SOCKET_OWNER: Flag attribute, if set during interface * creation then the new interface will be owned by the netlink socket - * that created it and will be destroyed when the socket is closed + * that created it and will be destroyed when the socket is closed. * * @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is * the TDLS link initiator. @@ -1638,6 +1684,11 @@ enum nl80211_commands { * @NL80211_ATTR_SMPS_MODE: SMPS mode to use (ap mode). see * &enum nl80211_smps_mode. 
* + * @NL80211_ATTR_OPER_CLASS: operating class + * + * @NL80211_ATTR_MAC_MASK: MAC address mask + * + * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@ -1973,7 +2024,7 @@ enum nl80211_attrs { NL80211_ATTR_TDLS_PEER_CAPABILITY, - NL80211_ATTR_IFACE_SOCKET_OWNER, + NL80211_ATTR_SOCKET_OWNER, NL80211_ATTR_CSA_C_OFFSETS_TX, NL80211_ATTR_MAX_CSA_COUNTERS, @@ -1990,15 +2041,21 @@ enum nl80211_attrs { NL80211_ATTR_SMPS_MODE, + NL80211_ATTR_OPER_CLASS, + + NL80211_ATTR_MAC_MASK, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, + NUM_NL80211_ATTR = __NL80211_ATTR_AFTER_LAST, NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1 }; /* source-level API compatibility */ #define NL80211_ATTR_SCAN_GENERATION NL80211_ATTR_GENERATION #define NL80211_ATTR_MESH_PARAMS NL80211_ATTR_MESH_CONFIG +#define NL80211_ATTR_IFACE_SOCKET_OWNER NL80211_ATTR_SOCKET_OWNER /* * Allow user space programs to use #ifdef on new attributes by defining them @@ -2064,6 +2121,8 @@ enum nl80211_attrs { * and therefore can't be created in the normal ways, use the * %NL80211_CMD_START_P2P_DEVICE and %NL80211_CMD_STOP_P2P_DEVICE * commands to create and destroy one + * @NL80211_IFTYPE_OCB: Outside Context of a BSS + * This mode corresponds to the MIB variable dot11OCBActivated=true * @NL80211_IFTYPE_MAX: highest interface type number currently defined * @NUM_NL80211_IFTYPES: number of defined interface types * @@ -2083,6 +2142,7 @@ enum nl80211_iftype { NL80211_IFTYPE_P2P_CLIENT, NL80211_IFTYPE_P2P_GO, NL80211_IFTYPE_P2P_DEVICE, + NL80211_IFTYPE_OCB, /* keep last */ NUM_NL80211_IFTYPES, @@ -2631,6 +2691,11 @@ enum nl80211_sched_scan_match_attr { * @NL80211_RRF_AUTO_BW: maximum available bandwidth should be calculated * base on contiguous rules and wider channels will be allowed to cross * multiple contiguous/overlapping frequency ranges. + * @NL80211_RRF_GO_CONCURRENT: See &NL80211_FREQUENCY_ATTR_GO_CONCURRENT + * @NL80211_RRF_NO_HT40MINUS: channels can't be used in HT40- operation + * @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation + * @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed + * @NL80211_RRF_NO_160MHZ: 160MHz operation not allowed */ enum nl80211_reg_rule_flags { NL80211_RRF_NO_OFDM = 1<<0, @@ -2643,11 +2708,18 @@ enum nl80211_reg_rule_flags { NL80211_RRF_NO_IR = 1<<7, __NL80211_RRF_NO_IBSS = 1<<8, NL80211_RRF_AUTO_BW = 1<<11, + NL80211_RRF_GO_CONCURRENT = 1<<12, + NL80211_RRF_NO_HT40MINUS = 1<<13, + NL80211_RRF_NO_HT40PLUS = 1<<14, + NL80211_RRF_NO_80MHZ = 1<<15, + NL80211_RRF_NO_160MHZ = 1<<16, }; #define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR #define NL80211_RRF_NO_IBSS NL80211_RRF_NO_IR #define NL80211_RRF_NO_IR NL80211_RRF_NO_IR +#define NL80211_RRF_NO_HT40 (NL80211_RRF_NO_HT40MINUS |\ + NL80211_RRF_NO_HT40PLUS) /* For backport compatibility with older userspace */ #define NL80211_RRF_NO_IR_ALL (NL80211_RRF_NO_IR | __NL80211_RRF_NO_IBSS) @@ -3379,6 +3451,8 @@ enum nl80211_ps_state { * interval in which %NL80211_ATTR_CQM_TXE_PKTS and * %NL80211_ATTR_CQM_TXE_RATE must be satisfied before generating an * %NL80211_CMD_NOTIFY_CQM. Set to 0 to turn off TX error reporting.
+ * @NL80211_ATTR_CQM_BEACON_LOSS_EVENT: flag attribute that's set in a beacon + * loss event * @__NL80211_ATTR_CQM_AFTER_LAST: internal * @NL80211_ATTR_CQM_MAX: highest key attribute */ @@ -3391,6 +3465,7 @@ enum nl80211_attr_cqm { NL80211_ATTR_CQM_TXE_RATE, NL80211_ATTR_CQM_TXE_PKTS, NL80211_ATTR_CQM_TXE_INTVL, + NL80211_ATTR_CQM_BEACON_LOSS_EVENT, /* keep last */ __NL80211_ATTR_CQM_AFTER_LAST, @@ -3403,9 +3478,7 @@ enum nl80211_attr_cqm { * configured threshold * @NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH: The RSSI is higher than the * configured threshold - * @NL80211_CQM_RSSI_BEACON_LOSS_EVENT: The device experienced beacon loss. - * (Note that deauth/disassoc will still follow if the AP is not - * available. This event might get used as roaming event, etc.) + * @NL80211_CQM_RSSI_BEACON_LOSS_EVENT: (reserved, never sent) */ enum nl80211_cqm_rssi_threshold_event { NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, @@ -3545,6 +3618,25 @@ struct nl80211_pattern_support { * @NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS: For wakeup reporting only, * the TCP connection ran out of tokens to use for data to send to the * service + * @NL80211_WOWLAN_TRIG_NET_DETECT: wake up when a configured network + * is detected. This is a nested attribute that contains the + * same attributes used with @NL80211_CMD_START_SCHED_SCAN. It + * specifies how the scan is performed (e.g. the interval and the + * channels to scan) as well as the scan results that will + * trigger a wake (i.e. the matchsets). + * @NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS: nested attribute + * containing an array with information about what triggered the + * wake up. If no elements are present in the array, it means + * that the information is not available. If more than one + * element is present, it means that more than one match + * occurred. + * Each element in the array is a nested attribute that contains + * one optional %NL80211_ATTR_SSID attribute and one optional + * %NL80211_ATTR_SCAN_FREQUENCIES attribute. At least one of + * these attributes must be present. If + * %NL80211_ATTR_SCAN_FREQUENCIES contains more than one + * frequency, it means that the match occurred in more than one + * channel. * @NUM_NL80211_WOWLAN_TRIG: number of wake on wireless triggers * @MAX_NL80211_WOWLAN_TRIG: highest wowlan trigger attribute number * @@ -3570,6 +3662,8 @@ enum nl80211_wowlan_triggers { NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH, NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST, NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS, + NL80211_WOWLAN_TRIG_NET_DETECT, + NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS, /* keep last */ NUM_NL80211_WOWLAN_TRIG, @@ -4042,6 +4136,27 @@ enum nl80211_ap_sme_features { * multiplexing powersave, ie. can turn off all but one chain * and then wake the rest up as required after, for example, * rts/cts handshake. + * @NL80211_FEATURE_SUPPORTS_WMM_ADMISSION: the device supports setting up WMM + * TSPEC sessions (TID aka TSID 0-7) with the %NL80211_CMD_ADD_TX_TS + * command. Standard IEEE 802.11 TSPEC setup is not yet supported, it + * needs to be able to handle Block-Ack agreements and other things. + * @NL80211_FEATURE_MAC_ON_CREATE: Device supports configuring + * the vif's MAC address upon creation. + * See 'macaddr' field in the vif_params (cfg80211.h). + * @NL80211_FEATURE_TDLS_CHANNEL_SWITCH: Driver supports channel switching when + * operating as a TDLS peer. 
+ * @NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR: This device/driver supports using a + * random MAC address during scan (if the device is unassociated); the + * %NL80211_SCAN_FLAG_RANDOM_ADDR flag may be set for scans and the MAC + * address mask/value will be used. + * @NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR: This device/driver supports + * using a random MAC address for every scan iteration during scheduled + * scan (while not associated), the %NL80211_SCAN_FLAG_RANDOM_ADDR may + * be set for scheduled scan and the MAC address mask/value will be used. + * @NL80211_FEATURE_ND_RANDOM_MAC_ADDR: This device/driver supports using a + * random MAC address for every scan iteration during "net detect", i.e. + * scan in unassociated WoWLAN, the %NL80211_SCAN_FLAG_RANDOM_ADDR may + * be set for scheduled scan and the MAC address mask/value will be used. */ enum nl80211_feature_flags { NL80211_FEATURE_SK_TX_STATUS = 1 << 0, @@ -4070,6 +4185,12 @@ enum nl80211_feature_flags { NL80211_FEATURE_ACKTO_ESTIMATION = 1 << 23, NL80211_FEATURE_STATIC_SMPS = 1 << 24, NL80211_FEATURE_DYNAMIC_SMPS = 1 << 25, + NL80211_FEATURE_SUPPORTS_WMM_ADMISSION = 1 << 26, + NL80211_FEATURE_MAC_ON_CREATE = 1 << 27, + NL80211_FEATURE_TDLS_CHANNEL_SWITCH = 1 << 28, + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR = 1 << 29, + NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR = 1 << 30, + NL80211_FEATURE_ND_RANDOM_MAC_ADDR = 1 << 31, }; /** @@ -4118,11 +4239,21 @@ enum nl80211_connect_failed_reason { * dangerous because will destroy stations performance as a lot of frames * will be lost while scanning off-channel, therefore it must be used only * when really needed + * @NL80211_SCAN_FLAG_RANDOM_ADDR: use a random MAC address for this scan (or + * for scheduled scan: a different one for every scan iteration). When the + * flag is set, depending on device capabilities the @NL80211_ATTR_MAC and + * @NL80211_ATTR_MAC_MASK attributes may also be given in which case only + * the masked bits will be preserved from the MAC address and the remainder + * randomised. If the attributes are not given full randomisation (46 bits, + * locally administered 1, multicast 0) is assumed. + * This flag must not be requested when the feature isn't supported, check + * the nl80211 feature flags for the device. */ enum nl80211_scan_flags { NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0, NL80211_SCAN_FLAG_FLUSH = 1<<1, NL80211_SCAN_FLAG_AP = 1<<2, + NL80211_SCAN_FLAG_RANDOM_ADDR = 1<<3, }; /** diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 435eabc5ffaadb9e9e5d3dcfa554331065d430a6..3a6dcaa359b768d09bfc58f71c0f9b23582d9f24 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -157,6 +157,11 @@ enum ovs_packet_cmd { * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an * %OVS_USERSPACE_ATTR_USERDATA attribute, with the same length and content * specified there. + * @OVS_PACKET_ATTR_EGRESS_TUN_KEY: Present for an %OVS_PACKET_CMD_ACTION + * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an + * %OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute, which is sent only if the + * output port is actually a tunnel port. Contains the output tunnel key + * extracted from the packet as nested %OVS_TUNNEL_KEY_ATTR_* attributes. * * These attributes follow the &struct ovs_header within the Generic Netlink * payload for %OVS_PACKET_* commands. @@ -167,6 +172,8 @@ enum ovs_packet_attr { OVS_PACKET_ATTR_KEY, /* Nested OVS_KEY_ATTR_* attributes. 
 */ OVS_PACKET_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */ + OVS_PACKET_ATTR_EGRESS_TUN_KEY, /* Nested OVS_TUNNEL_KEY_ATTR_* + attributes. */ __OVS_PACKET_ATTR_MAX }; @@ -293,6 +300,9 @@ enum ovs_key_attr { OVS_KEY_ATTR_DP_HASH, /* u32 hash value. Value 0 indicates the hash is not computed by the datapath. */ OVS_KEY_ATTR_RECIRC_ID, /* u32 recirc id */ + OVS_KEY_ATTR_MPLS, /* array of struct ovs_key_mpls. + * The implementation may restrict + * the accepted length of the array. */ #ifdef __KERNEL__ OVS_KEY_ATTR_TUNNEL_INFO, /* struct ovs_tunnel_info */ @@ -312,6 +322,8 @@ enum ovs_tunnel_key_attr { OVS_TUNNEL_KEY_ATTR_CSUM, /* No argument. CSUM packet. */ OVS_TUNNEL_KEY_ATTR_OAM, /* No argument. OAM frame. */ OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS, /* Array of Geneve options. */ + OVS_TUNNEL_KEY_ATTR_TP_SRC, /* be16 src Transport Port. */ + OVS_TUNNEL_KEY_ATTR_TP_DST, /* be16 dst Transport Port. */ __OVS_TUNNEL_KEY_ATTR_MAX }; @@ -340,6 +352,10 @@ struct ovs_key_ethernet { __u8 eth_dst[ETH_ALEN]; }; +struct ovs_key_mpls { + __be32 mpls_lse; +}; + struct ovs_key_ipv4 { __be32 ipv4_src; __be32 ipv4_dst; @@ -393,9 +409,9 @@ struct ovs_key_arp { }; struct ovs_key_nd { - __u32 nd_target[4]; - __u8 nd_sll[ETH_ALEN]; - __u8 nd_tll[ETH_ALEN]; + __be32 nd_target[4]; + __u8 nd_sll[ETH_ALEN]; + __u8 nd_tll[ETH_ALEN]; }; /** @@ -441,6 +457,8 @@ enum ovs_flow_attr { OVS_FLOW_ATTR_USED, /* u64 msecs last used in monotonic time. */ OVS_FLOW_ATTR_CLEAR, /* Flag to clear stats, tcp_flags, used. */ OVS_FLOW_ATTR_MASK, /* Sequence of OVS_KEY_ATTR_* attributes. */ + OVS_FLOW_ATTR_PROBE, /* Flow operation is a feature probe, error + * logging should be suppressed. */ __OVS_FLOW_ATTR_MAX }; @@ -473,16 +491,33 @@ enum ovs_sample_attr { * message should be sent. Required. * @OVS_USERSPACE_ATTR_USERDATA: If present, its variable-length argument is * copied to the %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA. + * @OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: If present, u32 output port to get + * tunnel info. */ enum ovs_userspace_attr { OVS_USERSPACE_ATTR_UNSPEC, OVS_USERSPACE_ATTR_PID, /* u32 Netlink PID to receive upcalls. */ OVS_USERSPACE_ATTR_USERDATA, /* Optional user-specified cookie. */ + OVS_USERSPACE_ATTR_EGRESS_TUN_PORT, /* Optional, u32 output port + * to get tunnel info. */ __OVS_USERSPACE_ATTR_MAX }; #define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1) +/** + * struct ovs_action_push_mpls - %OVS_ACTION_ATTR_PUSH_MPLS action argument. + * @mpls_lse: MPLS label stack entry to push. + * @mpls_ethertype: Ethertype to set in the encapsulating ethernet frame. + * + * The only values @mpls_ethertype should ever be given are %ETH_P_MPLS_UC and + * %ETH_P_MPLS_MC, indicating MPLS unicast or multicast. Others are rejected. + */ +struct ovs_action_push_mpls { + __be32 mpls_lse; + __be16 mpls_ethertype; /* Either %ETH_P_MPLS_UC or %ETH_P_MPLS_MC */ +}; + /** * struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument. * @vlan_tpid: Tag protocol identifier (TPID) to push. @@ -534,6 +569,15 @@ struct ovs_action_hash { * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet. * @OVS_ACTION_ATTR_SAMPLE: Probabilitically executes actions, as specified in * the nested %OVS_SAMPLE_ATTR_* attributes. + * @OVS_ACTION_ATTR_PUSH_MPLS: Push a new MPLS label stack entry onto the + * top of the packet's MPLS label stack.
Set the ethertype of the + * encapsulating frame to either %ETH_P_MPLS_UC or %ETH_P_MPLS_MC to + * indicate the new packet contents. + * @OVS_ACTION_ATTR_POP_MPLS: Pop an MPLS label stack entry off of the + * packet's MPLS label stack. Set the encapsulating frame's ethertype to + * indicate the new packet contents. This could potentially still be + * %ETH_P_MPLS if the resulting MPLS label stack is not empty. If there + * is no MPLS label stack, as determined by ethertype, no action is taken. * * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all * fields within a header are modifiable, e.g. the IPv4 protocol and fragment @@ -550,6 +594,9 @@ enum ovs_action_attr { OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. */ OVS_ACTION_ATTR_RECIRC, /* u32 recirc_id. */ OVS_ACTION_ATTR_HASH, /* struct ovs_action_hash. */ + OVS_ACTION_ATTR_PUSH_MPLS, /* struct ovs_action_push_mpls. */ + OVS_ACTION_ATTR_POP_MPLS, /* __be16 ethertype. */ + __OVS_ACTION_ATTR_MAX }; diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index eb0f1a554d7ba19c95b73315374c34757ba3ec97..9c9b8b4480cd4608d783318b7dbac4c060db210a 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -235,6 +235,7 @@ enum { #define RTPROT_NTK 15 /* Netsukuku */ #define RTPROT_DHCP 16 /* DHCP client */ #define RTPROT_MROUTED 17 /* Multicast daemon */ +#define RTPROT_BABEL 42 /* Babel daemon */ /* rtm_scope diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index df40137f33dd48c0b23dad1f5cd13c357c91c031..b22224100011b9dc3b620c0c930344414889afb8 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -156,6 +156,7 @@ enum UDP_MIB_RCVBUFERRORS, /* RcvbufErrors */ UDP_MIB_SNDBUFERRORS, /* SndbufErrors */ UDP_MIB_CSUMERRORS, /* InCsumErrors */ + UDP_MIB_IGNOREDMULTI, /* IgnoredMulti */ __UDP_MIB_MAX }; @@ -265,6 +266,10 @@ enum LINUX_MIB_TCPWANTZEROWINDOWADV, /* TCPWantZeroWindowAdv */ LINUX_MIB_TCPSYNRETRANS, /* TCPSynRetrans */ LINUX_MIB_TCPORIGDATASENT, /* TCPOrigDataSent */ + LINUX_MIB_TCPHYSTARTTRAINDETECT, /* TCPHystartTrainDetect */ + LINUX_MIB_TCPHYSTARTTRAINCWND, /* TCPHystartTrainCwnd */ + LINUX_MIB_TCPHYSTARTDELAYDETECT, /* TCPHystartDelayDetect */ + LINUX_MIB_TCPHYSTARTDELAYCWND, /* TCPHystartDelayCwnd */ __LINUX_MIB_MAX }; diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild index 56f121605c998c28e8a173067e94e24ec7d9c653..b057da2b87a40a788346b09e46aaed27580f16c0 100644 --- a/include/uapi/linux/tc_act/Kbuild +++ b/include/uapi/linux/tc_act/Kbuild @@ -7,3 +7,4 @@ header-y += tc_mirred.h header-y += tc_nat.h header-y += tc_pedit.h header-y += tc_skbedit.h +header-y += tc_vlan.h diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h new file mode 100644 index 0000000000000000000000000000000000000000..f7b8d448b9603f9b1154472911f5a4b8ba663a54 --- /dev/null +++ b/include/uapi/linux/tc_act/tc_vlan.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2014 Jiri Pirko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __LINUX_TC_VLAN_H +#define __LINUX_TC_VLAN_H + +#include <linux/pkt_cls.h> + +#define TCA_ACT_VLAN 12 + +#define TCA_VLAN_ACT_POP 1 +#define TCA_VLAN_ACT_PUSH 2 + +struct tc_vlan { + tc_gen; + int v_action; +}; + +enum { + TCA_VLAN_UNSPEC, + TCA_VLAN_TM, + TCA_VLAN_PARMS, + TCA_VLAN_PUSH_VLAN_ID, + TCA_VLAN_PUSH_VLAN_PROTOCOL, + __TCA_VLAN_MAX, +}; +#define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1) + +#endif
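[Editor's note] As a usage sketch (not part of the patch): a tc-like tool nests these attributes in the action's options. This assumes libnl-3's nla_put helpers and an already-allocated struct nl_msg:

	#include <arpa/inet.h>			/* htons() */
	#include <linux/if_ether.h>		/* ETH_P_8021Q */
	#include <linux/tc_act/tc_vlan.h>
	#include <netlink/attr.h>		/* nla_put(), nla_put_u16() */

	static int vlan_push_opts(struct nl_msg *msg)
	{
		struct tc_vlan parm = {
			.action   = TC_ACT_PIPE,	/* fall through to next action */
			.v_action = TCA_VLAN_ACT_PUSH,
		};

		if (nla_put(msg, TCA_VLAN_PARMS, sizeof(parm), &parm) ||
		    nla_put_u16(msg, TCA_VLAN_PUSH_VLAN_ID, 100) ||
		    nla_put_u16(msg, TCA_VLAN_PUSH_VLAN_PROTOCOL,
				htons(ETH_P_8021Q)))
			return -1;
		return 0;
	}

A pop action needs only TCA_VLAN_PARMS with v_action = TCA_VLAN_ACT_POP.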
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h new file mode 100644 index 0000000000000000000000000000000000000000..8d723824ad6934825d34077431fc43d11ae4a718 --- /dev/null +++ b/include/uapi/linux/tipc_netlink.h @@ -0,0 +1,244 @@ +/* + * Copyright (c) 2014, Ericsson AB + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _LINUX_TIPC_NETLINK_H_ +#define _LINUX_TIPC_NETLINK_H_ + +#define TIPC_GENL_V2_NAME "TIPCv2" +#define TIPC_GENL_V2_VERSION 0x1 + +/* Netlink commands */ +enum { + TIPC_NL_UNSPEC, + TIPC_NL_LEGACY, + TIPC_NL_BEARER_DISABLE, + TIPC_NL_BEARER_ENABLE, + TIPC_NL_BEARER_GET, + TIPC_NL_BEARER_SET, + TIPC_NL_SOCK_GET, + TIPC_NL_PUBL_GET, + TIPC_NL_LINK_GET, + TIPC_NL_LINK_SET, + TIPC_NL_LINK_RESET_STATS, + TIPC_NL_MEDIA_GET, + TIPC_NL_MEDIA_SET, + TIPC_NL_NODE_GET, + TIPC_NL_NET_GET, + TIPC_NL_NET_SET, + TIPC_NL_NAME_TABLE_GET, + + __TIPC_NL_CMD_MAX, + TIPC_NL_CMD_MAX = __TIPC_NL_CMD_MAX - 1 +}; + +/* Top level netlink attributes */ +enum { + TIPC_NLA_UNSPEC, + TIPC_NLA_BEARER, /* nest */ + TIPC_NLA_SOCK, /* nest */ + TIPC_NLA_PUBL, /* nest */ + TIPC_NLA_LINK, /* nest */ + TIPC_NLA_MEDIA, /* nest */ + TIPC_NLA_NODE, /* nest */ + TIPC_NLA_NET, /* nest */ + TIPC_NLA_NAME_TABLE, /* nest */ + + __TIPC_NLA_MAX, + TIPC_NLA_MAX = __TIPC_NLA_MAX - 1 +}; + +/* Bearer info */ +enum { + TIPC_NLA_BEARER_UNSPEC, + TIPC_NLA_BEARER_NAME, /* string */ + TIPC_NLA_BEARER_PROP, /* nest */ + TIPC_NLA_BEARER_DOMAIN, /* u32 */ + + __TIPC_NLA_BEARER_MAX, + TIPC_NLA_BEARER_MAX = __TIPC_NLA_BEARER_MAX - 1 +}; + +/* Socket info */ +enum { + TIPC_NLA_SOCK_UNSPEC, + TIPC_NLA_SOCK_ADDR, /* u32 */ + TIPC_NLA_SOCK_REF, /* u32 */ + TIPC_NLA_SOCK_CON, /* nest */ + TIPC_NLA_SOCK_HAS_PUBL, /* flag */ + + __TIPC_NLA_SOCK_MAX, + TIPC_NLA_SOCK_MAX = __TIPC_NLA_SOCK_MAX - 1 +}; + +/* Link info */ +enum { + TIPC_NLA_LINK_UNSPEC, + TIPC_NLA_LINK_NAME, /* string */ + TIPC_NLA_LINK_DEST, /* u32 */ + TIPC_NLA_LINK_MTU, /* u32 */ + TIPC_NLA_LINK_BROADCAST, /* flag */ + TIPC_NLA_LINK_UP, /* flag */ + TIPC_NLA_LINK_ACTIVE, /* flag */ + TIPC_NLA_LINK_PROP, /* nest */ + TIPC_NLA_LINK_STATS, /* nest */ + TIPC_NLA_LINK_RX, /* u32 */ + TIPC_NLA_LINK_TX, /* u32 */ + + __TIPC_NLA_LINK_MAX, + TIPC_NLA_LINK_MAX = __TIPC_NLA_LINK_MAX - 1 +}; + +/* Media info */ +enum { + TIPC_NLA_MEDIA_UNSPEC, + TIPC_NLA_MEDIA_NAME, /* string */ + TIPC_NLA_MEDIA_PROP, /* nest */ + + __TIPC_NLA_MEDIA_MAX, + TIPC_NLA_MEDIA_MAX = __TIPC_NLA_MEDIA_MAX - 1 +}; + +/* Node info */ +enum { + TIPC_NLA_NODE_UNSPEC, + TIPC_NLA_NODE_ADDR, /* u32 */ + TIPC_NLA_NODE_UP, /* flag */ + + __TIPC_NLA_NODE_MAX, + TIPC_NLA_NODE_MAX = __TIPC_NLA_NODE_MAX - 1 +}; + +/* Net info */ +enum { + TIPC_NLA_NET_UNSPEC, + TIPC_NLA_NET_ID, /* u32 */ + TIPC_NLA_NET_ADDR, /* u32 */ + + __TIPC_NLA_NET_MAX, + TIPC_NLA_NET_MAX = __TIPC_NLA_NET_MAX - 1 +}; + +/* Name table info */ +enum { + TIPC_NLA_NAME_TABLE_UNSPEC, + TIPC_NLA_NAME_TABLE_PUBL, /* nest */ + + __TIPC_NLA_NAME_TABLE_MAX, + TIPC_NLA_NAME_TABLE_MAX = __TIPC_NLA_NAME_TABLE_MAX - 1 +}; + +/* Publication info */ +enum { + TIPC_NLA_PUBL_UNSPEC, + + TIPC_NLA_PUBL_TYPE, /* u32 */ + TIPC_NLA_PUBL_LOWER, /* u32 */ + TIPC_NLA_PUBL_UPPER, /* u32 */ + TIPC_NLA_PUBL_SCOPE, /* u32 */ + TIPC_NLA_PUBL_NODE, /* u32 */ + TIPC_NLA_PUBL_REF, /* u32 */ + TIPC_NLA_PUBL_KEY, /* u32 */ + + __TIPC_NLA_PUBL_MAX, + TIPC_NLA_PUBL_MAX = __TIPC_NLA_PUBL_MAX - 1 +}; + +/* Nest, connection info */ +enum { + TIPC_NLA_CON_UNSPEC, + + TIPC_NLA_CON_FLAG, /* flag */ + TIPC_NLA_CON_NODE, /* u32 */ + TIPC_NLA_CON_SOCK, /* u32 */ + TIPC_NLA_CON_TYPE, /* u32 */ + TIPC_NLA_CON_INST, /* u32 */ + + __TIPC_NLA_CON_MAX, + TIPC_NLA_CON_MAX = __TIPC_NLA_CON_MAX - 1 +}; + +/* Nest, link properties.
Valid for link, media and bearer */ +enum { + TIPC_NLA_PROP_UNSPEC, + + TIPC_NLA_PROP_PRIO, /* u32 */ + TIPC_NLA_PROP_TOL, /* u32 */ + TIPC_NLA_PROP_WIN, /* u32 */ + + __TIPC_NLA_PROP_MAX, + TIPC_NLA_PROP_MAX = __TIPC_NLA_PROP_MAX - 1 +}; + +/* Nest, statistics info */ +enum { + TIPC_NLA_STATS_UNSPEC, + + TIPC_NLA_STATS_RX_INFO, /* u32 */ + TIPC_NLA_STATS_RX_FRAGMENTS, /* u32 */ + TIPC_NLA_STATS_RX_FRAGMENTED, /* u32 */ + TIPC_NLA_STATS_RX_BUNDLES, /* u32 */ + TIPC_NLA_STATS_RX_BUNDLED, /* u32 */ + TIPC_NLA_STATS_TX_INFO, /* u32 */ + TIPC_NLA_STATS_TX_FRAGMENTS, /* u32 */ + TIPC_NLA_STATS_TX_FRAGMENTED, /* u32 */ + TIPC_NLA_STATS_TX_BUNDLES, /* u32 */ + TIPC_NLA_STATS_TX_BUNDLED, /* u32 */ + TIPC_NLA_STATS_MSG_PROF_TOT, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_CNT, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_TOT, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_P0, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_P1, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_P2, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_P3, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_P4, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_P5, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_P6, /* u32 */ + TIPC_NLA_STATS_RX_STATES, /* u32 */ + TIPC_NLA_STATS_RX_PROBES, /* u32 */ + TIPC_NLA_STATS_RX_NACKS, /* u32 */ + TIPC_NLA_STATS_RX_DEFERRED, /* u32 */ + TIPC_NLA_STATS_TX_STATES, /* u32 */ + TIPC_NLA_STATS_TX_PROBES, /* u32 */ + TIPC_NLA_STATS_TX_NACKS, /* u32 */ + TIPC_NLA_STATS_TX_ACKS, /* u32 */ + TIPC_NLA_STATS_RETRANSMITTED, /* u32 */ + TIPC_NLA_STATS_DUPLICATES, /* u32 */ + TIPC_NLA_STATS_LINK_CONGS, /* u32 */ + TIPC_NLA_STATS_MAX_QUEUE, /* u32 */ + TIPC_NLA_STATS_AVG_QUEUE, /* u32 */ + + __TIPC_NLA_STATS_MAX, + TIPC_NLA_STATS_MAX = __TIPC_NLA_STATS_MAX - 1 +}; + +#endif diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index 0daf7f6ae7df565c43273b8fb3a553bc1ffdd622..a5ae60f0b0a2d33a48cf7d5b2585e5d048f931f6 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -1,5 +1,5 @@ obj-y := core.o -obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o +obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o hashtab.o arraymap.o helpers.o ifdef CONFIG_TEST_BPF obj-$(CONFIG_BPF_SYSCALL) += test_stub.o endif diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c new file mode 100644 index 0000000000000000000000000000000000000000..9eb4d8a7cd870b513d9d19e2707bc2950e91f744 --- /dev/null +++ b/kernel/bpf/arraymap.c @@ -0,0 +1,156 @@ +/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ +#include <linux/bpf.h> +#include <linux/err.h> +#include <linux/vmalloc.h> +#include <linux/slab.h> +#include <linux/mm.h> + +struct bpf_array { + struct bpf_map map; + u32 elem_size; + char value[0] __aligned(8); +}; + +/* Called from syscall */ +static struct bpf_map *array_map_alloc(union bpf_attr *attr) +{ + struct bpf_array *array; + u32 elem_size, array_size; + + /* check sanity of attributes */ + if (attr->max_entries == 0 || attr->key_size != 4 || + attr->value_size == 0) + return ERR_PTR(-EINVAL); + + elem_size = round_up(attr->value_size, 8); + + /* check that round_up did not wrap to zero and that the total + * size does not overflow u32 */ + if (elem_size == 0 || + attr->max_entries > (U32_MAX - sizeof(*array)) / elem_size) + return ERR_PTR(-ENOMEM); + + array_size = sizeof(*array) + attr->max_entries * elem_size; + + /* allocate all map elements and zero-initialize them */ + array = kzalloc(array_size, GFP_USER | __GFP_NOWARN); + if (!array) { + array = vzalloc(array_size); + if (!array) + return ERR_PTR(-ENOMEM); + } + + /* copy mandatory map attributes */ + array->map.key_size = attr->key_size; + array->map.value_size = attr->value_size; + array->map.max_entries = attr->max_entries; + + array->elem_size = elem_size; + + return &array->map; +} + +/* Called from syscall or from eBPF program */ +static void *array_map_lookup_elem(struct bpf_map *map, void *key) +{ + struct bpf_array *array = container_of(map, struct bpf_array, map); + u32 index = *(u32 *)key; + + if (index >= array->map.max_entries) + return NULL; + + return array->value + array->elem_size * index; +} + +/* Called from syscall */ +static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key) +{ + struct bpf_array *array = container_of(map, struct bpf_array, map); + u32 index = *(u32 *)key; + u32 *next = (u32 *)next_key; + + if (index >= array->map.max_entries) { + *next = 0; + return 0; + } + + if (index == array->map.max_entries - 1) + return -ENOENT; + + *next = index + 1; + return 0; +} + +/* Called from syscall or from eBPF program */ +static int array_map_update_elem(struct bpf_map *map, void *key, void *value, + u64 map_flags) +{ + struct bpf_array *array = container_of(map, struct bpf_array, map); + u32 index = *(u32 *)key; + + if (map_flags > BPF_EXIST) + /* unknown flags */ + return -EINVAL; + + if (index >= array->map.max_entries) + /* all elements were pre-allocated, cannot insert a new one */ + return -E2BIG; + + if (map_flags == BPF_NOEXIST) + /* all elements already exist */ + return -EEXIST; + + memcpy(array->value + array->elem_size * index, value, array->elem_size); + return 0; +} + +/* Called from syscall or from eBPF program */ +static int array_map_delete_elem(struct bpf_map *map, void *key) +{ + return -EINVAL; +} + +/* Called when map->refcnt goes to zero, either from workqueue or from syscall */ +static void array_map_free(struct bpf_map *map) +{ + struct bpf_array *array = container_of(map, struct bpf_array, map); + + /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0, + * so the programs (can be more than one that used this map) were + * disconnected from events. Wait for outstanding programs to complete + * and free the array + */ + synchronize_rcu(); + + kvfree(array); +} + +static struct bpf_map_ops array_ops = { + .map_alloc = array_map_alloc, + .map_free = array_map_free, + .map_get_next_key = array_map_get_next_key, + .map_lookup_elem = array_map_lookup_elem, + .map_update_elem = array_map_update_elem, + .map_delete_elem = array_map_delete_elem, +}; + +static struct bpf_map_type_list tl = { + .ops = &array_ops, + .type = BPF_MAP_TYPE_ARRAY, +}; + +static int __init register_array_map(void) +{ + bpf_register_map_type(&tl); + return 0; +} +late_initcall(register_array_map);
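[Editor's note] A quick userspace view of this map type; a sketch assuming uapi headers from a kernel with this series (the wrapper name bpf_create_array is made up here):

	#include <linux/bpf.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int bpf_create_array(unsigned int max_entries,
				    unsigned int value_size)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.map_type	 = BPF_MAP_TYPE_ARRAY;
		attr.key_size	 = 4;	/* array_map_alloc() insists on u32 keys */
		attr.value_size	 = value_size;
		attr.max_entries = max_entries;

		return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	}

All elements exist from creation time: a lookup of any index below max_entries succeeds (initially zeroed), delete always fails with -EINVAL, and an update with BPF_NOEXIST fails with -EEXIST.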
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c new file mode 100644 index 0000000000000000000000000000000000000000..b3ba43674310145585d8a1441030a50ebac48252 --- /dev/null +++ b/kernel/bpf/hashtab.c @@ -0,0 +1,367 @@ +/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#include <linux/bpf.h> +#include <linux/jhash.h> +#include <linux/filter.h> +#include <linux/vmalloc.h> + +struct bpf_htab { + struct bpf_map map; + struct hlist_head *buckets; + spinlock_t lock; + u32 count; /* number of elements in this hashtable */ + u32 n_buckets; /* number of hash buckets */ + u32 elem_size; /* size of each element in bytes */ +}; + +/* each htab element is struct htab_elem + key + value */ +struct htab_elem { + struct hlist_node hash_node; + struct rcu_head rcu; + u32 hash; + char key[0] __aligned(8); +}; + +/* Called from syscall */ +static struct bpf_map *htab_map_alloc(union bpf_attr *attr) +{ + struct bpf_htab *htab; + int err, i; + + htab = kzalloc(sizeof(*htab), GFP_USER); + if (!htab) + return ERR_PTR(-ENOMEM); + + /* mandatory map attributes */ + htab->map.key_size = attr->key_size; + htab->map.value_size = attr->value_size; + htab->map.max_entries = attr->max_entries; + + /* check sanity of attributes.
+ * value_size == 0 may be allowed in the future to use map as a set + */ + err = -EINVAL; + if (htab->map.max_entries == 0 || htab->map.key_size == 0 || + htab->map.value_size == 0) + goto free_htab; + + /* hash table size must be power of 2 */ + htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); + + err = -E2BIG; + if (htab->map.key_size > MAX_BPF_STACK) + /* eBPF programs initialize keys on stack, so they cannot be + * larger than max stack size + */ + goto free_htab; + + err = -ENOMEM; + /* prevent zero size kmalloc and check for u32 overflow */ + if (htab->n_buckets == 0 || + htab->n_buckets > U32_MAX / sizeof(struct hlist_head)) + goto free_htab; + + htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct hlist_head), + GFP_USER | __GFP_NOWARN); + + if (!htab->buckets) { + htab->buckets = vmalloc(htab->n_buckets * sizeof(struct hlist_head)); + if (!htab->buckets) + goto free_htab; + } + + for (i = 0; i < htab->n_buckets; i++) + INIT_HLIST_HEAD(&htab->buckets[i]); + + spin_lock_init(&htab->lock); + htab->count = 0; + + htab->elem_size = sizeof(struct htab_elem) + + round_up(htab->map.key_size, 8) + + htab->map.value_size; + return &htab->map; + +free_htab: + kfree(htab); + return ERR_PTR(err); +} + +static inline u32 htab_map_hash(const void *key, u32 key_len) +{ + return jhash(key, key_len, 0); +} + +static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash) +{ + return &htab->buckets[hash & (htab->n_buckets - 1)]; +} + +static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash, + void *key, u32 key_size) +{ + struct htab_elem *l; + + hlist_for_each_entry_rcu(l, head, hash_node) + if (l->hash == hash && !memcmp(&l->key, key, key_size)) + return l; + + return NULL; +} + +/* Called from syscall or from eBPF program */ +static void *htab_map_lookup_elem(struct bpf_map *map, void *key) +{ + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + struct hlist_head *head; + struct htab_elem *l; + u32 hash, key_size; + + /* Must be called with rcu_read_lock. 
*/ + WARN_ON_ONCE(!rcu_read_lock_held()); + + key_size = map->key_size; + + hash = htab_map_hash(key, key_size); + + head = select_bucket(htab, hash); + + l = lookup_elem_raw(head, hash, key, key_size); + + if (l) + return l->key + round_up(map->key_size, 8); + + return NULL; +} + +/* Called from syscall */ +static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) +{ + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + struct hlist_head *head; + struct htab_elem *l, *next_l; + u32 hash, key_size; + int i; + + WARN_ON_ONCE(!rcu_read_lock_held()); + + key_size = map->key_size; + + hash = htab_map_hash(key, key_size); + + head = select_bucket(htab, hash); + + /* lookup the key */ + l = lookup_elem_raw(head, hash, key, key_size); + + if (!l) { + i = 0; + goto find_first_elem; + } + + /* key was found, get next key in the same bucket */ + next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)), + struct htab_elem, hash_node); + + if (next_l) { + /* if next elem in this hash list is non-zero, just return it */ + memcpy(next_key, next_l->key, key_size); + return 0; + } + + /* no more elements in this hash list, go to the next bucket */ + i = hash & (htab->n_buckets - 1); + i++; + +find_first_elem: + /* iterate over buckets */ + for (; i < htab->n_buckets; i++) { + head = select_bucket(htab, i); + + /* pick first element in the bucket */ + next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)), + struct htab_elem, hash_node); + if (next_l) { + /* if it's not empty, just return it */ + memcpy(next_key, next_l->key, key_size); + return 0; + } + } + + /* iterated over all buckets and all elements */ + return -ENOENT; +} + +/* Called from syscall or from eBPF program */ +static int htab_map_update_elem(struct bpf_map *map, void *key, void *value, + u64 map_flags) +{ + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + struct htab_elem *l_new, *l_old; + struct hlist_head *head; + unsigned long flags; + u32 key_size; + int ret; + + if (map_flags > BPF_EXIST) + /* unknown flags */ + return -EINVAL; + + WARN_ON_ONCE(!rcu_read_lock_held()); + + /* allocate new element outside of lock */ + l_new = kmalloc(htab->elem_size, GFP_ATOMIC); + if (!l_new) + return -ENOMEM; + + key_size = map->key_size; + + memcpy(l_new->key, key, key_size); + memcpy(l_new->key + round_up(key_size, 8), value, map->value_size); + + l_new->hash = htab_map_hash(l_new->key, key_size); + + /* bpf_map_update_elem() can be called in_irq() */ + spin_lock_irqsave(&htab->lock, flags); + + head = select_bucket(htab, l_new->hash); + + l_old = lookup_elem_raw(head, l_new->hash, key, key_size); + + if (!l_old && unlikely(htab->count >= map->max_entries)) { + /* if elem with this 'key' doesn't exist and we've reached + * max_entries limit, fail insertion of new elem + */ + ret = -E2BIG; + goto err; + } + + if (l_old && map_flags == BPF_NOEXIST) { + /* elem already exists */ + ret = -EEXIST; + goto err; + } + + if (!l_old && map_flags == BPF_EXIST) { + /* elem doesn't exist, cannot update it */ + ret = -ENOENT; + goto err; + } + + /* add new element to the head of the list, so that concurrent + * search will find it before old elem + */ + hlist_add_head_rcu(&l_new->hash_node, head); + if (l_old) { + hlist_del_rcu(&l_old->hash_node); + kfree_rcu(l_old, rcu); + } else { + htab->count++; + } + spin_unlock_irqrestore(&htab->lock, flags); + + return 0; +err: + spin_unlock_irqrestore(&htab->lock, flags); + kfree(l_new); + return ret; +}
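[Editor's note] The map_flags checks above give userspace three update modes: BPF_ANY (0) upserts, BPF_NOEXIST inserts only, BPF_EXIST replaces only. A sketch of the syscall side, with the same includes as the array sketch earlier; hash_insert_once is an invented name:

	static long hash_insert_once(int fd, __u32 key, __u64 val)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.map_fd = fd;
		attr.key    = (__u64)(unsigned long)&key;
		attr.value  = (__u64)(unsigned long)&val;
		attr.flags  = BPF_NOEXIST;	/* insert only: -EEXIST if key present */

		return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
	}

BPF_EXIST conversely demands that the key already exist (-ENOENT otherwise).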
+ +/* Called from syscall or from eBPF program */ +static int htab_map_delete_elem(struct bpf_map *map, void *key) +{ + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + struct hlist_head *head; + struct htab_elem *l; + unsigned long flags; + u32 hash, key_size; + int ret = -ENOENT; + + WARN_ON_ONCE(!rcu_read_lock_held()); + + key_size = map->key_size; + + hash = htab_map_hash(key, key_size); + + spin_lock_irqsave(&htab->lock, flags); + + head = select_bucket(htab, hash); + + l = lookup_elem_raw(head, hash, key, key_size); + + if (l) { + hlist_del_rcu(&l->hash_node); + htab->count--; + kfree_rcu(l, rcu); + ret = 0; + } + + spin_unlock_irqrestore(&htab->lock, flags); + return ret; +} + +static void delete_all_elements(struct bpf_htab *htab) +{ + int i; + + for (i = 0; i < htab->n_buckets; i++) { + struct hlist_head *head = select_bucket(htab, i); + struct hlist_node *n; + struct htab_elem *l; + + hlist_for_each_entry_safe(l, n, head, hash_node) { + hlist_del_rcu(&l->hash_node); + htab->count--; + kfree(l); + } + } +} + +/* Called when map->refcnt goes to zero, either from workqueue or from syscall */ +static void htab_map_free(struct bpf_map *map) +{ + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + + /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0, + * so the programs (can be more than one that used this map) were + * disconnected from events. Wait for outstanding critical sections in + * these programs to complete + */ + synchronize_rcu(); + + /* some of kfree_rcu() callbacks for elements of this map may not have + * executed. It's ok. Proceed to free residual elements and map itself + */ + delete_all_elements(htab); + kvfree(htab->buckets); + kfree(htab); +} + +static struct bpf_map_ops htab_ops = { + .map_alloc = htab_map_alloc, + .map_free = htab_map_free, + .map_get_next_key = htab_map_get_next_key, + .map_lookup_elem = htab_map_lookup_elem, + .map_update_elem = htab_map_update_elem, + .map_delete_elem = htab_map_delete_elem, +}; + +static struct bpf_map_type_list tl = { + .ops = &htab_ops, + .type = BPF_MAP_TYPE_HASH, +}; + +static int __init register_htab_map(void) +{ + bpf_register_map_type(&tl); + return 0; +} +late_initcall(register_htab_map);
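[Editor's note] From inside an eBPF program these map ops are reached through the helpers introduced in the next file. An illustrative (untested) fragment in the style of the kernel's insn macros from linux/filter.h, where map_fd stands for the map's file descriptor patched in via the BPF_PSEUDO_MAP_FD load:

	struct bpf_insn prog[] = {
		/* key = 0 in the low 4 bytes of the stack */
		BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),	/* R2 = fp - 4 */
		BPF_LD_IMM64_RAW(BPF_REG_1, BPF_PSEUDO_MAP_FD, map_fd),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),	/* NULL check is mandatory */
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),	/* *value = 1 */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};

The verifier types R0 as PTR_TO_MAP_VALUE_OR_NULL after the call, which is why the branch on zero must come before any access through R0.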
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c new file mode 100644 index 0000000000000000000000000000000000000000..9e3414d854593f922fe6cc0021c07727c5b3daa0 --- /dev/null +++ b/kernel/bpf/helpers.c @@ -0,0 +1,89 @@ +/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#include <linux/bpf.h> +#include <linux/rcupdate.h> + +/* If kernel subsystem is allowing eBPF programs to call this function, + * inside its own verifier_ops->get_func_proto() callback it should return + * bpf_map_lookup_elem_proto, so that verifier can properly check the arguments + * + * Different map implementations will rely on rcu in map methods + * lookup/update/delete, therefore eBPF programs must run under rcu lock + * if program is allowed to access maps, so check rcu_read_lock_held in + * all three functions. + */ +static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) +{ + /* verifier checked that R1 contains a valid pointer to bpf_map + * and R2 points to a program stack and map->key_size bytes were + * initialized + */ + struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; + void *key = (void *) (unsigned long) r2; + void *value; + + WARN_ON_ONCE(!rcu_read_lock_held()); + + value = map->ops->map_lookup_elem(map, key); + + /* lookup() returns either pointer to element value or NULL + * which is the meaning of PTR_TO_MAP_VALUE_OR_NULL type + */ + return (unsigned long) value; +} + +struct bpf_func_proto bpf_map_lookup_elem_proto = { + .func = bpf_map_lookup_elem, + .gpl_only = false, + .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_MAP_KEY, +}; + +static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) +{ + struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; + void *key = (void *) (unsigned long) r2; + void *value = (void *) (unsigned long) r3; + + WARN_ON_ONCE(!rcu_read_lock_held()); + + return map->ops->map_update_elem(map, key, value, r4); +} + +struct bpf_func_proto bpf_map_update_elem_proto = { + .func = bpf_map_update_elem, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_MAP_KEY, + .arg3_type = ARG_PTR_TO_MAP_VALUE, + .arg4_type = ARG_ANYTHING, +}; + +static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) +{ + struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; + void *key = (void *) (unsigned long) r2; + + WARN_ON_ONCE(!rcu_read_lock_held()); + + return map->ops->map_delete_elem(map, key); +} + +struct bpf_func_proto bpf_map_delete_elem_proto = { + .func = bpf_map_delete_elem, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_MAP_KEY, +}; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index ba61c8c160321ea6f333c560a9dc1e55f8b83632..088ac0b1b106ff772e9122529866eb5406effd14 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -169,7 +169,7 @@ static int map_lookup_elem(union bpf_attr *attr) if (copy_from_user(key, ukey, map->key_size) != 0) goto free_key; - err = -ESRCH; + err = -ENOENT; rcu_read_lock(); value = map->ops->map_lookup_elem(map, key); if (!value) @@ -190,7 +190,7 @@ err_put: return err; } -#define BPF_MAP_UPDATE_ELEM_LAST_FIELD value +#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags static int map_update_elem(union bpf_attr *attr) { @@ -231,7 +231,7 @@ static int map_update_elem(union bpf_attr *attr) * therefore all map accessors rely on this fact, so do the same here */ rcu_read_lock(); - err = map->ops->map_update_elem(map, key, value); + err = map->ops->map_update_elem(map, key, value, attr->flags); rcu_read_unlock(); free_value: diff --git a/kernel/bpf/test_stub.c b/kernel/bpf/test_stub.c index fcaddff4003e27561c63f67d934360cfb2adeeb6..0ceae1e6e8b52c9aca882aa264ef79b3f1f5a682 100644 --- a/kernel/bpf/test_stub.c +++ b/kernel/bpf/test_stub.c @@ -18,26 +18,18 @@ struct bpf_context { u64 arg2; }; -static u64 test_func(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) -{ - return 0; -} - -static struct bpf_func_proto test_funcs[] = { - [BPF_FUNC_unspec] = { - .func = test_func, - .gpl_only = true, - .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, - .arg1_type = ARG_CONST_MAP_PTR, - .arg2_type = ARG_PTR_TO_MAP_KEY, - }, -}; - static const struct bpf_func_proto *test_func_proto(enum bpf_func_id func_id) { - if (func_id < 0 || func_id
>= ARRAY_SIZE(test_funcs)) + switch (func_id) { + case BPF_FUNC_map_lookup_elem: + return &bpf_map_lookup_elem_proto; + case BPF_FUNC_map_update_elem: + return &bpf_map_update_elem_proto; + case BPF_FUNC_map_delete_elem: + return &bpf_map_delete_elem_proto; + default: return NULL; - return &test_funcs[func_id]; + } } static const struct bpf_context_access { @@ -78,38 +70,8 @@ static struct bpf_prog_type_list tl_prog = { .type = BPF_PROG_TYPE_UNSPEC, }; -static struct bpf_map *test_map_alloc(union bpf_attr *attr) -{ - struct bpf_map *map; - - map = kzalloc(sizeof(*map), GFP_USER); - if (!map) - return ERR_PTR(-ENOMEM); - - map->key_size = attr->key_size; - map->value_size = attr->value_size; - map->max_entries = attr->max_entries; - return map; -} - -static void test_map_free(struct bpf_map *map) -{ - kfree(map); -} - -static struct bpf_map_ops test_map_ops = { - .map_alloc = test_map_alloc, - .map_free = test_map_free, -}; - -static struct bpf_map_type_list tl_map = { - .ops = &test_map_ops, - .type = BPF_MAP_TYPE_UNSPEC, -}; - static int __init register_test_ops(void) { - bpf_register_map_type(&tl_map); bpf_register_prog_type(&tl_prog); return 0; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 9f81818f294185988c9cc181274ed38789774871..a28e09c7825d76d5ab10530a88fa183994f48d1e 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -153,22 +153,19 @@ struct reg_state { enum bpf_stack_slot_type { STACK_INVALID, /* nothing was stored in this stack slot */ - STACK_SPILL, /* 1st byte of register spilled into stack */ - STACK_SPILL_PART, /* other 7 bytes of register spill */ + STACK_SPILL, /* register spilled into stack */ STACK_MISC /* BPF program wrote some data into this slot */ }; -struct bpf_stack_slot { - enum bpf_stack_slot_type stype; - struct reg_state reg_st; -}; +#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ /* state of the program: * type of all registers and stack info */ struct verifier_state { struct reg_state regs[MAX_BPF_REG]; - struct bpf_stack_slot stack[MAX_BPF_STACK]; + u8 stack_slot_type[MAX_BPF_STACK]; + struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE]; }; /* linked list of verifier states used to prune search */ @@ -259,10 +256,10 @@ static void print_verifier_state(struct verifier_env *env) env->cur_state.regs[i].map_ptr->key_size, env->cur_state.regs[i].map_ptr->value_size); } - for (i = 0; i < MAX_BPF_STACK; i++) { - if (env->cur_state.stack[i].stype == STACK_SPILL) + for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { + if (env->cur_state.stack_slot_type[i] == STACK_SPILL) verbose(" fp%d=%s", -MAX_BPF_STACK + i, - reg_type_str[env->cur_state.stack[i].reg_st.type]); + reg_type_str[env->cur_state.spilled_regs[i / BPF_REG_SIZE].type]); } verbose("\n"); } @@ -539,8 +536,10 @@ static int bpf_size_to_bytes(int bpf_size) static int check_stack_write(struct verifier_state *state, int off, int size, int value_regno) { - struct bpf_stack_slot *slot; int i; + /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, + * so it's aligned access and [off, off + size) are within stack limits + */ if (value_regno >= 0 && (state->regs[value_regno].type == PTR_TO_MAP_VALUE || @@ -548,30 +547,24 @@ static int check_stack_write(struct verifier_state *state, int off, int size, state->regs[value_regno].type == PTR_TO_CTX)) { /* register containing pointer is being spilled into stack */ - if (size != 8) { + if (size != BPF_REG_SIZE) { verbose("invalid size of register spill\n"); return -EACCES; } - slot = 
&state->stack[MAX_BPF_STACK + off]; - slot->stype = STACK_SPILL; /* save register state */ - slot->reg_st = state->regs[value_regno]; - for (i = 1; i < 8; i++) { - slot = &state->stack[MAX_BPF_STACK + off + i]; - slot->stype = STACK_SPILL_PART; - slot->reg_st.type = UNKNOWN_VALUE; - slot->reg_st.map_ptr = NULL; - } - } else { + state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = + state->regs[value_regno]; + for (i = 0; i < BPF_REG_SIZE; i++) + state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL; + } else { /* regular write of data into stack */ - for (i = 0; i < size; i++) { - slot = &state->stack[MAX_BPF_STACK + off + i]; - slot->stype = STACK_MISC; - slot->reg_st.type = UNKNOWN_VALUE; - slot->reg_st.map_ptr = NULL; - } + state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = + (struct reg_state) {}; + + for (i = 0; i < size; i++) + state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC; } return 0; } @@ -579,19 +572,18 @@ static int check_stack_write(struct verifier_state *state, int off, int size, static int check_stack_read(struct verifier_state *state, int off, int size, int value_regno) { + u8 *slot_type; int i; - struct bpf_stack_slot *slot; - slot = &state->stack[MAX_BPF_STACK + off]; + slot_type = &state->stack_slot_type[MAX_BPF_STACK + off]; - if (slot->stype == STACK_SPILL) { - if (size != 8) { + if (slot_type[0] == STACK_SPILL) { + if (size != BPF_REG_SIZE) { verbose("invalid size of register spill\n"); return -EACCES; } - for (i = 1; i < 8; i++) { - if (state->stack[MAX_BPF_STACK + off + i].stype != - STACK_SPILL_PART) { + for (i = 1; i < BPF_REG_SIZE; i++) { + if (slot_type[i] != STACK_SPILL) { verbose("corrupted spill memory\n"); return -EACCES; } @@ -599,12 +591,12 @@ static int check_stack_read(struct verifier_state *state, int off, int size, if (value_regno >= 0) /* restore register state from stack */ - state->regs[value_regno] = slot->reg_st; + state->regs[value_regno] = + state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE]; return 0; } else { for (i = 0; i < size; i++) { - if (state->stack[MAX_BPF_STACK + off + i].stype != - STACK_MISC) { + if (slot_type[i] != STACK_MISC) { verbose("invalid read from stack off %d+%d size %d\n", off, i, size); return -EACCES; @@ -747,7 +739,7 @@ static int check_stack_boundary(struct verifier_env *env, } for (i = 0; i < access_size; i++) { - if (state->stack[MAX_BPF_STACK + off + i].stype != STACK_MISC) { + if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) { verbose("invalid indirect read from stack off %d+%d size %d\n", off, i, access_size); return -EACCES; @@ -1180,6 +1172,70 @@ static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn) return 0; } +/* verify safety of LD_ABS|LD_IND instructions: + * - they can only appear in the programs where ctx == skb + * - since they are wrappers of function calls, they scratch R1-R5 registers, + * preserve R6-R9, and store return value into R0 + * + * Implicit input: + * ctx == skb == R6 == CTX + * + * Explicit input: + * SRC == any register + * IMM == 32-bit immediate + * + * Output: + * R0 - 8/16/32-bit skb data converted to cpu endianness + */ +static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn) +{ + struct reg_state *regs = env->cur_state.regs; + u8 mode = BPF_MODE(insn->code); + struct reg_state *reg; + int i, err; + + if (env->prog->aux->prog_type != BPF_PROG_TYPE_SOCKET_FILTER) { + verbose("BPF_LD_ABS|IND instructions are only allowed in socket filters\n"); + return -EINVAL; + } + + if (insn->dst_reg 
!= BPF_REG_0 || insn->off != 0 || + (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { + verbose("BPF_LD_ABS uses reserved fields\n"); + return -EINVAL; + } + + /* check whether implicit source operand (register R6) is readable */ + err = check_reg_arg(regs, BPF_REG_6, SRC_OP); + if (err) + return err; + + if (regs[BPF_REG_6].type != PTR_TO_CTX) { + verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); + return -EINVAL; + } + + if (mode == BPF_IND) { + /* check explicit source operand */ + err = check_reg_arg(regs, insn->src_reg, SRC_OP); + if (err) + return err; + } + + /* reset caller saved regs to unreadable */ + for (i = 0; i < CALLER_SAVED_REGS; i++) { + reg = regs + caller_saved[i]; + reg->type = NOT_INIT; + reg->imm = 0; + } + + /* mark destination R0 register as readable, since it contains + * the value fetched from the packet + */ + regs[BPF_REG_0].type = UNKNOWN_VALUE; + return 0; +} + /* non-recursive DFS pseudo code * 1 procedure DFS-iterative(G,v): * 2 label v as discovered @@ -1417,12 +1473,33 @@ static bool states_equal(struct verifier_state *old, struct verifier_state *cur) } for (i = 0; i < MAX_BPF_STACK; i++) { - if (memcmp(&old->stack[i], &cur->stack[i], - sizeof(old->stack[0])) != 0) { - if (old->stack[i].stype == STACK_INVALID) - continue; + if (old->stack_slot_type[i] == STACK_INVALID) + continue; + if (old->stack_slot_type[i] != cur->stack_slot_type[i]) + /* Ex: old explored (safe) state has STACK_SPILL in + * this stack slot, but current has STACK_MISC -> + * these verifier states are not equivalent, + * return false to continue verification of this path + */ return false; - } + if (i % BPF_REG_SIZE) + continue; + if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE], + &cur->spilled_regs[i / BPF_REG_SIZE], + sizeof(old->spilled_regs[0]))) + /* when explored and current stack slot types are + * the same, check that stored pointer types + * are the same as well. + * Ex: explored safe path could have stored + * (struct reg_state) {.type = PTR_TO_STACK, .imm = -8} + * but current path has stored: + * (struct reg_state) {.type = PTR_TO_STACK, .imm = -16} + * such verifier states are not equivalent.
+ * return false to continue verification of this path + */ + return false; + else + continue; } return true; } @@ -1664,8 +1741,10 @@ process_bpf_exit: u8 mode = BPF_MODE(insn->code); if (mode == BPF_ABS || mode == BPF_IND) { - verbose("LD_ABS is not supported yet\n"); - return -EINVAL; + err = check_ld_abs(env, insn); + if (err) + return err; + } else if (mode == BPF_IMM) { err = check_ld_imm(env, insn); if (err) diff --git a/lib/Makefile b/lib/Makefile index 923a191eaf7163559e19cc6bebe5ad2a4489c280..3c3b30b9e020d2e4bc02edc73575555641aad057 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ - percpu-refcount.o percpu_ida.o hash.o rhashtable.o reciprocal_div.o + percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o obj-y += string_helpers.o obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o obj-y += kstrtox.o diff --git a/lib/hash.c b/lib/hash.c deleted file mode 100644 index fea973f4bd57ae4f4638817becebe016fa1dd716..0000000000000000000000000000000000000000 --- a/lib/hash.c +++ /dev/null @@ -1,39 +0,0 @@ -/* General purpose hashing library - * - * That's a start of a kernel hashing library, which can be extended - * with further algorithms in future. arch_fast_hash{2,}() will - * eventually resolve to an architecture optimized implementation. - * - * Copyright 2013 Francesco Fusco - * Copyright 2013 Daniel Borkmann - * Copyright 2013 Thomas Graf - * Licensed under the GNU General Public License, version 2.0 (GPLv2) - */ - -#include -#include -#include - -static struct fast_hash_ops arch_hash_ops __read_mostly = { - .hash = jhash, - .hash2 = jhash2, -}; - -u32 arch_fast_hash(const void *data, u32 len, u32 seed) -{ - return arch_hash_ops.hash(data, len, seed); -} -EXPORT_SYMBOL_GPL(arch_fast_hash); - -u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed) -{ - return arch_hash_ops.hash2(data, len, seed); -} -EXPORT_SYMBOL_GPL(arch_fast_hash2); - -static int __init hashlib_init(void) -{ - setup_arch_fast_hash(&arch_hash_ops); - return 0; -} -early_initcall(hashlib_init); diff --git a/lib/iovec.c b/lib/iovec.c index df3abd1eaa4a52da47f806ffa7f4d93867366637..2d99cb4a500634b43fad5fe176c4951795b60721 100644 --- a/lib/iovec.c +++ b/lib/iovec.c @@ -27,31 +27,6 @@ int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len) } EXPORT_SYMBOL(memcpy_fromiovec); -/* - * Copy kernel to iovec. Returns -EFAULT on error. - * - * Note: this modifies the original iovec. - */ - -int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len) -{ - while (len > 0) { - if (iov->iov_len) { - int copy = min_t(unsigned int, iov->iov_len, len); - if (copy_to_user(iov->iov_base, kdata, copy)) - return -EFAULT; - kdata += copy; - len -= copy; - iov->iov_len -= copy; - iov->iov_base += copy; - } - iov++; - } - - return 0; -} -EXPORT_SYMBOL(memcpy_toiovec); - /* * Copy kernel to iovec. Returns -EFAULT on error. 
*/ diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 624a0b7c05ef15934cee4a2574852f74620878a9..6c3c723e902bb42c194fbf1c979a2197a549ffc2 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -20,7 +20,7 @@ #include #include #include -#include <linux/hash.h> +#include <linux/jhash.h> #include #include @@ -32,7 +32,7 @@ #ifdef CONFIG_PROVE_LOCKING int lockdep_rht_mutex_is_held(const struct rhashtable *ht) { - return ht->p.mutex_is_held(); + return ht->p.mutex_is_held(ht->p.parent); } EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); #endif @@ -107,13 +107,13 @@ static u32 head_hashfn(const struct rhashtable *ht, return obj_hashfn(ht, rht_obj(ht, he), hsize); } -static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags) +static struct bucket_table *bucket_table_alloc(size_t nbuckets) { struct bucket_table *tbl; size_t size; size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); - tbl = kzalloc(size, flags); + tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); if (tbl == NULL) tbl = vzalloc(size); @@ -200,7 +200,6 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, /** * rhashtable_expand - Expand hash table while allowing concurrent lookups * @ht: the hash table to expand - * @flags: allocation flags * * A secondary bucket array is allocated and the hash entries are migrated * while keeping them on both lists until the end of the RCU grace period. @@ -211,7 +210,7 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, * The caller must ensure that no concurrent table mutations take place. * It is however valid to have concurrent lookups if they are RCU protected. */ -int rhashtable_expand(struct rhashtable *ht, gfp_t flags) +int rhashtable_expand(struct rhashtable *ht) { struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); struct rhash_head *he; @@ -223,7 +222,7 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags) if (ht->p.max_shift && ht->shift >= ht->p.max_shift) return 0; - new_tbl = bucket_table_alloc(old_tbl->size * 2, flags); + new_tbl = bucket_table_alloc(old_tbl->size * 2); if (new_tbl == NULL) return -ENOMEM; @@ -281,7 +280,6 @@ EXPORT_SYMBOL_GPL(rhashtable_expand); /** * rhashtable_shrink - Shrink hash table while allowing concurrent lookups * @ht: the hash table to shrink - * @flags: allocation flags * * This function may only be called in a context where it is safe to call * synchronize_rcu(), e.g. not within a rcu_read_lock() section. @@ -289,7 +287,7 @@ EXPORT_SYMBOL_GPL(rhashtable_expand); * The caller must ensure that no concurrent table mutations take place. * It is however valid to have concurrent lookups if they are RCU protected. */ -int rhashtable_shrink(struct rhashtable *ht, gfp_t flags) +int rhashtable_shrink(struct rhashtable *ht) { struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht); struct rhash_head __rcu **pprev; @@ -300,7 +298,7 @@ int rhashtable_shrink(struct rhashtable *ht, gfp_t flags) if (ht->shift <= ht->p.min_shift) return 0; - ntbl = bucket_table_alloc(tbl->size / 2, flags); + ntbl = bucket_table_alloc(tbl->size / 2); if (ntbl == NULL) return -ENOMEM; @@ -341,7 +339,6 @@ EXPORT_SYMBOL_GPL(rhashtable_shrink); * rhashtable_insert - insert object into hash table * @ht: hash table * @obj: pointer to hash head inside object - * @flags: allocation flags (table expansion) * * Will automatically grow the table via rhashtable_expand() if the * grow_decision function specified at rhashtable_init() returns true. @@ -349,8 +346,7 @@ EXPORT_SYMBOL_GPL(rhashtable_shrink); * The caller must ensure that no concurrent table mutations occur. It is * however valid to have concurrent lookups if they are RCU protected. */ -void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, - gfp_t flags) +void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) { struct bucket_table *tbl = rht_dereference(ht->tbl, ht); u32 hash; @@ -363,7 +359,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, ht->nelems++; if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size)) - rhashtable_expand(ht, flags); + rhashtable_expand(ht); } EXPORT_SYMBOL_GPL(rhashtable_insert);
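[Editor's note] After this change a caller sketch looks like the following, reusing the test_obj/ht names from the self-test further down; my_table_mutex stands in for whatever lock the caller uses to serialize mutations:

	struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return -ENOMEM;
	obj->value = 42;

	mutex_lock(&my_table_mutex);		/* writers are serialized */
	rhashtable_insert(&ht, &obj->node);	/* may grow via rhashtable_expand() */
	mutex_unlock(&my_table_mutex);

Allocation context no longer leaks into the API: table growth always allocates with GFP_KERNEL (falling back to vzalloc), which is why the gfp_t parameter could be dropped from insert/remove/expand/shrink.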
@@ -372,14 +368,13 @@ EXPORT_SYMBOL_GPL(rhashtable_insert); * @ht: hash table * @obj: pointer to hash head inside object * @pprev: pointer to previous element - * @flags: allocation flags (table expansion) * * Identical to rhashtable_remove() but caller is already aware of the element * in front of the element to be deleted. This is particularly useful for * deletion when combined with walking or lookup. */ void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, - struct rhash_head __rcu **pprev, gfp_t flags) + struct rhash_head __rcu **pprev) { struct bucket_table *tbl = rht_dereference(ht->tbl, ht); @@ -390,7 +385,7 @@ void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size)) - rhashtable_shrink(ht, flags); + rhashtable_shrink(ht); } EXPORT_SYMBOL_GPL(rhashtable_remove_pprev); @@ -398,7 +393,6 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev); * rhashtable_remove - remove object from hash table * @ht: hash table * @obj: pointer to hash head inside object - * @flags: allocation flags (table expansion) * * Since the hash chain is single linked, the removal operation needs to * walk the bucket chain upon removal. The removal operation is thus @@ -410,8 +404,7 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev); * The caller must ensure that no concurrent table mutations occur. It is * however valid to have concurrent lookups if they are RCU protected.
*/ -bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj, - gfp_t flags) +bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj) { struct bucket_table *tbl = rht_dereference(ht->tbl, ht); struct rhash_head __rcu **pprev; @@ -429,7 +422,7 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj, continue; } - rhashtable_remove_pprev(ht, he, pprev, flags); + rhashtable_remove_pprev(ht, he, pprev); return true; } @@ -531,8 +524,10 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params) * .head_offset = offsetof(struct test_obj, node), * .key_offset = offsetof(struct test_obj, key), * .key_len = sizeof(int), - * .hashfn = arch_fast_hash, + * .hashfn = jhash, + * #ifdef CONFIG_PROVE_LOCKING * .mutex_is_held = &my_mutex_is_held, + * #endif * }; * * Configuration Example 2: Variable length keys @@ -550,9 +545,11 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params) * * struct rhashtable_params params = { * .head_offset = offsetof(struct test_obj, node), - * .hashfn = arch_fast_hash, + * .hashfn = jhash, * .obj_hashfn = my_hash_fn, + * #ifdef CONFIG_PROVE_LOCKING * .mutex_is_held = &my_mutex_is_held, + * #endif * }; */ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) @@ -572,7 +569,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) if (params->nelem_hint) size = rounded_hashtable_size(params); - tbl = bucket_table_alloc(size, GFP_KERNEL); + tbl = bucket_table_alloc(size); if (tbl == NULL) return -ENOMEM; @@ -613,10 +610,12 @@ EXPORT_SYMBOL_GPL(rhashtable_destroy); #define TEST_PTR ((void *) 0xdeadbeef) #define TEST_NEXPANDS 4 -static int test_mutex_is_held(void) +#ifdef CONFIG_PROVE_LOCKING +static int test_mutex_is_held(void *parent) { return 1; } +#endif struct test_obj { void *ptr; @@ -654,15 +653,15 @@ static int __init test_rht_lookup(struct rhashtable *ht) return 0; } -static void test_bucket_stats(struct rhashtable *ht, - struct bucket_table *tbl, - bool quiet) +static void test_bucket_stats(struct rhashtable *ht, bool quiet) { - unsigned int cnt, i, total = 0; + unsigned int cnt, rcu_cnt, i, total = 0; struct test_obj *obj; + struct bucket_table *tbl; + tbl = rht_dereference_rcu(ht->tbl, ht); for (i = 0; i < tbl->size; i++) { - cnt = 0; + rcu_cnt = cnt = 0; if (!quiet) pr_info(" [%#4x/%zu]", i, tbl->size); @@ -674,6 +673,13 @@ static void test_bucket_stats(struct rhashtable *ht, pr_cont(" [%p],", obj); } + rht_for_each_entry_rcu(obj, tbl->buckets[i], node) + rcu_cnt++; + + if (rcu_cnt != cnt) + pr_warn("Test failed: Chain count mismatch %d != %d", + cnt, rcu_cnt); + if (!quiet) pr_cont("\n [%#x] first element: %p, chain length: %u\n", i, tbl->buckets[i], cnt); @@ -681,6 +687,9 @@ static void test_bucket_stats(struct rhashtable *ht, pr_info(" Traversal complete: counted=%u, nelems=%zu, entries=%d\n", total, ht->nelems, TEST_ENTRIES); + + if (total != ht->nelems || total != TEST_ENTRIES) + pr_warn("Test failed: Total count mismatch ^^^"); } static int __init test_rhashtable(struct rhashtable *ht) @@ -707,18 +716,17 @@ static int __init test_rhashtable(struct rhashtable *ht) obj->ptr = TEST_PTR; obj->value = i * 2; - rhashtable_insert(ht, &obj->node, GFP_KERNEL); + rhashtable_insert(ht, &obj->node); } rcu_read_lock(); - tbl = rht_dereference_rcu(ht->tbl, ht); - test_bucket_stats(ht, tbl, true); + test_bucket_stats(ht, true); test_rht_lookup(ht); rcu_read_unlock(); for (i = 0; i < TEST_NEXPANDS; i++) { pr_info(" Table expansion iteration %u...\n", i); -
rhashtable_expand(ht, GFP_KERNEL); + rhashtable_expand(ht); rcu_read_lock(); pr_info(" Verifying lookups...\n"); @@ -728,7 +736,7 @@ static int __init test_rhashtable(struct rhashtable *ht) for (i = 0; i < TEST_NEXPANDS; i++) { pr_info(" Table shrinkage iteration %u...\n", i); - rhashtable_shrink(ht, GFP_KERNEL); + rhashtable_shrink(ht); rcu_read_lock(); pr_info(" Verifying lookups...\n"); @@ -736,6 +744,10 @@ static int __init test_rhashtable(struct rhashtable *ht) rcu_read_unlock(); } + rcu_read_lock(); + test_bucket_stats(ht, true); + rcu_read_unlock(); + pr_info(" Deleting %d keys\n", TEST_ENTRIES); for (i = 0; i < TEST_ENTRIES; i++) { u32 key = i * 2; @@ -743,7 +755,7 @@ static int __init test_rhashtable(struct rhashtable *ht) obj = rhashtable_lookup(ht, &key); BUG_ON(!obj); - rhashtable_remove(ht, &obj->node, GFP_KERNEL); + rhashtable_remove(ht, &obj->node); kfree(obj); } @@ -766,8 +778,10 @@ static int __init test_rht_init(void) .head_offset = offsetof(struct test_obj, node), .key_offset = offsetof(struct test_obj, value), .key_len = sizeof(int), - .hashfn = arch_fast_hash, + .hashfn = jhash, +#ifdef CONFIG_PROVE_LOCKING .mutex_is_held = &test_mutex_is_held, +#endif .grow_decision = rht_grow_above_75, .shrink_decision = rht_shrink_below_30, }; diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 23e070bcf72d3dea7dea8022e0381508662598c3..80d78c51f65fc2bf007b2cc9222bb0714d4b3e68 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -124,7 +124,7 @@ static struct bpf_test tests[] = { { { 0, 0xfffffffd } } }, { - "DIV_KX", + "DIV_MOD_KX", .u.insns = { BPF_STMT(BPF_LD | BPF_IMM, 8), BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2), @@ -134,12 +134,18 @@ static struct bpf_test tests[] = { BPF_STMT(BPF_MISC | BPF_TAX, 0), BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff), BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff), + BPF_STMT(BPF_ALU | BPF_MOD | BPF_X, 0), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff), + BPF_STMT(BPF_ALU | BPF_MOD | BPF_K, 0x70000000), BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), BPF_STMT(BPF_RET | BPF_A, 0) }, CLASSIC | FLAG_NO_DATA, { }, - { { 0, 0x40000001 } } + { { 0, 0x20000000 } } }, { "AND_OR_LSH_K", @@ -1756,6 +1762,49 @@ static struct bpf_test tests[] = { { }, { { 0, 1 } } }, + { + "nmap reduced", + .u.insns_int = { + BPF_MOV64_REG(R6, R1), + BPF_LD_ABS(BPF_H, 12), + BPF_JMP_IMM(BPF_JNE, R0, 0x806, 28), + BPF_LD_ABS(BPF_H, 12), + BPF_JMP_IMM(BPF_JNE, R0, 0x806, 26), + BPF_MOV32_IMM(R0, 18), + BPF_STX_MEM(BPF_W, R10, R0, -64), + BPF_LDX_MEM(BPF_W, R7, R10, -64), + BPF_LD_IND(BPF_W, R7, 14), + BPF_STX_MEM(BPF_W, R10, R0, -60), + BPF_MOV32_IMM(R0, 280971478), + BPF_STX_MEM(BPF_W, R10, R0, -56), + BPF_LDX_MEM(BPF_W, R7, R10, -56), + BPF_LDX_MEM(BPF_W, R0, R10, -60), + BPF_ALU32_REG(BPF_SUB, R0, R7), + BPF_JMP_IMM(BPF_JNE, R0, 0, 15), + BPF_LD_ABS(BPF_H, 12), + BPF_JMP_IMM(BPF_JNE, R0, 0x806, 13), + BPF_MOV32_IMM(R0, 22), + BPF_STX_MEM(BPF_W, R10, R0, -56), + BPF_LDX_MEM(BPF_W, R7, R10, -56), + BPF_LD_IND(BPF_H, R7, 14), + BPF_STX_MEM(BPF_W, R10, R0, -52), + BPF_MOV32_IMM(R0, 17366), + BPF_STX_MEM(BPF_W, R10, R0, -48), + BPF_LDX_MEM(BPF_W, R7, R10, -48), + BPF_LDX_MEM(BPF_W, R0, R10, -52), + BPF_ALU32_REG(BPF_SUB, R0, R7), + BPF_JMP_IMM(BPF_JNE, R0, 0, 2), + BPF_MOV32_IMM(R0, 256), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0x10, 0xbf, 0x48, 
0xd6, 0x43, 0xd6}, + { { 38, 256 } } + }, }; static struct net_device dev; diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c index 142eef55c9e2d92c180786d9919ded72536d94ba..32ffec6ef1643427513529f33086e89fb751f991 100644 --- a/net/6lowpan/iphc.c +++ b/net/6lowpan/iphc.c @@ -15,9 +15,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /* Jon's code is based on 6lowpan implementation for Contiki which is: @@ -171,37 +168,6 @@ static int uncompress_context_based_src_addr(struct sk_buff *skb, return 0; } -static int skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr, - struct net_device *dev, skb_delivery_cb deliver_skb) -{ - struct sk_buff *new; - int stat; - - new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb), - GFP_ATOMIC); - kfree_skb(skb); - - if (!new) - return -ENOMEM; - - skb_push(new, sizeof(struct ipv6hdr)); - skb_reset_network_header(new); - skb_copy_to_linear_data(new, hdr, sizeof(struct ipv6hdr)); - - new->protocol = htons(ETH_P_IPV6); - new->pkt_type = PACKET_HOST; - new->dev = dev; - - raw_dump_table(__func__, "raw skb data dump before receiving", - new->data, new->len); - - stat = deliver_skb(new, dev); - - kfree_skb(new); - - return stat; -} - /* Uncompress function for multicast destination address, * when M bit is set. */ @@ -332,10 +298,12 @@ err: /* TTL uncompression values */ static const u8 lowpan_ttl_values[] = { 0, 1, 64, 255 }; -int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, - const u8 *saddr, const u8 saddr_type, const u8 saddr_len, - const u8 *daddr, const u8 daddr_type, const u8 daddr_len, - u8 iphc0, u8 iphc1, skb_delivery_cb deliver_skb) +int +lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev, + const u8 *saddr, const u8 saddr_type, + const u8 saddr_len, const u8 *daddr, + const u8 daddr_type, const u8 daddr_len, + u8 iphc0, u8 iphc1) { struct ipv6hdr hdr = {}; u8 tmp, num_context = 0; @@ -348,7 +316,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, if (iphc1 & LOWPAN_IPHC_CID) { pr_debug("CID flag is set, increase header with one\n"); if (lowpan_fetch_skb(skb, &num_context, sizeof(num_context))) - goto drop; + return -EINVAL; } hdr.version = 6; @@ -360,7 +328,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, */ case 0: /* 00b */ if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp))) - goto drop; + return -EINVAL; memcpy(&hdr.flow_lbl, &skb->data[0], 3); skb_pull(skb, 3); @@ -373,7 +341,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, */ case 2: /* 10b */ if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp))) - goto drop; + return -EINVAL; hdr.priority = ((tmp >> 2) & 0x0f); hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30); @@ -383,7 +351,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, */ case 1: /* 01b */ if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp))) - goto drop; + return -EINVAL; hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30); memcpy(&hdr.flow_lbl[1], &skb->data[0], 2); @@ -400,7 +368,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) { /* Next header is carried inline */ if (lowpan_fetch_skb(skb, &hdr.nexthdr, sizeof(hdr.nexthdr))) - goto drop; + return 
-EINVAL; pr_debug("NH flag is set, next header carried inline: %02x\n", hdr.nexthdr); @@ -412,7 +380,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, } else { if (lowpan_fetch_skb(skb, &hdr.hop_limit, sizeof(hdr.hop_limit))) - goto drop; + return -EINVAL; } /* Extract SAM to the tmp variable */ @@ -431,7 +399,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, /* Check on error of previous branch */ if (err) - goto drop; + return -EINVAL; /* Extract DAM to the tmp variable */ tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03; @@ -446,7 +414,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, tmp); if (err) - goto drop; + return -EINVAL; } } else { err = uncompress_addr(skb, &hdr.daddr, tmp, daddr, @@ -454,28 +422,23 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, pr_debug("dest: stateless compression mode %d dest %pI6c\n", tmp, &hdr.daddr); if (err) - goto drop; + return -EINVAL; } /* UDP data uncompression */ if (iphc0 & LOWPAN_IPHC_NH_C) { struct udphdr uh; - struct sk_buff *new; + const int needed = sizeof(struct udphdr) + sizeof(hdr); if (uncompress_udp_header(skb, &uh)) - goto drop; + return -EINVAL; /* replace the compressed UDP head by the uncompressed UDP * header */ - new = skb_copy_expand(skb, sizeof(struct udphdr), - skb_tailroom(skb), GFP_ATOMIC); - kfree_skb(skb); - - if (!new) - return -ENOMEM; - - skb = new; + err = skb_cow(skb, needed); + if (unlikely(err)) + return err; skb_push(skb, sizeof(struct udphdr)); skb_reset_transport_header(skb); @@ -485,6 +448,10 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, (u8 *)&uh, sizeof(uh)); hdr.nexthdr = UIP_PROTO_UDP; + } else { + err = skb_cow(skb, sizeof(hdr)); + if (unlikely(err)) + return err; } hdr.payload_len = htons(skb->len); @@ -497,15 +464,15 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, hdr.version, ntohs(hdr.payload_len), hdr.nexthdr, hdr.hop_limit, &hdr.daddr); - raw_dump_table(__func__, "raw header dump", (u8 *)&hdr, sizeof(hdr)); + skb_push(skb, sizeof(hdr)); + skb_reset_network_header(skb); + skb_copy_to_linear_data(skb, &hdr, sizeof(hdr)); - return skb_deliver(skb, &hdr, dev, deliver_skb); + raw_dump_table(__func__, "raw header dump", (u8 *)&hdr, sizeof(hdr)); -drop: - kfree_skb(skb); - return -EINVAL; + return 0; } -EXPORT_SYMBOL_GPL(lowpan_process_data); +EXPORT_SYMBOL_GPL(lowpan_header_decompress); static u8 lowpan_compress_addr_64(u8 **hc_ptr, u8 shift, const struct in6_addr *ipaddr, @@ -535,9 +502,17 @@ static u8 lowpan_compress_addr_64(u8 **hc_ptr, u8 shift, static void compress_udp_header(u8 **hc_ptr, struct sk_buff *skb) { - struct udphdr *uh = udp_hdr(skb); + struct udphdr *uh; u8 tmp; + /* In the case of RAW sockets the transport header is not set by + * the ip6 stack so we must set it ourselves + */ + if (skb->transport_header == skb->network_header) + skb_set_transport_header(skb, sizeof(struct ipv6hdr)); + + uh = udp_hdr(skb); + if (((ntohs(uh->source) & LOWPAN_NHC_UDP_4BIT_MASK) == LOWPAN_NHC_UDP_4BIT_PORT) && ((ntohs(uh->dest) & LOWPAN_NHC_UDP_4BIT_MASK) == diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 0d441ec8763ec87eec018d2b043b82a5ead50aa0..376805005cc7f8a6d18dab16229a2560d877ed19 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -150,7 +151,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, u16 vlan_tci; vlan_tci 
= vlan->vlan_id; vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority); - skb = __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci); + __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci); } skb->dev = vlan->real_dev; @@ -669,6 +670,23 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev, strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); } +static int vlan_ethtool_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + const struct ethtool_ops *ops = vlan->real_dev->ethtool_ops; + + if (ops->get_ts_info) { + return ops->get_ts_info(vlan->real_dev, info); + } else { + info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + } + + return 0; +} + static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct vlan_pcpu_stats *p; @@ -752,6 +770,7 @@ static const struct ethtool_ops vlan_ethtool_ops = { .get_settings = vlan_ethtool_get_settings, .get_drvinfo = vlan_ethtool_get_drvinfo, .get_link = ethtool_op_get_link, + .get_ts_info = vlan_ethtool_get_ts_info, }; static const struct net_device_ops vlan_netdev_ops = { diff --git a/net/Kconfig b/net/Kconfig index 99815b5454bf4d68fdc36153951e434f7219b4d0..ff9ffc17fa0e1fc438e9e4ebec20376ddd6ec969 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -228,6 +228,7 @@ source "net/vmw_vsock/Kconfig" source "net/netlink/Kconfig" source "net/mpls/Kconfig" source "net/hsr/Kconfig" +source "net/switchdev/Kconfig" config RPS boolean diff --git a/net/Makefile b/net/Makefile index 7ed1970074b07bfcf0803f468f62efba7a9018ac..95fc694e4ddc088ca419a745a8567f700a1b535a 100644 --- a/net/Makefile +++ b/net/Makefile @@ -73,3 +73,6 @@ obj-$(CONFIG_OPENVSWITCH) += openvswitch/ obj-$(CONFIG_VSOCKETS) += vmw_vsock/ obj-$(CONFIG_NET_MPLS_GSO) += mpls/ obj-$(CONFIG_HSR) += hsr/ +ifneq ($(CONFIG_NET_SWITCHDEV),) +obj-y += switchdev/ +endif diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index c00897f65a31eb2a7f9cf8fa8da71bfd56cc7fc5..0d0766ea5ab104c5bba47f3b8ca32b1858e9c94d 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1659,7 +1659,7 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr SOCK_DEBUG(sk, "SK %p: Copy user data (%Zd bytes).\n", sk, len); - err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); + err = memcpy_from_msg(skb_put(skb, len), msg, len); if (err) { kfree_skb(skb); err = -EFAULT; @@ -1758,7 +1758,7 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr copied = size; msg->msg_flags |= MSG_TRUNC; } - err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, offset, msg, copied); if (!err && msg->msg_name) { DECLARE_SOCKADDR(struct sockaddr_at *, sat, msg->msg_name); diff --git a/net/atm/common.c b/net/atm/common.c index 6a765156a3f6b81fcd63f43504d1a7faca8f206b..b84057e41bd6a364281ea85f24e60b82e4ec6cfd 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -554,7 +554,7 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, msg->msg_flags |= MSG_TRUNC; } - error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + error = skb_copy_datagram_msg(skb, 0, msg, copied); if (error) return error; sock_recv_ts_and_drops(msg, sk, skb); @@ -570,15 +570,13 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, } int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct 
msghdr *m, - size_t total_len) + size_t size) { struct sock *sk = sock->sk; DEFINE_WAIT(wait); struct atm_vcc *vcc; struct sk_buff *skb; int eff, error; - const void __user *buff; - int size; lock_sock(sk); if (sock->state != SS_CONNECTED) { @@ -589,12 +587,6 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, error = -EISCONN; goto out; } - if (m->msg_iovlen != 1) { - error = -ENOSYS; /* fix this later @@@ */ - goto out; - } - buff = m->msg_iov->iov_base; - size = m->msg_iov->iov_len; vcc = ATM_SD(sock); if (test_bit(ATM_VF_RELEASED, &vcc->flags) || test_bit(ATM_VF_CLOSE, &vcc->flags) || @@ -607,7 +599,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, error = 0; goto out; } - if (size < 0 || size > vcc->qos.txtp.max_sdu) { + if (size > vcc->qos.txtp.max_sdu) { error = -EMSGSIZE; goto out; } @@ -639,7 +631,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, goto out; skb->dev = NULL; /* for paths shared with net_device interfaces */ ATM_SKB(skb)->atm_options = vcc->atm_options; - if (copy_from_user(skb_put(skb, size), buff, size)) { + if (copy_from_iter(skb_put(skb, size), size, &m->msg_iter) != size) { kfree_skb(skb); error = -EFAULT; goto out; diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index c35c3f48fc0ff6e130d940b3d8b24b208250a738..ca049a7c9287d703f789b5842472a768152dd7ca 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1549,7 +1549,7 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock, skb_reserve(skb, size - len); /* User data follows immediately after the AX.25 data */ - if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { + if (memcpy_from_msg(skb_put(skb, len), msg, len)) { err = -EFAULT; kfree_skb(skb); goto out; @@ -1634,7 +1634,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock, msg->msg_flags |= MSG_TRUNC; } - skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + skb_copy_datagram_msg(skb, 0, msg, copied); if (msg->msg_name) { ax25_digi digi; diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index c2e0d14433df6678011e07b00045c68e5cf7f998..76617be1e797ba6d41c72fbc8f582d5a5614ff3e 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -53,7 +53,7 @@ struct skb_cb { * The list contains struct lowpan_dev elements. */ static LIST_HEAD(bt_6lowpan_devices); -static DEFINE_RWLOCK(devices_lock); +static DEFINE_SPINLOCK(devices_lock); /* If psm is set to 0 (default value), then 6lowpan is disabled. 
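The iovec removals above (appletalk, atm, ax25) all funnel through the new msghdr helpers instead of touching msg->msg_iov directly. A minimal sketch of what those helpers boil down to once msghdr carries an iov_iter; names are suffixed _sketch because the in-tree definitions of memcpy_from_msg() and skb_copy_datagram_msg() may differ in detail:

        #include <linux/skbuff.h>
        #include <linux/socket.h>
        #include <linux/uio.h>

        /* Drain len bytes of the message payload into a flat kernel buffer. */
        static inline int memcpy_from_msg_sketch(void *data, struct msghdr *msg, int len)
        {
                return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
        }

        /* Copy size bytes of a received skb into the (possibly vectored) buffer. */
        static inline int skb_copy_datagram_msg_sketch(const struct sk_buff *from, int offset,
                                                       struct msghdr *msg, int size)
        {
                return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
        }

The payoff is visible in vcc_sendmsg() above: the msg_iovlen != 1 special case and the raw iov_base/iov_len plumbing disappear, and copy_from_iter() handles arbitrary vectors for free.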
* Other values are used to indicate a Protocol Service Multiplexer @@ -67,6 +67,7 @@ static struct l2cap_chan *listen_chan; struct lowpan_peer { struct list_head list; + struct rcu_head rcu; struct l2cap_chan *chan; /* peer addresses in various formats */ @@ -93,13 +94,14 @@ static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev) static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer) { - list_add(&peer->list, &dev->peers); + list_add_rcu(&peer->list, &dev->peers); atomic_inc(&dev->peer_count); } static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer) { - list_del(&peer->list); + list_del_rcu(&peer->list); + kfree_rcu(peer, rcu); module_put(THIS_MODULE); @@ -114,31 +116,37 @@ static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer) static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev, bdaddr_t *ba, __u8 type) { - struct lowpan_peer *peer, *tmp; + struct lowpan_peer *peer; BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count), ba, type); - list_for_each_entry_safe(peer, tmp, &dev->peers, list) { + rcu_read_lock(); + + list_for_each_entry_rcu(peer, &dev->peers, list) { BT_DBG("dst addr %pMR dst type %d", &peer->chan->dst, peer->chan->dst_type); if (bacmp(&peer->chan->dst, ba)) continue; - if (type == peer->chan->dst_type) + if (type == peer->chan->dst_type) { + rcu_read_unlock(); return peer; + } } + rcu_read_unlock(); + return NULL; } -static inline struct lowpan_peer *peer_lookup_chan(struct lowpan_dev *dev, - struct l2cap_chan *chan) +static inline struct lowpan_peer *__peer_lookup_chan(struct lowpan_dev *dev, + struct l2cap_chan *chan) { - struct lowpan_peer *peer, *tmp; + struct lowpan_peer *peer; - list_for_each_entry_safe(peer, tmp, &dev->peers, list) { + list_for_each_entry_rcu(peer, &dev->peers, list) { if (peer->chan == chan) return peer; } @@ -146,12 +154,12 @@ static inline struct lowpan_peer *peer_lookup_chan(struct lowpan_dev *dev, return NULL; } -static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev, - struct l2cap_conn *conn) +static inline struct lowpan_peer *__peer_lookup_conn(struct lowpan_dev *dev, + struct l2cap_conn *conn) { - struct lowpan_peer *peer, *tmp; + struct lowpan_peer *peer; - list_for_each_entry_safe(peer, tmp, &dev->peers, list) { + list_for_each_entry_rcu(peer, &dev->peers, list) { if (peer->chan->conn == conn) return peer; } @@ -163,7 +171,7 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev, struct in6_addr *daddr, struct sk_buff *skb) { - struct lowpan_peer *peer, *tmp; + struct lowpan_peer *peer; struct in6_addr *nexthop; struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); int count = atomic_read(&dev->peer_count); @@ -174,9 +182,13 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev, * send the packet. If only one peer exists, then we can send the * packet right away. 
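The rwlock-to-RCU conversion in this file follows the usual recipe: readers run under rcu_read_lock() with list_for_each_entry_rcu(), writers serialize on a plain spinlock and retire entries with list_del_rcu()/kfree_rcu(). A condensed, generic sketch of the pattern (not the 6lowpan code itself); as in the patch, the double-underscore lookup assumes the caller already holds the RCU read lock:

        #include <linux/list.h>
        #include <linux/rcupdate.h>
        #include <linux/slab.h>
        #include <linux/spinlock.h>

        struct item {
                struct list_head list;
                struct rcu_head rcu;
                int key;
        };

        static LIST_HEAD(items);
        static DEFINE_SPINLOCK(items_lock);     /* serializes writers only */

        /* Caller must hold rcu_read_lock(). */
        static struct item *__item_lookup(int key)
        {
                struct item *it;

                list_for_each_entry_rcu(it, &items, list)
                        if (it->key == key)
                                return it;
                return NULL;
        }

        static void item_del(struct item *it)
        {
                spin_lock(&items_lock);
                list_del_rcu(&it->list);
                spin_unlock(&items_lock);
                kfree_rcu(it, rcu);             /* freed after a grace period */
        }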
*/ - if (count == 1) - return list_first_entry(&dev->peers, struct lowpan_peer, - list); + if (count == 1) { + rcu_read_lock(); + peer = list_first_or_null_rcu(&dev->peers, struct lowpan_peer, + list); + rcu_read_unlock(); + return peer; + } if (!rt) { nexthop = &lowpan_cb(skb)->gw; @@ -195,53 +207,57 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev, BT_DBG("gw %pI6c", nexthop); - list_for_each_entry_safe(peer, tmp, &dev->peers, list) { + rcu_read_lock(); + + list_for_each_entry_rcu(peer, &dev->peers, list) { BT_DBG("dst addr %pMR dst type %d ip %pI6c", &peer->chan->dst, peer->chan->dst_type, &peer->peer_addr); - if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) + if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) { + rcu_read_unlock(); return peer; + } } + rcu_read_unlock(); + return NULL; } static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn) { - struct lowpan_dev *entry, *tmp; + struct lowpan_dev *entry; struct lowpan_peer *peer = NULL; - unsigned long flags; - read_lock_irqsave(&devices_lock, flags); + rcu_read_lock(); - list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) { - peer = peer_lookup_conn(entry, conn); + list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { + peer = __peer_lookup_conn(entry, conn); if (peer) break; } - read_unlock_irqrestore(&devices_lock, flags); + rcu_read_unlock(); return peer; } static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn) { - struct lowpan_dev *entry, *tmp; + struct lowpan_dev *entry; struct lowpan_dev *dev = NULL; - unsigned long flags; - read_lock_irqsave(&devices_lock, flags); + rcu_read_lock(); - list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) { + list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { if (conn->hcon->hdev == entry->hdev) { dev = entry; break; } } - read_unlock_irqrestore(&devices_lock, flags); + rcu_read_unlock(); return dev; } @@ -249,59 +265,49 @@ static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn) static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev) { struct sk_buff *skb_cp; - int ret; skb_cp = skb_copy(skb, GFP_ATOMIC); if (!skb_cp) - return -ENOMEM; - - ret = netif_rx(skb_cp); - if (ret < 0) { - BT_DBG("receive skb %d", ret); return NET_RX_DROP; - } - return ret; + return netif_rx(skb_cp); } -static int process_data(struct sk_buff *skb, struct net_device *netdev, - struct l2cap_chan *chan) +static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev, + struct l2cap_chan *chan) { const u8 *saddr, *daddr; u8 iphc0, iphc1; struct lowpan_dev *dev; struct lowpan_peer *peer; - unsigned long flags; dev = lowpan_dev(netdev); - read_lock_irqsave(&devices_lock, flags); - peer = peer_lookup_chan(dev, chan); - read_unlock_irqrestore(&devices_lock, flags); + rcu_read_lock(); + peer = __peer_lookup_chan(dev, chan); + rcu_read_unlock(); if (!peer) - goto drop; + return -EINVAL; saddr = peer->eui64_addr; daddr = dev->netdev->dev_addr; /* at least two bytes will be used for the encoding */ if (skb->len < 2) - goto drop; + return -EINVAL; if (lowpan_fetch_skb_u8(skb, &iphc0)) - goto drop; + return -EINVAL; if (lowpan_fetch_skb_u8(skb, &iphc1)) - goto drop; + return -EINVAL; - return lowpan_process_data(skb, netdev, - saddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN, - daddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN, - iphc0, iphc1, give_skb_to_upper); + return lowpan_header_decompress(skb, netdev, + saddr, IEEE802154_ADDR_LONG, + EUI64_ADDR_LEN, daddr, + IEEE802154_ADDR_LONG, EUI64_ADDR_LEN, + iphc0, iphc1); -drop: - 
kfree_skb(skb); - return -EINVAL; } static int recv_pkt(struct sk_buff *skb, struct net_device *dev, @@ -316,6 +322,10 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev, if (dev->type != ARPHRD_6LOWPAN) goto drop; + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + goto drop; + /* check that it's our buffer */ if (skb->data[0] == LOWPAN_DISPATCH_IPV6) { /* Copy the packet so that the IPv6 header is @@ -340,8 +350,8 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev, dev->stats.rx_bytes += skb->len; dev->stats.rx_packets++; - kfree_skb(local_skb); - kfree_skb(skb); + consume_skb(local_skb); + consume_skb(skb); } else { switch (skb->data[0] & 0xe0) { case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */ @@ -349,14 +359,27 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev, if (!local_skb) goto drop; - ret = process_data(local_skb, dev, chan); - if (ret != NET_RX_SUCCESS) + ret = iphc_decompress(local_skb, dev, chan); + if (ret < 0) { + kfree_skb(local_skb); + goto drop; + } + + local_skb->protocol = htons(ETH_P_IPV6); + local_skb->pkt_type = PACKET_HOST; + local_skb->dev = dev; + + if (give_skb_to_upper(local_skb, dev) + != NET_RX_SUCCESS) { + kfree_skb(local_skb); goto drop; + } dev->stats.rx_bytes += skb->len; dev->stats.rx_packets++; - kfree_skb(skb); + consume_skb(local_skb); + consume_skb(skb); break; default: break; @@ -443,7 +466,6 @@ static int setup_header(struct sk_buff *skb, struct net_device *netdev, if (ipv6_addr_is_multicast(&ipv6_daddr)) { lowpan_cb(skb)->chan = NULL; } else { - unsigned long flags; u8 addr_type; /* Get destination BT device from skb. @@ -454,19 +476,14 @@ static int setup_header(struct sk_buff *skb, struct net_device *netdev, BT_DBG("dest addr %pMR type %d IP %pI6c", &addr, addr_type, &ipv6_daddr); - read_lock_irqsave(&devices_lock, flags); peer = peer_lookup_ba(dev, &addr, addr_type); - read_unlock_irqrestore(&devices_lock, flags); - if (!peer) { /* The packet might be sent to 6lowpan interface * because of routing (either via default route * or user set route) so get peer according to * the destination address. 
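recv_pkt() above now unshares the skb before writing to it, frees successfully handled buffers with consume_skb() (so drop tracepoints stay meaningful) and keeps kfree_skb() strictly for error paths. The shape of the resulting receive path, as a condensed sketch with error handling trimmed; decompress() is a hypothetical stand-in for iphc_decompress():

        #include <linux/if_ether.h>
        #include <linux/netdevice.h>
        #include <linux/skbuff.h>

        static int decompress(struct sk_buff *skb, struct net_device *dev); /* hypothetical */

        static int rx_sketch(struct sk_buff *skb, struct net_device *dev)
        {
                skb = skb_share_check(skb, GFP_ATOMIC); /* private copy if shared */
                if (!skb)
                        return NET_RX_DROP;

                if (decompress(skb, dev) < 0) {
                        kfree_skb(skb);                 /* error: a real drop */
                        return NET_RX_DROP;
                }

                skb->protocol = htons(ETH_P_IPV6);
                skb->pkt_type = PACKET_HOST;
                skb->dev = dev;
                return netif_rx(skb);                   /* stack now owns the skb */
        }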
*/ - read_lock_irqsave(&devices_lock, flags); peer = peer_lookup_dst(dev, &ipv6_daddr, skb); - read_unlock_irqrestore(&devices_lock, flags); if (!peer) { BT_DBG("no such peer %pMR found", &addr); return -ENOENT; @@ -520,12 +537,12 @@ static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb, */ chan->data = skb; - memset(&msg, 0, sizeof(msg)); - msg.msg_iov = (struct iovec *) &iv; - msg.msg_iovlen = 1; iv.iov_base = skb->data; iv.iov_len = skb->len; + memset(&msg, 0, sizeof(msg)); + iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iv, 1, skb->len); + err = l2cap_chan_send(chan, &msg, skb->len); if (err > 0) { netdev->stats.tx_bytes += err; @@ -549,14 +566,13 @@ static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb, static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev) { struct sk_buff *local_skb; - struct lowpan_dev *entry, *tmp; - unsigned long flags; + struct lowpan_dev *entry; int err = 0; - read_lock_irqsave(&devices_lock, flags); + rcu_read_lock(); - list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) { - struct lowpan_peer *pentry, *ptmp; + list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { + struct lowpan_peer *pentry; struct lowpan_dev *dev; if (entry->netdev != netdev) @@ -564,7 +580,7 @@ static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev) dev = lowpan_dev(entry->netdev); - list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) { + list_for_each_entry_rcu(pentry, &dev->peers, list) { int ret; local_skb = skb_clone(skb, GFP_ATOMIC); @@ -581,7 +597,7 @@ static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev) } } - read_unlock_irqrestore(&devices_lock, flags); + rcu_read_unlock(); return err; } @@ -591,17 +607,13 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev) int err = 0; bdaddr_t addr; u8 addr_type; - struct sk_buff *tmpskb; /* We must take a copy of the skb before we modify/replace the ipv6 * header as the header could be used elsewhere */ - tmpskb = skb_unshare(skb, GFP_ATOMIC); - if (!tmpskb) { - kfree_skb(skb); + skb = skb_unshare(skb, GFP_ATOMIC); + if (!skb) return NET_XMIT_DROP; - } - skb = tmpskb; /* Return values from setup_header() * <0 - error, packet is dropped @@ -638,7 +650,26 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev) return err < 0 ? 
NET_XMIT_DROP : err; } +static struct lock_class_key bt_tx_busylock; +static struct lock_class_key bt_netdev_xmit_lock_key; + +static void bt_set_lockdep_class_one(struct net_device *dev, + struct netdev_queue *txq, + void *_unused) +{ + lockdep_set_class(&txq->_xmit_lock, &bt_netdev_xmit_lock_key); +} + +static int bt_dev_init(struct net_device *dev) +{ + netdev_for_each_tx_queue(dev, bt_set_lockdep_class_one, NULL); + dev->qdisc_tx_busylock = &bt_tx_busylock; + + return 0; +} + static const struct net_device_ops netdev_ops = { + .ndo_init = bt_dev_init, .ndo_start_xmit = bt_xmit, }; @@ -783,7 +814,6 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan, struct lowpan_dev *dev) { struct lowpan_peer *peer; - unsigned long flags; peer = kzalloc(sizeof(*peer), GFP_ATOMIC); if (!peer) @@ -806,10 +836,10 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan, */ set_ip_addr_bits(chan->dst_type, (u8 *)&peer->peer_addr.s6_addr + 8); - write_lock_irqsave(&devices_lock, flags); + spin_lock(&devices_lock); INIT_LIST_HEAD(&peer->list); peer_add(dev, peer); - write_unlock_irqrestore(&devices_lock, flags); + spin_unlock(&devices_lock); /* Notifying peers about us needs to be done without locks held */ INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers); @@ -822,7 +852,6 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev) { struct net_device *netdev; int err = 0; - unsigned long flags; netdev = alloc_netdev(sizeof(struct lowpan_dev), IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN, netdev_setup); @@ -852,10 +881,10 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev) (*dev)->hdev = chan->conn->hcon->hdev; INIT_LIST_HEAD(&(*dev)->peers); - write_lock_irqsave(&devices_lock, flags); + spin_lock(&devices_lock); INIT_LIST_HEAD(&(*dev)->list); - list_add(&(*dev)->list, &bt_6lowpan_devices); - write_unlock_irqrestore(&devices_lock, flags); + list_add_rcu(&(*dev)->list, &bt_6lowpan_devices); + spin_unlock(&devices_lock); return 0; @@ -909,11 +938,10 @@ static void delete_netdev(struct work_struct *work) static void chan_close_cb(struct l2cap_chan *chan) { - struct lowpan_dev *entry, *tmp; + struct lowpan_dev *entry; struct lowpan_dev *dev = NULL; struct lowpan_peer *peer; int err = -ENOENT; - unsigned long flags; bool last = false, removed = true; BT_DBG("chan %p conn %p", chan, chan->conn); @@ -928,11 +956,11 @@ static void chan_close_cb(struct l2cap_chan *chan) removed = false; } - write_lock_irqsave(&devices_lock, flags); + spin_lock(&devices_lock); - list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) { + list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { dev = lowpan_dev(entry->netdev); - peer = peer_lookup_chan(dev, chan); + peer = __peer_lookup_chan(dev, chan); if (peer) { last = peer_del(dev, peer); err = 0; @@ -943,13 +971,12 @@ static void chan_close_cb(struct l2cap_chan *chan) atomic_read(&chan->kref.refcount)); l2cap_chan_put(chan); - kfree(peer); break; } } if (!err && last && dev && !atomic_read(&dev->peer_count)) { - write_unlock_irqrestore(&devices_lock, flags); + spin_unlock(&devices_lock); cancel_delayed_work_sync(&dev->notify_peers); @@ -960,7 +987,7 @@ static void chan_close_cb(struct l2cap_chan *chan) schedule_work(&entry->delete_netdev); } } else { - write_unlock_irqrestore(&devices_lock, flags); + spin_unlock(&devices_lock); } return; @@ -1023,7 +1050,6 @@ static const struct l2cap_ops bt_6lowpan_chan_ops = { .suspend = chan_suspend_cb, .get_sndtimeo = chan_get_sndtimeo_cb, .alloc_skb = 
chan_alloc_skb_cb, - .memcpy_fromiovec = l2cap_chan_no_memcpy_fromiovec, .teardown = l2cap_chan_no_teardown, .defer = l2cap_chan_no_defer, @@ -1103,6 +1129,8 @@ static struct l2cap_chan *bt_6lowpan_listen(void) pchan->state = BT_LISTEN; pchan->src_type = BDADDR_LE_PUBLIC; + atomic_set(&pchan->nesting, L2CAP_NESTING_PARENT); + BT_DBG("psm 0x%04x chan %p src type %d", psm_6lowpan, pchan, pchan->src_type); @@ -1152,10 +1180,9 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type, static void disconnect_all_peers(void) { - struct lowpan_dev *entry, *tmp_dev; + struct lowpan_dev *entry; struct lowpan_peer *peer, *tmp_peer, *new_peer; struct list_head peers; - unsigned long flags; INIT_LIST_HEAD(&peers); @@ -1164,10 +1191,10 @@ static void disconnect_all_peers(void) * with the same list at the same time. */ - read_lock_irqsave(&devices_lock, flags); + rcu_read_lock(); - list_for_each_entry_safe(entry, tmp_dev, &bt_6lowpan_devices, list) { - list_for_each_entry_safe(peer, tmp_peer, &entry->peers, list) { + list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { + list_for_each_entry_rcu(peer, &entry->peers, list) { new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC); if (!new_peer) break; @@ -1179,26 +1206,36 @@ static void disconnect_all_peers(void) } } - read_unlock_irqrestore(&devices_lock, flags); + rcu_read_unlock(); + spin_lock(&devices_lock); list_for_each_entry_safe(peer, tmp_peer, &peers, list) { l2cap_chan_close(peer->chan, ENOENT); - kfree(peer); + + list_del_rcu(&peer->list); + kfree_rcu(peer, rcu); + + module_put(THIS_MODULE); } + spin_unlock(&devices_lock); } -static int lowpan_psm_set(void *data, u64 val) -{ +struct set_psm { + struct work_struct work; u16 psm; +}; - psm = val; - if (psm == 0 || psm_6lowpan != psm) +static void do_psm_set(struct work_struct *work) +{ + struct set_psm *set_psm = container_of(work, struct set_psm, work); + + if (set_psm->psm == 0 || psm_6lowpan != set_psm->psm) /* Disconnect existing connections if 6lowpan is * disabled (psm = 0), or if psm changes. 
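disconnect_all_peers() above cannot call l2cap_chan_close() while iterating under RCU (the close path may sleep and may itself modify the list), so it first snapshots the peers into a private list and then works on the copy with no locks held. The pattern in miniature, as a self-contained sketch:

        #include <linux/list.h>
        #include <linux/rcupdate.h>
        #include <linux/slab.h>

        struct snap {
                struct list_head list;
                void *payload;
        };

        static void act_on_all(struct list_head *src_rcu_list)
        {
                struct snap *s, *tmp, *copy;
                LIST_HEAD(snapshot);

                /* Pass 1: copy what we need while the source list may mutate. */
                rcu_read_lock();
                list_for_each_entry_rcu(s, src_rcu_list, list) {
                        copy = kmalloc(sizeof(*copy), GFP_ATOMIC);
                        if (!copy)
                                break;
                        copy->payload = s->payload;
                        list_add(&copy->list, &snapshot);
                }
                rcu_read_unlock();

                /* Pass 2: no locks held, safe to sleep per entry. */
                list_for_each_entry_safe(s, tmp, &snapshot, list) {
                        /* ... operate on s->payload, may sleep ... */
                        list_del(&s->list);
                        kfree(s);
                }
        }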
*/ disconnect_all_peers(); - psm_6lowpan = psm; + psm_6lowpan = set_psm->psm; if (listen_chan) { l2cap_chan_close(listen_chan, 0); @@ -1207,6 +1244,22 @@ static int lowpan_psm_set(void *data, u64 val) listen_chan = bt_6lowpan_listen(); + kfree(set_psm); +} + +static int lowpan_psm_set(void *data, u64 val) +{ + struct set_psm *set_psm; + + set_psm = kzalloc(sizeof(*set_psm), GFP_KERNEL); + if (!set_psm) + return -ENOMEM; + + set_psm->psm = val; + INIT_WORK(&set_psm->work, do_psm_set); + + schedule_work(&set_psm->work); + return 0; } @@ -1288,19 +1341,18 @@ static ssize_t lowpan_control_write(struct file *fp, static int lowpan_control_show(struct seq_file *f, void *ptr) { - struct lowpan_dev *entry, *tmp_dev; - struct lowpan_peer *peer, *tmp_peer; - unsigned long flags; + struct lowpan_dev *entry; + struct lowpan_peer *peer; - read_lock_irqsave(&devices_lock, flags); + spin_lock(&devices_lock); - list_for_each_entry_safe(entry, tmp_dev, &bt_6lowpan_devices, list) { - list_for_each_entry_safe(peer, tmp_peer, &entry->peers, list) + list_for_each_entry(entry, &bt_6lowpan_devices, list) { + list_for_each_entry(peer, &entry->peers, list) seq_printf(f, "%pMR (type %u)\n", &peer->chan->dst, peer->chan->dst_type); } - read_unlock_irqrestore(&devices_lock, flags); + spin_unlock(&devices_lock); return 0; } @@ -1322,7 +1374,6 @@ static void disconnect_devices(void) { struct lowpan_dev *entry, *tmp, *new_dev; struct list_head devices; - unsigned long flags; INIT_LIST_HEAD(&devices); @@ -1331,9 +1382,9 @@ static void disconnect_devices(void) * devices list. */ - read_lock_irqsave(&devices_lock, flags); + rcu_read_lock(); - list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) { + list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC); if (!new_dev) break; @@ -1341,10 +1392,10 @@ static void disconnect_devices(void) new_dev->netdev = entry->netdev; INIT_LIST_HEAD(&new_dev->list); - list_add(&new_dev->list, &devices); + list_add_rcu(&new_dev->list, &devices); } - read_unlock_irqrestore(&devices_lock, flags); + rcu_read_unlock(); list_for_each_entry_safe(entry, tmp, &devices, list) { ifdown(entry->netdev); @@ -1359,17 +1410,15 @@ static int device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *netdev = netdev_notifier_info_to_dev(ptr); - struct lowpan_dev *entry, *tmp; - unsigned long flags; + struct lowpan_dev *entry; if (netdev->type != ARPHRD_6LOWPAN) return NOTIFY_DONE; switch (event) { case NETDEV_UNREGISTER: - write_lock_irqsave(&devices_lock, flags); - list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, - list) { + spin_lock(&devices_lock); + list_for_each_entry(entry, &bt_6lowpan_devices, list) { if (entry->netdev == netdev) { BT_DBG("Unregistered netdev %s %p", netdev->name, netdev); @@ -1378,7 +1427,7 @@ static int device_event(struct notifier_block *unused, break; } } - write_unlock_irqrestore(&devices_lock, flags); + spin_unlock(&devices_lock); break; } diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig index 600fb29288f47c48ce17583d769d502746854bd1..29bcafc41adfd19b59b0437021d5ebd044dcef9b 100644 --- a/net/bluetooth/Kconfig +++ b/net/bluetooth/Kconfig @@ -10,6 +10,7 @@ menuconfig BT select CRYPTO select CRYPTO_BLKCIPHER select CRYPTO_AES + select CRYPTO_CMAC select CRYPTO_ECB select CRYPTO_SHA256 help @@ -39,11 +40,10 @@ menuconfig BT to Bluetooth kernel modules are provided in the BlueZ packages. For more information, see . 
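The lowpan_psm_set() rework above exists because a debugfs write handler is a poor place to tear down L2CAP channels directly; the value is instead handed to a one-shot work item that runs in process context and frees itself when done. The generic shape of that pattern, as a sketch:

        #include <linux/slab.h>
        #include <linux/workqueue.h>

        struct oneshot {
                struct work_struct work;
                u16 arg;
        };

        static void oneshot_fn(struct work_struct *work)
        {
                struct oneshot *o = container_of(work, struct oneshot, work);

                /* ... act on o->arg; may sleep, may take sleeping locks ... */

                kfree(o);               /* the work item owns itself */
        }

        static int oneshot_schedule(u16 arg)
        {
                struct oneshot *o = kzalloc(sizeof(*o), GFP_KERNEL);

                if (!o)
                        return -ENOMEM;
                o->arg = arg;
                INIT_WORK(&o->work, oneshot_fn);
                schedule_work(&o->work);
                return 0;
        }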
-config BT_6LOWPAN - tristate "Bluetooth 6LoWPAN support" - depends on BT && 6LOWPAN - help - IPv6 compression over Bluetooth Low Energy. +config BT_BREDR + bool "Bluetooth Classic (BR/EDR) features" + depends on BT + default y source "net/bluetooth/rfcomm/Kconfig" @@ -53,4 +53,15 @@ source "net/bluetooth/cmtp/Kconfig" source "net/bluetooth/hidp/Kconfig" +config BT_LE + bool "Bluetooth Low Energy (LE) features" + depends on BT + default y + +config BT_6LOWPAN + tristate "Bluetooth 6LoWPAN support" + depends on BT_LE && 6LOWPAN + help + IPv6 compression over Bluetooth Low Energy. + source "drivers/bluetooth/Kconfig" diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile index 886e9aa3ecf1ffa3d7cd93d7aea873bfb25b5c1e..a5432a6a0ae6075d13a596a9f58e2380c7452b08 100644 --- a/net/bluetooth/Makefile +++ b/net/bluetooth/Makefile @@ -13,6 +13,6 @@ bluetooth_6lowpan-y := 6lowpan.o bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \ - a2mp.o amp.o + a2mp.o amp.o ecc.o subdir-ccflags-y += -D__CHECK_ENDIAN__ diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c index 5dcade511fdbf7da80995ac2c8218171663b339f..cedfbda15dad8514b606d3f62c86c81352f6c09d 100644 --- a/net/bluetooth/a2mp.c +++ b/net/bluetooth/a2mp.c @@ -60,8 +60,7 @@ void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data) memset(&msg, 0, sizeof(msg)); - msg.msg_iov = (struct iovec *) &iv; - msg.msg_iovlen = 1; + iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iv, 1, total_len); l2cap_chan_send(chan, &msg, total_len); @@ -720,7 +719,6 @@ static const struct l2cap_ops a2mp_chan_ops = { .resume = l2cap_chan_no_resume, .set_shutdown = l2cap_chan_no_set_shutdown, .get_sndtimeo = l2cap_chan_no_get_sndtimeo, - .memcpy_fromiovec = l2cap_chan_no_memcpy_fromiovec, }; static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked) diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 339c74ad45538f931ee1c31edb96c3f3f3298074..012e3b03589d8ce305deccf09189971dd86d1b39 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -31,7 +31,7 @@ #include #include -#define VERSION "2.19" +#define VERSION "2.20" /* Bluetooth sockets */ #define BT_MAX_PROTO 8 @@ -237,7 +237,7 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, } skb_reset_transport_header(skb); - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err == 0) { sock_recv_ts_and_drops(msg, sk, skb); @@ -328,7 +328,7 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock, } chunk = min_t(unsigned int, skb->len, size); - if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, chunk)) { + if (skb_copy_datagram_msg(skb, 0, msg, chunk)) { skb_queue_head(&sk->sk_receive_queue, skb); if (!copied) copied = -EFAULT; diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c index 2640d78f30b80080f5b240c8a25cf668392e2961..ee016f03910005de87cc45a7d04b2a3989d90c2e 100644 --- a/net/bluetooth/amp.c +++ b/net/bluetooth/amp.c @@ -134,6 +134,7 @@ struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr, static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output) { struct crypto_shash *tfm; + struct shash_desc *shash; int ret; if (!ksize) @@ -148,18 +149,24 @@ static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output) ret = crypto_shash_setkey(tfm, key, ksize); if (ret) { BT_DBG("crypto_ahash_setkey 
failed: err %d", ret); - } else { - char desc[sizeof(struct shash_desc) + - crypto_shash_descsize(tfm)] CRYPTO_MINALIGN_ATTR; - struct shash_desc *shash = (struct shash_desc *)desc; - - shash->tfm = tfm; - shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + goto failed; + } - ret = crypto_shash_digest(shash, plaintext, psize, - output); + shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm), + GFP_KERNEL); + if (!shash) { + ret = -ENOMEM; + goto failed; } + shash->tfm = tfm; + shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + ret = crypto_shash_digest(shash, plaintext, psize, output); + + kfree(shash); + +failed: crypto_free_shash(tfm); return ret; } diff --git a/net/bluetooth/bnep/Kconfig b/net/bluetooth/bnep/Kconfig index 71791fc9f6b1f93269880071af6150e3d67d2d72..9b70317c49dc8a7ba1534e8504b9ea3c1bc8d526 100644 --- a/net/bluetooth/bnep/Kconfig +++ b/net/bluetooth/bnep/Kconfig @@ -1,6 +1,6 @@ config BT_BNEP tristate "BNEP protocol support" - depends on BT + depends on BT_BREDR select CRC32 help BNEP (Bluetooth Network Encapsulation Protocol) is Ethernet diff --git a/net/bluetooth/cmtp/Kconfig b/net/bluetooth/cmtp/Kconfig index 94cbf42ce155b290ab0a4da9e975432006a6b6de..939da0fbdd88628f73a471ba9bfa6148546701b2 100644 --- a/net/bluetooth/cmtp/Kconfig +++ b/net/bluetooth/cmtp/Kconfig @@ -1,6 +1,6 @@ config BT_CMTP tristate "CMTP protocol support" - depends on BT && ISDN_CAPI + depends on BT_BREDR && ISDN_CAPI help CMTP (CAPI Message Transport Protocol) is a transport layer for CAPI messages. CMTP is required for the Bluetooth Common diff --git a/net/bluetooth/ecc.c b/net/bluetooth/ecc.c new file mode 100644 index 0000000000000000000000000000000000000000..e1709f8467acacb216241ec03f845a0043464c3b --- /dev/null +++ b/net/bluetooth/ecc.c @@ -0,0 +1,816 @@ +/* + * Copyright (c) 2013, Kenneth MacKay + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include + +#include "ecc.h" + +/* 256-bit curve */ +#define ECC_BYTES 32 + +#define MAX_TRIES 16 + +/* Number of u64's needed */ +#define NUM_ECC_DIGITS (ECC_BYTES / 8) + +struct ecc_point { + u64 x[NUM_ECC_DIGITS]; + u64 y[NUM_ECC_DIGITS]; +}; + +typedef struct { + u64 m_low; + u64 m_high; +} uint128_t; + +#define CURVE_P_32 { 0xFFFFFFFFFFFFFFFFull, 0x00000000FFFFFFFFull, \ + 0x0000000000000000ull, 0xFFFFFFFF00000001ull } + +#define CURVE_G_32 { \ + { 0xF4A13945D898C296ull, 0x77037D812DEB33A0ull, \ + 0xF8BCE6E563A440F2ull, 0x6B17D1F2E12C4247ull }, \ + { 0xCBB6406837BF51F5ull, 0x2BCE33576B315ECEull, \ + 0x8EE7EB4A7C0F9E16ull, 0x4FE342E2FE1A7F9Bull } \ +} + +#define CURVE_N_32 { 0xF3B9CAC2FC632551ull, 0xBCE6FAADA7179E84ull, \ + 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFF00000000ull } + +static u64 curve_p[NUM_ECC_DIGITS] = CURVE_P_32; +static struct ecc_point curve_g = CURVE_G_32; +static u64 curve_n[NUM_ECC_DIGITS] = CURVE_N_32; + +static void vli_clear(u64 *vli) +{ + int i; + + for (i = 0; i < NUM_ECC_DIGITS; i++) + vli[i] = 0; +} + +/* Returns true if vli == 0, false otherwise. */ +static bool vli_is_zero(const u64 *vli) +{ + int i; + + for (i = 0; i < NUM_ECC_DIGITS; i++) { + if (vli[i]) + return false; + } + + return true; +} + +/* Returns nonzero if bit bit of vli is set. */ +static u64 vli_test_bit(const u64 *vli, unsigned int bit) +{ + return (vli[bit / 64] & ((u64) 1 << (bit % 64))); +} + +/* Counts the number of 64-bit "digits" in vli. */ +static unsigned int vli_num_digits(const u64 *vli) +{ + int i; + + /* Search from the end until we find a non-zero digit. + * We do it in reverse because we expect that most digits will + * be nonzero. + */ + for (i = NUM_ECC_DIGITS - 1; i >= 0 && vli[i] == 0; i--); + + return (i + 1); +} + +/* Counts the number of bits required for vli. */ +static unsigned int vli_num_bits(const u64 *vli) +{ + unsigned int i, num_digits; + u64 digit; + + num_digits = vli_num_digits(vli); + if (num_digits == 0) + return 0; + + digit = vli[num_digits - 1]; + for (i = 0; digit; i++) + digit >>= 1; + + return ((num_digits - 1) * 64 + i); +} + +/* Sets dest = src. */ +static void vli_set(u64 *dest, const u64 *src) +{ + int i; + + for (i = 0; i < NUM_ECC_DIGITS; i++) + dest[i] = src[i]; +} + +/* Returns sign of left - right. */ +static int vli_cmp(const u64 *left, const u64 *right) +{ + int i; + + for (i = NUM_ECC_DIGITS - 1; i >= 0; i--) { + if (left[i] > right[i]) + return 1; + else if (left[i] < right[i]) + return -1; + } + + return 0; +} + +/* Computes result = in << c, returning carry. Can modify in place + * (if result == in). 0 < shift < 64. + */ +static u64 vli_lshift(u64 *result, const u64 *in, + unsigned int shift) +{ + u64 carry = 0; + int i; + + for (i = 0; i < NUM_ECC_DIGITS; i++) { + u64 temp = in[i]; + + result[i] = (temp << shift) | carry; + carry = temp >> (64 - shift); + } + + return carry; +} + +/* Computes vli = vli >> 1. */ +static void vli_rshift1(u64 *vli) +{ + u64 *end = vli; + u64 carry = 0; + + vli += NUM_ECC_DIGITS; + + while (vli-- > end) { + u64 temp = *vli; + *vli = (temp >> 1) | carry; + carry = temp << 63; + } +} + +/* Computes result = left + right, returning carry. Can modify in place. */ +static u64 vli_add(u64 *result, const u64 *left, + const u64 *right) +{ + u64 carry = 0; + int i; + + for (i = 0; i < NUM_ECC_DIGITS; i++) { + u64 sum; + + sum = left[i] + right[i] + carry; + if (sum != left[i]) + carry = (sum < left[i]); + + result[i] = sum; + } + + return carry; +} + +/* Computes result = left - right, returning borrow. 
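The carry handling in vli_add() above is subtle enough to spell out: carry is only recomputed when sum != left[i]. Equality happens exactly when right[i] + carry wrapped to 0 mod 2^64, i.e. either right[i] == 0 with carry == 0 (the old carry 0 is still correct) or right[i] == ~0ULL with carry == 1 (a full wrap, so the old carry 1 is still correct); vli_sub() mirrors the same reasoning for the borrow. A standalone check of the wrap case (plain C99):

        #include <assert.h>
        #include <stdint.h>

        int main(void)
        {
                uint64_t left = 0x1234, right = UINT64_MAX, carry = 1;
                uint64_t sum = left + right + carry;    /* wraps: sum == left */

                assert(sum == left);    /* the guard keeps carry at 1 here */
                return 0;
        }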
Can modify in place. */ +static u64 vli_sub(u64 *result, const u64 *left, const u64 *right) +{ + u64 borrow = 0; + int i; + + for (i = 0; i < NUM_ECC_DIGITS; i++) { + u64 diff; + + diff = left[i] - right[i] - borrow; + if (diff != left[i]) + borrow = (diff > left[i]); + + result[i] = diff; + } + + return borrow; +} + +static uint128_t mul_64_64(u64 left, u64 right) +{ + u64 a0 = left & 0xffffffffull; + u64 a1 = left >> 32; + u64 b0 = right & 0xffffffffull; + u64 b1 = right >> 32; + u64 m0 = a0 * b0; + u64 m1 = a0 * b1; + u64 m2 = a1 * b0; + u64 m3 = a1 * b1; + uint128_t result; + + m2 += (m0 >> 32); + m2 += m1; + + /* Overflow */ + if (m2 < m1) + m3 += 0x100000000ull; + + result.m_low = (m0 & 0xffffffffull) | (m2 << 32); + result.m_high = m3 + (m2 >> 32); + + return result; +} + +static uint128_t add_128_128(uint128_t a, uint128_t b) +{ + uint128_t result; + + result.m_low = a.m_low + b.m_low; + result.m_high = a.m_high + b.m_high + (result.m_low < a.m_low); + + return result; +} + +static void vli_mult(u64 *result, const u64 *left, const u64 *right) +{ + uint128_t r01 = { 0, 0 }; + u64 r2 = 0; + unsigned int i, k; + + /* Compute each digit of result in sequence, maintaining the + * carries. + */ + for (k = 0; k < NUM_ECC_DIGITS * 2 - 1; k++) { + unsigned int min; + + if (k < NUM_ECC_DIGITS) + min = 0; + else + min = (k + 1) - NUM_ECC_DIGITS; + + for (i = min; i <= k && i < NUM_ECC_DIGITS; i++) { + uint128_t product; + + product = mul_64_64(left[i], right[k - i]); + + r01 = add_128_128(r01, product); + r2 += (r01.m_high < product.m_high); + } + + result[k] = r01.m_low; + r01.m_low = r01.m_high; + r01.m_high = r2; + r2 = 0; + } + + result[NUM_ECC_DIGITS * 2 - 1] = r01.m_low; +} + +static void vli_square(u64 *result, const u64 *left) +{ + uint128_t r01 = { 0, 0 }; + u64 r2 = 0; + int i, k; + + for (k = 0; k < NUM_ECC_DIGITS * 2 - 1; k++) { + unsigned int min; + + if (k < NUM_ECC_DIGITS) + min = 0; + else + min = (k + 1) - NUM_ECC_DIGITS; + + for (i = min; i <= k && i <= k - i; i++) { + uint128_t product; + + product = mul_64_64(left[i], left[k - i]); + + if (i < k - i) { + r2 += product.m_high >> 63; + product.m_high = (product.m_high << 1) | + (product.m_low >> 63); + product.m_low <<= 1; + } + + r01 = add_128_128(r01, product); + r2 += (r01.m_high < product.m_high); + } + + result[k] = r01.m_low; + r01.m_low = r01.m_high; + r01.m_high = r2; + r2 = 0; + } + + result[NUM_ECC_DIGITS * 2 - 1] = r01.m_low; +} + +/* Computes result = (left + right) % mod. + * Assumes that left < mod and right < mod, result != mod. + */ +static void vli_mod_add(u64 *result, const u64 *left, const u64 *right, + const u64 *mod) +{ + u64 carry; + + carry = vli_add(result, left, right); + + /* result > mod (result = mod + remainder), so subtract mod to + * get remainder. + */ + if (carry || vli_cmp(result, mod) >= 0) + vli_sub(result, result, mod); +} + +/* Computes result = (left - right) % mod. + * Assumes that left < mod and right < mod, result != mod. + */ +static void vli_mod_sub(u64 *result, const u64 *left, const u64 *right, + const u64 *mod) +{ + u64 borrow = vli_sub(result, left, right); + + /* In this case, p_result == -diff == (max int) - diff. + * Since -x % d == d - x, we can get the correct result from + * result + mod (with overflow). 
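mul_64_64() above is a portable schoolbook 64x64->128 multiply, since the kernel cannot assume a native 128-bit integer type on every target. Where GCC's unsigned __int128 is available it can be cross-checked; a standalone test sketch (userspace C, same arithmetic as the function above):

        #include <assert.h>
        #include <stdint.h>

        struct u128 { uint64_t lo, hi; };

        static struct u128 mul_64_64(uint64_t l, uint64_t r)
        {
                uint64_t a0 = l & 0xffffffffull, a1 = l >> 32;
                uint64_t b0 = r & 0xffffffffull, b1 = r >> 32;
                uint64_t m0 = a0 * b0, m1 = a0 * b1, m2 = a1 * b0, m3 = a1 * b1;
                struct u128 res;

                m2 += m0 >> 32;
                m2 += m1;
                if (m2 < m1)                    /* carry out of the middle column */
                        m3 += 0x100000000ull;
                res.lo = (m0 & 0xffffffffull) | (m2 << 32);
                res.hi = m3 + (m2 >> 32);
                return res;
        }

        int main(void)
        {
                uint64_t l = 0xdeadbeefcafebabeull, r = 0x0123456789abcdefull;
                unsigned __int128 ref = (unsigned __int128)l * r;
                struct u128 got = mul_64_64(l, r);

                assert(got.lo == (uint64_t)ref && got.hi == (uint64_t)(ref >> 64));
                return 0;
        }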
+ */ + if (borrow) + vli_add(result, result, mod); +} + +/* Computes result = product % curve_p + from http://www.nsa.gov/ia/_files/nist-routines.pdf */ +static void vli_mmod_fast(u64 *result, const u64 *product) +{ + u64 tmp[NUM_ECC_DIGITS]; + int carry; + + /* t */ + vli_set(result, product); + + /* s1 */ + tmp[0] = 0; + tmp[1] = product[5] & 0xffffffff00000000ull; + tmp[2] = product[6]; + tmp[3] = product[7]; + carry = vli_lshift(tmp, tmp, 1); + carry += vli_add(result, result, tmp); + + /* s2 */ + tmp[1] = product[6] << 32; + tmp[2] = (product[6] >> 32) | (product[7] << 32); + tmp[3] = product[7] >> 32; + carry += vli_lshift(tmp, tmp, 1); + carry += vli_add(result, result, tmp); + + /* s3 */ + tmp[0] = product[4]; + tmp[1] = product[5] & 0xffffffff; + tmp[2] = 0; + tmp[3] = product[7]; + carry += vli_add(result, result, tmp); + + /* s4 */ + tmp[0] = (product[4] >> 32) | (product[5] << 32); + tmp[1] = (product[5] >> 32) | (product[6] & 0xffffffff00000000ull); + tmp[2] = product[7]; + tmp[3] = (product[6] >> 32) | (product[4] << 32); + carry += vli_add(result, result, tmp); + + /* d1 */ + tmp[0] = (product[5] >> 32) | (product[6] << 32); + tmp[1] = (product[6] >> 32); + tmp[2] = 0; + tmp[3] = (product[4] & 0xffffffff) | (product[5] << 32); + carry -= vli_sub(result, result, tmp); + + /* d2 */ + tmp[0] = product[6]; + tmp[1] = product[7]; + tmp[2] = 0; + tmp[3] = (product[4] >> 32) | (product[5] & 0xffffffff00000000ull); + carry -= vli_sub(result, result, tmp); + + /* d3 */ + tmp[0] = (product[6] >> 32) | (product[7] << 32); + tmp[1] = (product[7] >> 32) | (product[4] << 32); + tmp[2] = (product[4] >> 32) | (product[5] << 32); + tmp[3] = (product[6] << 32); + carry -= vli_sub(result, result, tmp); + + /* d4 */ + tmp[0] = product[7]; + tmp[1] = product[4] & 0xffffffff00000000ull; + tmp[2] = product[5]; + tmp[3] = product[6] & 0xffffffff00000000ull; + carry -= vli_sub(result, result, tmp); + + if (carry < 0) { + do { + carry += vli_add(result, result, curve_p); + } while (carry < 0); + } else { + while (carry || vli_cmp(curve_p, result) != 1) + carry -= vli_sub(result, result, curve_p); + } +} + +/* Computes result = (left * right) % curve_p. */ +static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right) +{ + u64 product[2 * NUM_ECC_DIGITS]; + + vli_mult(product, left, right); + vli_mmod_fast(result, product); +} + +/* Computes result = left^2 % curve_p. */ +static void vli_mod_square_fast(u64 *result, const u64 *left) +{ + u64 product[2 * NUM_ECC_DIGITS]; + + vli_square(product, left); + vli_mmod_fast(result, product); +} + +#define EVEN(vli) (!(vli[0] & 1)) +/* Computes result = (1 / p_input) % mod. All VLIs are the same size. 
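vli_mmod_fast() above is the standard fast reduction for the NIST prime

        p = 2^256 - 2^224 + 2^192 + 2^96 - 1,

from the routine cited in its comment: the 512-bit product, viewed as 32-bit words c15..c0, is folded as t + 2*s1 + 2*s2 + s3 + s4 - d1 - d2 - d3 - d4 (mod p), where t and the s/d terms are exactly the word selections assembled into tmp[] in the function. The signed carry counter tracks the net over/underflow of those nine additions and subtractions, so at most a handful of final conditional additions or subtractions of p are needed to land in [0, p).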
+ * See "From Euclid's GCD to Montgomery Multiplication to the Great Divide" + * https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf + */ +static void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod) +{ + u64 a[NUM_ECC_DIGITS], b[NUM_ECC_DIGITS]; + u64 u[NUM_ECC_DIGITS], v[NUM_ECC_DIGITS]; + u64 carry; + int cmp_result; + + if (vli_is_zero(input)) { + vli_clear(result); + return; + } + + vli_set(a, input); + vli_set(b, mod); + vli_clear(u); + u[0] = 1; + vli_clear(v); + + while ((cmp_result = vli_cmp(a, b)) != 0) { + carry = 0; + + if (EVEN(a)) { + vli_rshift1(a); + + if (!EVEN(u)) + carry = vli_add(u, u, mod); + + vli_rshift1(u); + if (carry) + u[NUM_ECC_DIGITS - 1] |= 0x8000000000000000ull; + } else if (EVEN(b)) { + vli_rshift1(b); + + if (!EVEN(v)) + carry = vli_add(v, v, mod); + + vli_rshift1(v); + if (carry) + v[NUM_ECC_DIGITS - 1] |= 0x8000000000000000ull; + } else if (cmp_result > 0) { + vli_sub(a, a, b); + vli_rshift1(a); + + if (vli_cmp(u, v) < 0) + vli_add(u, u, mod); + + vli_sub(u, u, v); + if (!EVEN(u)) + carry = vli_add(u, u, mod); + + vli_rshift1(u); + if (carry) + u[NUM_ECC_DIGITS - 1] |= 0x8000000000000000ull; + } else { + vli_sub(b, b, a); + vli_rshift1(b); + + if (vli_cmp(v, u) < 0) + vli_add(v, v, mod); + + vli_sub(v, v, u); + if (!EVEN(v)) + carry = vli_add(v, v, mod); + + vli_rshift1(v); + if (carry) + v[NUM_ECC_DIGITS - 1] |= 0x8000000000000000ull; + } + } + + vli_set(result, u); +} + +/* ------ Point operations ------ */ + +/* Returns true if p_point is the point at infinity, false otherwise. */ +static bool ecc_point_is_zero(const struct ecc_point *point) +{ + return (vli_is_zero(point->x) && vli_is_zero(point->y)); +} + +/* Point multiplication algorithm using Montgomery's ladder with co-Z + * coordinates. From http://eprint.iacr.org/2011/338.pdf + */ + +/* Double in place */ +static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1) +{ + /* t1 = x, t2 = y, t3 = z */ + u64 t4[NUM_ECC_DIGITS]; + u64 t5[NUM_ECC_DIGITS]; + + if (vli_is_zero(z1)) + return; + + vli_mod_square_fast(t4, y1); /* t4 = y1^2 */ + vli_mod_mult_fast(t5, x1, t4); /* t5 = x1*y1^2 = A */ + vli_mod_square_fast(t4, t4); /* t4 = y1^4 */ + vli_mod_mult_fast(y1, y1, z1); /* t2 = y1*z1 = z3 */ + vli_mod_square_fast(z1, z1); /* t3 = z1^2 */ + + vli_mod_add(x1, x1, z1, curve_p); /* t1 = x1 + z1^2 */ + vli_mod_add(z1, z1, z1, curve_p); /* t3 = 2*z1^2 */ + vli_mod_sub(z1, x1, z1, curve_p); /* t3 = x1 - z1^2 */ + vli_mod_mult_fast(x1, x1, z1); /* t1 = x1^2 - z1^4 */ + + vli_mod_add(z1, x1, x1, curve_p); /* t3 = 2*(x1^2 - z1^4) */ + vli_mod_add(x1, x1, z1, curve_p); /* t1 = 3*(x1^2 - z1^4) */ + if (vli_test_bit(x1, 0)) { + u64 carry = vli_add(x1, x1, curve_p); + vli_rshift1(x1); + x1[NUM_ECC_DIGITS - 1] |= carry << 63; + } else { + vli_rshift1(x1); + } + /* t1 = 3/2*(x1^2 - z1^4) = B */ + + vli_mod_square_fast(z1, x1); /* t3 = B^2 */ + vli_mod_sub(z1, z1, t5, curve_p); /* t3 = B^2 - A */ + vli_mod_sub(z1, z1, t5, curve_p); /* t3 = B^2 - 2A = x3 */ + vli_mod_sub(t5, t5, z1, curve_p); /* t5 = A - x3 */ + vli_mod_mult_fast(x1, x1, t5); /* t1 = B * (A - x3) */ + vli_mod_sub(t4, x1, t4, curve_p); /* t4 = B * (A - x3) - y1^4 = y3 */ + + vli_set(x1, z1); + vli_set(z1, y1); + vli_set(y1, t4); +} + +/* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */ +static void apply_z(u64 *x1, u64 *y1, u64 *z) +{ + u64 t1[NUM_ECC_DIGITS]; + + vli_mod_square_fast(t1, z); /* z^2 */ + vli_mod_mult_fast(x1, x1, t1); /* x1 * z^2 */ + vli_mod_mult_fast(t1, t1, z); /* z^3 */ + vli_mod_mult_fast(y1, y1, t1); /* y1 * z^3 */ 
+} + +/* P = (x1, y1) => 2P, (x2, y2) => P' */ +static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2, + u64 *p_initial_z) +{ + u64 z[NUM_ECC_DIGITS]; + + vli_set(x2, x1); + vli_set(y2, y1); + + vli_clear(z); + z[0] = 1; + + if (p_initial_z) + vli_set(z, p_initial_z); + + apply_z(x1, y1, z); + + ecc_point_double_jacobian(x1, y1, z); + + apply_z(x2, y2, z); +} + +/* Input P = (x1, y1, Z), Q = (x2, y2, Z) + * Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3) + * or P => P', Q => P + Q + */ +static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2) +{ + /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */ + u64 t5[NUM_ECC_DIGITS]; + + vli_mod_sub(t5, x2, x1, curve_p); /* t5 = x2 - x1 */ + vli_mod_square_fast(t5, t5); /* t5 = (x2 - x1)^2 = A */ + vli_mod_mult_fast(x1, x1, t5); /* t1 = x1*A = B */ + vli_mod_mult_fast(x2, x2, t5); /* t3 = x2*A = C */ + vli_mod_sub(y2, y2, y1, curve_p); /* t4 = y2 - y1 */ + vli_mod_square_fast(t5, y2); /* t5 = (y2 - y1)^2 = D */ + + vli_mod_sub(t5, t5, x1, curve_p); /* t5 = D - B */ + vli_mod_sub(t5, t5, x2, curve_p); /* t5 = D - B - C = x3 */ + vli_mod_sub(x2, x2, x1, curve_p); /* t3 = C - B */ + vli_mod_mult_fast(y1, y1, x2); /* t2 = y1*(C - B) */ + vli_mod_sub(x2, x1, t5, curve_p); /* t3 = B - x3 */ + vli_mod_mult_fast(y2, y2, x2); /* t4 = (y2 - y1)*(B - x3) */ + vli_mod_sub(y2, y2, y1, curve_p); /* t4 = y3 */ + + vli_set(x2, t5); +} + +/* Input P = (x1, y1, Z), Q = (x2, y2, Z) + * Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3) + * or P => P - Q, Q => P + Q + */ +static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2) +{ + /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */ + u64 t5[NUM_ECC_DIGITS]; + u64 t6[NUM_ECC_DIGITS]; + u64 t7[NUM_ECC_DIGITS]; + + vli_mod_sub(t5, x2, x1, curve_p); /* t5 = x2 - x1 */ + vli_mod_square_fast(t5, t5); /* t5 = (x2 - x1)^2 = A */ + vli_mod_mult_fast(x1, x1, t5); /* t1 = x1*A = B */ + vli_mod_mult_fast(x2, x2, t5); /* t3 = x2*A = C */ + vli_mod_add(t5, y2, y1, curve_p); /* t4 = y2 + y1 */ + vli_mod_sub(y2, y2, y1, curve_p); /* t4 = y2 - y1 */ + + vli_mod_sub(t6, x2, x1, curve_p); /* t6 = C - B */ + vli_mod_mult_fast(y1, y1, t6); /* t2 = y1 * (C - B) */ + vli_mod_add(t6, x1, x2, curve_p); /* t6 = B + C */ + vli_mod_square_fast(x2, y2); /* t3 = (y2 - y1)^2 */ + vli_mod_sub(x2, x2, t6, curve_p); /* t3 = x3 */ + + vli_mod_sub(t7, x1, x2, curve_p); /* t7 = B - x3 */ + vli_mod_mult_fast(y2, y2, t7); /* t4 = (y2 - y1)*(B - x3) */ + vli_mod_sub(y2, y2, y1, curve_p); /* t4 = y3 */ + + vli_mod_square_fast(t7, t5); /* t7 = (y2 + y1)^2 = F */ + vli_mod_sub(t7, t7, t6, curve_p); /* t7 = x3' */ + vli_mod_sub(t6, t7, x1, curve_p); /* t6 = x3' - B */ + vli_mod_mult_fast(t6, t6, t5); /* t6 = (y2 + y1)*(x3' - B) */ + vli_mod_sub(y1, t6, y1, curve_p); /* t2 = y3' */ + + vli_set(x1, t7); +} + +static void ecc_point_mult(struct ecc_point *result, + const struct ecc_point *point, u64 *scalar, + u64 *initial_z, int num_bits) +{ + /* R0 and R1 */ + u64 rx[2][NUM_ECC_DIGITS]; + u64 ry[2][NUM_ECC_DIGITS]; + u64 z[NUM_ECC_DIGITS]; + int i, nb; + + vli_set(rx[1], point->x); + vli_set(ry[1], point->y); + + xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z); + + for (i = num_bits - 2; i > 0; i--) { + nb = !vli_test_bit(scalar, i); + xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb]); + xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb]); + } + + nb = !vli_test_bit(scalar, 0); + xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb]); + + /* Find final 1/Z value. 
*/ + vli_mod_sub(z, rx[1], rx[0], curve_p); /* X1 - X0 */ + vli_mod_mult_fast(z, z, ry[1 - nb]); /* Yb * (X1 - X0) */ + vli_mod_mult_fast(z, z, point->x); /* xP * Yb * (X1 - X0) */ + vli_mod_inv(z, z, curve_p); /* 1 / (xP * Yb * (X1 - X0)) */ + vli_mod_mult_fast(z, z, point->y); /* yP / (xP * Yb * (X1 - X0)) */ + vli_mod_mult_fast(z, z, rx[1 - nb]); /* Xb * yP / (xP * Yb * (X1 - X0)) */ + /* End 1/Z calculation */ + + xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb]); + + apply_z(rx[0], ry[0], z); + + vli_set(result->x, rx[0]); + vli_set(result->y, ry[0]); +} + +static void ecc_bytes2native(const u8 bytes[ECC_BYTES], + u64 native[NUM_ECC_DIGITS]) +{ + int i; + + for (i = 0; i < NUM_ECC_DIGITS; i++) { + const u8 *digit = bytes + 8 * (NUM_ECC_DIGITS - 1 - i); + + native[NUM_ECC_DIGITS - 1 - i] = + ((u64) digit[0] << 0) | + ((u64) digit[1] << 8) | + ((u64) digit[2] << 16) | + ((u64) digit[3] << 24) | + ((u64) digit[4] << 32) | + ((u64) digit[5] << 40) | + ((u64) digit[6] << 48) | + ((u64) digit[7] << 56); + } +} + +static void ecc_native2bytes(const u64 native[NUM_ECC_DIGITS], + u8 bytes[ECC_BYTES]) +{ + int i; + + for (i = 0; i < NUM_ECC_DIGITS; i++) { + u8 *digit = bytes + 8 * (NUM_ECC_DIGITS - 1 - i); + + digit[0] = native[NUM_ECC_DIGITS - 1 - i] >> 0; + digit[1] = native[NUM_ECC_DIGITS - 1 - i] >> 8; + digit[2] = native[NUM_ECC_DIGITS - 1 - i] >> 16; + digit[3] = native[NUM_ECC_DIGITS - 1 - i] >> 24; + digit[4] = native[NUM_ECC_DIGITS - 1 - i] >> 32; + digit[5] = native[NUM_ECC_DIGITS - 1 - i] >> 40; + digit[6] = native[NUM_ECC_DIGITS - 1 - i] >> 48; + digit[7] = native[NUM_ECC_DIGITS - 1 - i] >> 56; + } +} + +bool ecc_make_key(u8 public_key[64], u8 private_key[32]) +{ + struct ecc_point pk; + u64 priv[NUM_ECC_DIGITS]; + unsigned int tries = 0; + + do { + if (tries++ >= MAX_TRIES) + return false; + + get_random_bytes(priv, ECC_BYTES); + + if (vli_is_zero(priv)) + continue; + + /* Make sure the private key is in the range [1, n-1]. */ + if (vli_cmp(curve_n, priv) != 1) + continue; + + ecc_point_mult(&pk, &curve_g, priv, NULL, vli_num_bits(priv)); + } while (ecc_point_is_zero(&pk)); + + ecc_native2bytes(priv, private_key); + ecc_native2bytes(pk.x, public_key); + ecc_native2bytes(pk.y, &public_key[32]); + + return true; +} + +bool ecdh_shared_secret(const u8 public_key[64], const u8 private_key[32], + u8 secret[32]) +{ + u64 priv[NUM_ECC_DIGITS]; + u64 rand[NUM_ECC_DIGITS]; + struct ecc_point product, pk; + + get_random_bytes(rand, ECC_BYTES); + + ecc_bytes2native(public_key, pk.x); + ecc_bytes2native(&public_key[32], pk.y); + ecc_bytes2native(private_key, priv); + + ecc_point_mult(&product, &pk, priv, rand, vli_num_bits(priv)); + + ecc_native2bytes(product.x, secret); + + return !ecc_point_is_zero(&product); +} diff --git a/net/bluetooth/ecc.h b/net/bluetooth/ecc.h new file mode 100644 index 0000000000000000000000000000000000000000..8d6a2f4d1905f33df35e1d2380f7e49cfe0f6c5f --- /dev/null +++ b/net/bluetooth/ecc.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2013, Kenneth MacKay + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
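The two public entry points implemented above pair up as in this hypothetical usage sketch: both sides generate a key pair with ecc_make_key(), exchange public keys, and derive the same LSB-first 32-byte secret with ecdh_shared_secret() (which, per the header comment, should still be hashed before use as key material). The error codes are illustrative only:

        #include <linux/string.h>
        #include <linux/types.h>

        #include "ecc.h"

        static int ecdh_demo(void)
        {
                u8 pub_a[64], priv_a[32], pub_b[64], priv_b[32];
                u8 secret_a[32], secret_b[32];

                if (!ecc_make_key(pub_a, priv_a) || !ecc_make_key(pub_b, priv_b))
                        return -EIO;

                /* Each side combines its own private key with the peer's public key. */
                if (!ecdh_shared_secret(pub_b, priv_a, secret_a) ||
                    !ecdh_shared_secret(pub_a, priv_b, secret_b))
                        return -EIO;

                return memcmp(secret_a, secret_b, 32) ? -EINVAL : 0;
        }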
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* Create a public/private key pair. + * Outputs: + * public_key - Will be filled in with the public key. + * private_key - Will be filled in with the private key. + * + * Returns true if the key pair was generated successfully, false + * if an error occurred. The keys are with the LSB first. + */ +bool ecc_make_key(u8 public_key[64], u8 private_key[32]); + +/* Compute a shared secret given your secret key and someone else's + * public key. + * Note: It is recommended that you hash the result of ecdh_shared_secret + * before using it for symmetric encryption or HMAC. + * + * Inputs: + * public_key - The public key of the remote party + * private_key - Your private key. + * + * Outputs: + * secret - Will be filled in with the shared secret value. + * + * Returns true if the shared secret was generated successfully, false + * if an error occurred. Both input and output parameters are with the + * LSB first. + */ +bool ecdh_shared_secret(const u8 public_key[64], const u8 private_key[32], + u8 secret[32]); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index b9517bd171901e7c6a9395791f3ecfc7fbefdd6d..79d84b88b8f0a766c9d8db9e93f4ef35221031be 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -141,10 +141,11 @@ int hci_disconnect(struct hci_conn *conn, __u8 reason) */ if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER) { struct hci_dev *hdev = conn->hdev; - struct hci_cp_read_clock_offset cp; + struct hci_cp_read_clock_offset clkoff_cp; - cp.handle = cpu_to_le16(conn->handle); - hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(cp), &cp); + clkoff_cp.handle = cpu_to_le16(conn->handle); + hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp), + &clkoff_cp); } conn->state = BT_DISCONN; @@ -415,7 +416,7 @@ static void le_conn_timeout(struct work_struct *work) * happen with broken hardware or if low duty cycle was used * (which doesn't have a timeout of its own). 
*/ - if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) { + if (conn->role == HCI_ROLE_SLAVE) { u8 enable = 0x00; hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); @@ -448,6 +449,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, conn->io_capability = hdev->io_capability; conn->remote_auth = 0xff; conn->key_type = 0xff; + conn->rssi = HCI_RSSI_INVALID; conn->tx_power = HCI_TX_POWER_INVALID; conn->max_tx_power = HCI_TX_POWER_INVALID; @@ -517,7 +519,7 @@ int hci_conn_del(struct hci_conn *conn) /* Unacked frames */ hdev->acl_cnt += conn->sent; } else if (conn->type == LE_LINK) { - cancel_delayed_work_sync(&conn->le_conn_timeout); + cancel_delayed_work(&conn->le_conn_timeout); if (hdev->le_pkts) hdev->le_cnt += conn->sent; @@ -544,6 +546,9 @@ int hci_conn_del(struct hci_conn *conn) hci_conn_del_sysfs(conn); + if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags)) + hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type); + hci_dev_put(hdev); hci_conn_put(conn); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index cb05d7f16a34acc0ca78958d3e5d612453f7d154..93f92a08550694dbe3791109631ab4058afe13ca 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -200,31 +200,6 @@ static const struct file_operations blacklist_fops = { .release = single_release, }; -static int whitelist_show(struct seq_file *f, void *p) -{ - struct hci_dev *hdev = f->private; - struct bdaddr_list *b; - - hci_dev_lock(hdev); - list_for_each_entry(b, &hdev->whitelist, list) - seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type); - hci_dev_unlock(hdev); - - return 0; -} - -static int whitelist_open(struct inode *inode, struct file *file) -{ - return single_open(file, whitelist_show, inode->i_private); -} - -static const struct file_operations whitelist_fops = { - .open = whitelist_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - static int uuids_show(struct seq_file *f, void *p) { struct hci_dev *hdev = f->private; @@ -299,15 +274,13 @@ static const struct file_operations inquiry_cache_fops = { static int link_keys_show(struct seq_file *f, void *ptr) { struct hci_dev *hdev = f->private; - struct list_head *p, *n; + struct link_key *key; - hci_dev_lock(hdev); - list_for_each_safe(p, n, &hdev->link_keys) { - struct link_key *key = list_entry(p, struct link_key, list); + rcu_read_lock(); + list_for_each_entry_rcu(key, &hdev->link_keys, list) seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type, HCI_LINK_KEY_SIZE, key->val, key->pin_len); - } - hci_dev_unlock(hdev); + rcu_read_unlock(); return 0; } @@ -433,6 +406,49 @@ static const struct file_operations force_sc_support_fops = { .llseek = default_llseek, }; +static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + char buf[3]; + + buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 
'Y': 'N'; + buf[1] = '\n'; + buf[2] = '\0'; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t force_lesc_support_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + char buf[32]; + size_t buf_size = min(count, (sizeof(buf)-1)); + bool enable; + + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + buf[buf_size] = '\0'; + if (strtobool(buf, &enable)) + return -EINVAL; + + if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags)) + return -EALREADY; + + change_bit(HCI_FORCE_LESC, &hdev->dbg_flags); + + return count; +} + +static const struct file_operations force_lesc_support_fops = { + .open = simple_open, + .read = force_lesc_support_read, + .write = force_lesc_support_write, + .llseek = default_llseek, +}; + static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -773,16 +789,15 @@ static const struct file_operations white_list_fops = { static int identity_resolving_keys_show(struct seq_file *f, void *ptr) { struct hci_dev *hdev = f->private; - struct list_head *p, *n; + struct smp_irk *irk; - hci_dev_lock(hdev); - list_for_each_safe(p, n, &hdev->identity_resolving_keys) { - struct smp_irk *irk = list_entry(p, struct smp_irk, list); + rcu_read_lock(); + list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &irk->bdaddr, irk->addr_type, 16, irk->val, &irk->rpa); } - hci_dev_unlock(hdev); + rcu_read_unlock(); return 0; } @@ -803,17 +818,15 @@ static const struct file_operations identity_resolving_keys_fops = { static int long_term_keys_show(struct seq_file *f, void *ptr) { struct hci_dev *hdev = f->private; - struct list_head *p, *n; + struct smp_ltk *ltk; - hci_dev_lock(hdev); - list_for_each_safe(p, n, &hdev->long_term_keys) { - struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list); + rcu_read_lock(); + list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list) seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n", &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated, ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv), __le64_to_cpu(ltk->rand), 16, ltk->val); - } - hci_dev_unlock(hdev); + rcu_read_unlock(); return 0; } @@ -1030,10 +1043,13 @@ static int device_list_show(struct seq_file *f, void *ptr) { struct hci_dev *hdev = f->private; struct hci_conn_params *p; + struct bdaddr_list *b; hci_dev_lock(hdev); + list_for_each_entry(b, &hdev->whitelist, list) + seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type); list_for_each_entry(p, &hdev->le_conn_params, list) { - seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type, + seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type, p->auto_connect); } hci_dev_unlock(hdev); @@ -1147,13 +1163,16 @@ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, hdev->req_status = HCI_REQ_PEND; - err = hci_req_run(&req, hci_req_sync_complete); - if (err < 0) - return ERR_PTR(err); - add_wait_queue(&hdev->req_wait_q, &wait); set_current_state(TASK_INTERRUPTIBLE); + err = hci_req_run(&req, hci_req_sync_complete); + if (err < 0) { + remove_wait_queue(&hdev->req_wait_q, &wait); + set_current_state(TASK_RUNNING); + return ERR_PTR(err); + } + schedule_timeout(timeout); remove_wait_queue(&hdev->req_wait_q, &wait); @@ -1211,10 +1230,16 @@ static int __hci_req_sync(struct hci_dev *hdev, func(&req, opt); + add_wait_queue(&hdev->req_wait_q, &wait); +
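The reordering in __hci_cmd_sync_ev() above (and in __hci_req_sync() just below) is the classic prepare-to-wait pattern: the task must already be on req_wait_q with its state set before the request is fired, otherwise a completion arriving between hci_req_run() and add_wait_queue() would be missed. A generic sketch of the pattern under that assumption; start_request() is a hypothetical stand-in for hci_req_run():

static int wait_for_req_sketch(struct hci_dev *hdev, long timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err;

	add_wait_queue(&hdev->req_wait_q, &wait);	/* 1: register waiter */
	set_current_state(TASK_INTERRUPTIBLE);		/* 2: mark as sleeping */

	err = start_request(hdev);	/* 3: may complete (and wake) at once */
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return err;
	}

	schedule_timeout(timeout);	/* 4: sleep until wake-up or timeout */
	remove_wait_queue(&hdev->req_wait_q, &wait);

	return 0;
}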
set_current_state(TASK_INTERRUPTIBLE); + err = hci_req_run(&req, hci_req_sync_complete); if (err < 0) { hdev->req_status = 0; + remove_wait_queue(&hdev->req_wait_q, &wait); + set_current_state(TASK_RUNNING); + /* ENODATA means the HCI request command queue is empty. * This can happen when a request with conditionals doesn't * trigger any commands to be sent. This is normal behavior @@ -1226,9 +1251,6 @@ static int __hci_req_sync(struct hci_dev *hdev, return err; } - add_wait_queue(&hdev->req_wait_q, &wait); - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(timeout); remove_wait_queue(&hdev->req_wait_q, &wait); @@ -1713,6 +1735,28 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt) * Parameter Request */ + /* If the controller supports Extended Scanner Filter + * Policies, enable the corresponding event. + */ + if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY) + events[1] |= 0x04; /* LE Direct Advertising + * Report + */ + + /* If the controller supports the LE Read Local P-256 + * Public Key command, enable the corresponding event. + */ + if (hdev->commands[34] & 0x02) + events[0] |= 0x80; /* LE Read Local P-256 + * Public Key Complete + */ + + /* If the controller supports the LE Generate DHKey + * command, enable the corresponding event. + */ + if (hdev->commands[34] & 0x04) + events[1] |= 0x01; /* LE Generate DHKey Complete */ + hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events), events); @@ -1755,9 +1799,7 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt) hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL); /* Enable Secure Connections if supported and configured */ - if ((lmp_sc_capable(hdev) || - test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) && - test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) { + if (bredr_sc_enabled(hdev)) { u8 support = 0x01; hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT, sizeof(support), &support); @@ -1811,10 +1853,10 @@ static int __hci_init(struct hci_dev *hdev) &hdev->manufacturer); debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver); debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev); + debugfs_create_file("device_list", 0444, hdev->debugfs, hdev, + &device_list_fops); debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev, &blacklist_fops); - debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev, - &whitelist_fops); debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops); debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev, @@ -1840,6 +1882,10 @@ static int __hci_init(struct hci_dev *hdev) hdev, &force_sc_support_fops); debugfs_create_file("sc_only_mode", 0444, hdev->debugfs, hdev, &sc_only_mode_fops); + if (lmp_le_capable(hdev)) + debugfs_create_file("force_lesc_support", 0644, + hdev->debugfs, hdev, + &force_lesc_support_fops); } if (lmp_sniff_capable(hdev)) { @@ -1893,8 +1939,6 @@ static int __hci_init(struct hci_dev *hdev) hdev, &adv_min_interval_fops); debugfs_create_file("adv_max_interval", 0644, hdev->debugfs, hdev, &adv_max_interval_fops); - debugfs_create_file("device_list", 0444, hdev->debugfs, hdev, - &device_list_fops); debugfs_create_u16("discov_interleaved_timeout", 0644, hdev->debugfs, &hdev->discov_interleaved_timeout); @@ -2138,7 +2182,7 @@ u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, BT_DBG("cache %p, %pMR", cache, &data->bdaddr); - hci_remove_remote_oob_data(hdev, &data->bdaddr); + hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR); if (!data->ssp_mode) flags |=
MGMT_DEV_FOUND_LEGACY_PAIRING; @@ -2584,6 +2628,11 @@ static int hci_dev_do_close(struct hci_dev *hdev) if (test_bit(HCI_MGMT, &hdev->dev_flags)) cancel_delayed_work_sync(&hdev->rpa_expired); + /* Avoid potential lockdep warnings from the *_flush() calls by + * ensuring the workqueue is empty up front. + */ + drain_workqueue(hdev->workqueue); + hci_dev_lock(hdev); hci_inquiry_cache_flush(hdev); hci_pend_le_actions_clear(hdev); @@ -2707,6 +2756,11 @@ int hci_dev_reset(__u16 dev) skb_queue_purge(&hdev->rx_q); skb_queue_purge(&hdev->cmd_q); + /* Avoid potential lockdep warnings from the *_flush() calls by + * ensuring the workqueue is empty up front. + */ + drain_workqueue(hdev->workqueue); + hci_dev_lock(hdev); hci_inquiry_cache_flush(hdev); hci_conn_hash_flush(hdev); @@ -3112,35 +3166,31 @@ void hci_uuids_clear(struct hci_dev *hdev) void hci_link_keys_clear(struct hci_dev *hdev) { - struct list_head *p, *n; - - list_for_each_safe(p, n, &hdev->link_keys) { - struct link_key *key; - - key = list_entry(p, struct link_key, list); + struct link_key *key; - list_del(p); - kfree(key); + list_for_each_entry_rcu(key, &hdev->link_keys, list) { + list_del_rcu(&key->list); + kfree_rcu(key, rcu); } } void hci_smp_ltks_clear(struct hci_dev *hdev) { - struct smp_ltk *k, *tmp; + struct smp_ltk *k; - list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { - list_del(&k->list); - kfree(k); + list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { + list_del_rcu(&k->list); + kfree_rcu(k, rcu); } } void hci_smp_irks_clear(struct hci_dev *hdev) { - struct smp_irk *k, *tmp; + struct smp_irk *k; - list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { - list_del(&k->list); - kfree(k); + list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) { + list_del_rcu(&k->list); + kfree_rcu(k, rcu); } } @@ -3148,9 +3198,14 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct link_key *k; - list_for_each_entry(k, &hdev->link_keys, list) - if (bacmp(bdaddr, &k->bdaddr) == 0) + rcu_read_lock(); + list_for_each_entry_rcu(k, &hdev->link_keys, list) { + if (bacmp(bdaddr, &k->bdaddr) == 0) { + rcu_read_unlock(); return k; + } + } + rcu_read_unlock(); return NULL; } @@ -3174,6 +3229,10 @@ static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, if (!conn) return true; + /* BR/EDR key derived using SC from an LE link */ + if (conn->type == LE_LINK) + return true; + /* Neither local nor remote side had no-bonding as requirement */ if (conn->auth_type > 0x01 && conn->remote_auth > 0x01) return true; @@ -3199,34 +3258,22 @@ static u8 ltk_role(u8 type) return HCI_ROLE_SLAVE; } -struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand, - u8 role) +struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 addr_type, u8 role) { struct smp_ltk *k; - list_for_each_entry(k, &hdev->long_term_keys, list) { - if (k->ediv != ediv || k->rand != rand) - continue; - - if (ltk_role(k->type) != role) + rcu_read_lock(); + list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { + if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr)) continue; - return k; - } - - return NULL; -} - -struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 addr_type, u8 role) -{ - struct smp_ltk *k; - - list_for_each_entry(k, &hdev->long_term_keys, list) - if (addr_type == k->bdaddr_type && - bacmp(bdaddr, &k->bdaddr) == 0 && - ltk_role(k->type) == role) + if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) { + 
rcu_read_unlock(); return k; + } + } + rcu_read_unlock(); return NULL; } @@ -3235,17 +3282,22 @@ struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa) { struct smp_irk *irk; - list_for_each_entry(irk, &hdev->identity_resolving_keys, list) { - if (!bacmp(&irk->rpa, rpa)) + rcu_read_lock(); + list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { + if (!bacmp(&irk->rpa, rpa)) { + rcu_read_unlock(); return irk; + } } - list_for_each_entry(irk, &hdev->identity_resolving_keys, list) { + list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { if (smp_irk_matches(hdev, irk->val, rpa)) { bacpy(&irk->rpa, rpa); + rcu_read_unlock(); return irk; } } + rcu_read_unlock(); return NULL; } @@ -3259,11 +3311,15 @@ struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0) return NULL; - list_for_each_entry(irk, &hdev->identity_resolving_keys, list) { + rcu_read_lock(); + list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { if (addr_type == irk->addr_type && - bacmp(bdaddr, &irk->bdaddr) == 0) + bacmp(bdaddr, &irk->bdaddr) == 0) { + rcu_read_unlock(); return irk; + } } + rcu_read_unlock(); return NULL; } @@ -3284,7 +3340,7 @@ struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, key = kzalloc(sizeof(*key), GFP_KERNEL); if (!key) return NULL; - list_add(&key->list, &hdev->link_keys); + list_add_rcu(&key->list, &hdev->link_keys); } BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type); @@ -3322,14 +3378,14 @@ struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, struct smp_ltk *key, *old_key; u8 role = ltk_role(type); - old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role); + old_key = hci_find_ltk(hdev, bdaddr, addr_type, role); if (old_key) key = old_key; else { key = kzalloc(sizeof(*key), GFP_KERNEL); if (!key) return NULL; - list_add(&key->list, &hdev->long_term_keys); + list_add_rcu(&key->list, &hdev->long_term_keys); } bacpy(&key->bdaddr, bdaddr); @@ -3358,7 +3414,7 @@ struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, bacpy(&irk->bdaddr, bdaddr); irk->addr_type = addr_type; - list_add(&irk->list, &hdev->identity_resolving_keys); + list_add_rcu(&irk->list, &hdev->identity_resolving_keys); } memcpy(irk->val, val, 16); @@ -3377,25 +3433,25 @@ int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) BT_DBG("%s removing %pMR", hdev->name, bdaddr); - list_del(&key->list); - kfree(key); + list_del_rcu(&key->list); + kfree_rcu(key, rcu); return 0; } int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) { - struct smp_ltk *k, *tmp; + struct smp_ltk *k; int removed = 0; - list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { + list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type) continue; BT_DBG("%s removing %pMR", hdev->name, bdaddr); - list_del(&k->list); - kfree(k); + list_del_rcu(&k->list); + kfree_rcu(k, rcu); removed++; } @@ -3404,16 +3460,16 @@ int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) { - struct smp_irk *k, *tmp; + struct smp_irk *k; - list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { + list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) { if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type) continue; BT_DBG("%s removing %pMR", 
hdev->name, bdaddr); - list_del(&k->list); - kfree(k); + list_del_rcu(&k->list); + kfree_rcu(k, rcu); } } @@ -3437,26 +3493,31 @@ static void hci_cmd_timeout(struct work_struct *work) } struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, - bdaddr_t *bdaddr) + bdaddr_t *bdaddr, u8 bdaddr_type) { struct oob_data *data; - list_for_each_entry(data, &hdev->remote_oob_data, list) - if (bacmp(bdaddr, &data->bdaddr) == 0) - return data; + list_for_each_entry(data, &hdev->remote_oob_data, list) { + if (bacmp(bdaddr, &data->bdaddr) != 0) + continue; + if (data->bdaddr_type != bdaddr_type) + continue; + return data; + } return NULL; } -int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr) +int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 bdaddr_type) { struct oob_data *data; - data = hci_find_remote_oob_data(hdev, bdaddr); + data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); if (!data) return -ENOENT; - BT_DBG("%s removing %pMR", hdev->name, bdaddr); + BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type); list_del(&data->list); kfree(data); @@ -3475,52 +3536,37 @@ void hci_remote_oob_data_clear(struct hci_dev *hdev) } int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 *hash, u8 *randomizer) + u8 bdaddr_type, u8 *hash192, u8 *rand192, + u8 *hash256, u8 *rand256) { struct oob_data *data; - data = hci_find_remote_oob_data(hdev, bdaddr); + data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); if (!data) { data = kmalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; bacpy(&data->bdaddr, bdaddr); + data->bdaddr_type = bdaddr_type; list_add(&data->list, &hdev->remote_oob_data); } - memcpy(data->hash192, hash, sizeof(data->hash192)); - memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192)); - - memset(data->hash256, 0, sizeof(data->hash256)); - memset(data->randomizer256, 0, sizeof(data->randomizer256)); - - BT_DBG("%s for %pMR", hdev->name, bdaddr); - - return 0; -} - -int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 *hash192, u8 *randomizer192, - u8 *hash256, u8 *randomizer256) -{ - struct oob_data *data; - - data = hci_find_remote_oob_data(hdev, bdaddr); - if (!data) { - data = kmalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - - bacpy(&data->bdaddr, bdaddr); - list_add(&data->list, &hdev->remote_oob_data); + if (hash192 && rand192) { + memcpy(data->hash192, hash192, sizeof(data->hash192)); + memcpy(data->rand192, rand192, sizeof(data->rand192)); + } else { + memset(data->hash192, 0, sizeof(data->hash192)); + memset(data->rand192, 0, sizeof(data->rand192)); } - memcpy(data->hash192, hash192, sizeof(data->hash192)); - memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192)); - - memcpy(data->hash256, hash256, sizeof(data->hash256)); - memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256)); + if (hash256 && rand256) { + memcpy(data->hash256, hash256, sizeof(data->hash256)); + memcpy(data->rand256, rand256, sizeof(data->rand256)); + } else { + memset(data->hash256, 0, sizeof(data->hash256)); + memset(data->rand256, 0, sizeof(data->rand256)); + } BT_DBG("%s for %pMR", hdev->name, bdaddr); @@ -4220,6 +4266,7 @@ void hci_unregister_dev(struct hci_dev *hdev) hci_remote_oob_data_clear(hdev); hci_bdaddr_list_clear(&hdev->le_white_list); hci_conn_params_clear_all(hdev); + hci_discovery_filter_clear(hdev); hci_dev_unlock(hdev); hci_dev_put(hdev); @@ -4244,6 +4291,24 @@ int hci_resume_dev(struct hci_dev *hdev) } 
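The key-store changes above all follow one RCU recipe: lookups wrap list_for_each_entry_rcu() in rcu_read_lock()/rcu_read_unlock(), while removal (still serialized by hdev->lock on the write side) uses list_del_rcu() plus kfree_rcu() on an rcu head embedded in the entry. A condensed sketch with a hypothetical entry type; returning the entry after rcu_read_unlock() mirrors the helpers above, which rely on hdev->lock keeping the entry alive for their callers:

struct key_entry_sketch {
	struct list_head list;
	struct rcu_head rcu;	/* enables kfree_rcu() on removal */
	bdaddr_t bdaddr;
};

static struct key_entry_sketch *key_lookup_sketch(struct list_head *keys,
						  bdaddr_t *bdaddr)
{
	struct key_entry_sketch *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, keys, list) {
		if (!bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void key_remove_sketch(struct key_entry_sketch *k)
{
	/* write side: caller holds hdev->lock; readers may keep
	 * traversing the entry until a grace period has elapsed
	 */
	list_del_rcu(&k->list);
	kfree_rcu(k, rcu);
}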
EXPORT_SYMBOL(hci_resume_dev); +/* Reset HCI device */ +int hci_reset_dev(struct hci_dev *hdev) +{ + const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 }; + struct sk_buff *skb; + + skb = bt_skb_alloc(3, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + bt_cb(skb)->pkt_type = HCI_EVENT_PKT; + memcpy(skb_put(skb, 3), hw_err, 3); + + /* Send Hardware Error to upper stack */ + return hci_recv_frame(hdev, skb); +} +EXPORT_SYMBOL(hci_reset_dev); + /* Receive frame from HCI drivers */ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) { @@ -4477,7 +4542,7 @@ int hci_req_run(struct hci_request *req, hci_req_complete_t complete) BT_DBG("length %u", skb_queue_len(&req->cmd_q)); - /* If an error occured during request building, remove all HCI + /* If an error occurred during request building, remove all HCI * commands queued on the HCI request queue. */ if (req->err) { @@ -4546,7 +4611,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, return -ENOMEM; } - /* Stand-alone HCI commands must be flaged as + /* Stand-alone HCI commands must be flagged as * single-command requests. */ bt_cb(skb)->req.start = true; @@ -4566,7 +4631,7 @@ void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); - /* If an error occured during request building, there is no point in + /* If an error occurred during request building, there is no point in * queueing the HCI command. We can simply return. */ if (req->err) @@ -4661,8 +4726,12 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, skb_shinfo(skb)->frag_list = NULL; - /* Queue all fragments atomically */ - spin_lock(&queue->lock); + /* Queue all fragments atomically. We need to use spin_lock_bh + * here because of 6LoWPAN links: there this function is called + * from softirq context, and using a normal spin lock could cause + * deadlocks. + */ + spin_lock_bh(&queue->lock); __skb_queue_tail(queue, skb); @@ -4679,7 +4748,7 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, __skb_queue_tail(queue, skb); } while (list); - spin_unlock(&queue->lock); + spin_unlock_bh(&queue->lock); } } @@ -5570,6 +5639,19 @@ void hci_req_add_le_passive_scan(struct hci_request *req) */ filter_policy = update_white_list(req); + /* When the controller is using resolvable random addresses (and + * thus has LE privacy enabled), controllers with Extended Scanner + * Filter Policies support can additionally handle directed + * advertising. + * + * So instead of using filter policies 0x00 (no whitelist) + * and 0x01 (whitelist enabled) use the new filter policies + * 0x02 (no whitelist) and 0x03 (whitelist enabled). + */ + if (test_bit(HCI_PRIVACY, &hdev->dev_flags) && + (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) + filter_policy |= 0x02; + memset(&param_cp, 0, sizeof(param_cp)); param_cp.type = LE_SCAN_PASSIVE; param_cp.interval = cpu_to_le16(hdev->le_scan_interval); @@ -5621,6 +5703,15 @@ void hci_update_background_scan(struct hci_dev *hdev) if (hdev->discovery.state != DISCOVERY_STOPPED) return; + /* Reset RSSI and UUID filters when starting background scanning + * since these filters are meant for service discovery only. + * + * The Start Discovery and Start Service Discovery operations + * ensure that proper values are set for the RSSI threshold and + * UUID filter list. So it is safe to just reset them here.
+ */ + hci_discovery_filter_clear(hdev); + hci_req_init(&req, hdev); if (list_empty(&hdev->pend_le_conns) && diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 8b0a2a6de4199cea5610890a887296d04dec3eca..322abbbbcef991a36adaaf0750c723ceebba3147 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -189,6 +189,9 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) clear_bit(HCI_RESET, &hdev->flags); + if (status) + return; + /* Reset all non-persistent flags */ hdev->dev_flags &= ~HCI_PERSISTENT_MASK; @@ -205,6 +208,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) hdev->le_scan_type = LE_SCAN_PASSIVE; hdev->ssp_debug_mode = 0; + + hci_bdaddr_list_clear(&hdev->le_white_list); } static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) @@ -989,8 +994,8 @@ static void hci_cc_read_local_oob_data(struct hci_dev *hdev, BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); hci_dev_lock(hdev); - mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer, - NULL, NULL, rp->status); + mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL, + rp->status); hci_dev_unlock(hdev); } @@ -1002,8 +1007,8 @@ static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); hci_dev_lock(hdev); - mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192, - rp->hash256, rp->randomizer256, + mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192, + rp->hash256, rp->rand256, rp->status); hci_dev_unlock(hdev); } @@ -1045,7 +1050,7 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_lock(hdev); - /* If we're doing connection initation as peripheral. Set a + /* If we're doing connection initiation as peripheral, set a * timeout in case something goes wrong. */ if (*sent) { @@ -1576,9 +1581,15 @@ static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn, struct discovery_state *discov = &hdev->discovery; struct inquiry_entry *e; - if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) - mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name, - name_len, conn->dev_class); + /* Update the mgmt connected state if necessary. Be careful, + * however, with conn objects that exist but are not (yet) + * connected. Only those in BT_CONFIG or BT_CONNECTED states can + * be considered connected.
+ */ + if (conn && + (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) && + !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) + mgmt_device_connected(hdev, conn, 0, name, name_len); if (discov->state == DISCOVERY_STOPPED) return; @@ -1943,6 +1954,29 @@ unlock: hci_dev_unlock(hdev); } +static void hci_cs_switch_role(struct hci_dev *hdev, u8 status) +{ + struct hci_cp_switch_role *cp; + struct hci_conn *conn; + + BT_DBG("%s status 0x%2.2x", hdev->name, status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE); + if (!cp) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); + if (conn) + clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); + + hci_dev_unlock(hdev); +} + static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); @@ -2009,13 +2043,14 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) data.pscan_mode = info->pscan_mode; memcpy(data.dev_class, info->dev_class, 3); data.clock_offset = info->clock_offset; - data.rssi = 0x00; + data.rssi = HCI_RSSI_INVALID; data.ssp_mode = 0x00; flags = hci_inquiry_cache_update(hdev, &data, false); mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, - info->dev_class, 0, flags, NULL, 0, NULL, 0); + info->dev_class, HCI_RSSI_INVALID, + flags, NULL, 0, NULL, 0); } hci_dev_unlock(hdev); @@ -2536,9 +2571,7 @@ static void hci_remote_features_evt(struct hci_dev *hdev, cp.pscan_rep_mode = 0x02; hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) - mgmt_device_connected(hdev, &conn->dst, conn->type, - conn->dst_type, 0, NULL, 0, - conn->dev_class); + mgmt_device_connected(hdev, conn, 0, NULL, 0); if (!hci_outgoing_auth_needed(hdev, conn)) { conn->state = BT_CONNECTED; @@ -2848,6 +2881,10 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_cs_create_conn(hdev, ev->status); break; + case HCI_OP_DISCONNECT: + hci_cs_disconnect(hdev, ev->status); + break; + case HCI_OP_ADD_SCO: hci_cs_add_sco(hdev, ev->status); break; @@ -2876,24 +2913,24 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_cs_setup_sync_conn(hdev, ev->status); break; - case HCI_OP_SNIFF_MODE: - hci_cs_sniff_mode(hdev, ev->status); + case HCI_OP_CREATE_PHY_LINK: + hci_cs_create_phylink(hdev, ev->status); break; - case HCI_OP_EXIT_SNIFF_MODE: - hci_cs_exit_sniff_mode(hdev, ev->status); + case HCI_OP_ACCEPT_PHY_LINK: + hci_cs_accept_phylink(hdev, ev->status); break; - case HCI_OP_DISCONNECT: - hci_cs_disconnect(hdev, ev->status); + case HCI_OP_SNIFF_MODE: + hci_cs_sniff_mode(hdev, ev->status); break; - case HCI_OP_CREATE_PHY_LINK: - hci_cs_create_phylink(hdev, ev->status); + case HCI_OP_EXIT_SNIFF_MODE: + hci_cs_exit_sniff_mode(hdev, ev->status); break; - case HCI_OP_ACCEPT_PHY_LINK: - hci_cs_accept_phylink(hdev, ev->status); + case HCI_OP_SWITCH_ROLE: + hci_cs_switch_role(hdev, ev->status); break; case HCI_OP_LE_CREATE_CONN: @@ -2923,6 +2960,13 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) } } +static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_hardware_error *ev = (void *) skb->data; + + BT_ERR("%s hardware error 0x%2.2x", hdev->name, ev->code); +} + static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_role_change *ev = (void *) skb->data; @@ -3148,6 +3192,38 @@ unlock: 
hci_dev_unlock(hdev); } +static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len) +{ + if (key_type == HCI_LK_CHANGED_COMBINATION) + return; + + conn->pin_length = pin_len; + conn->key_type = key_type; + + switch (key_type) { + case HCI_LK_LOCAL_UNIT: + case HCI_LK_REMOTE_UNIT: + case HCI_LK_DEBUG_COMBINATION: + return; + case HCI_LK_COMBINATION: + if (pin_len == 16) + conn->pending_sec_level = BT_SECURITY_HIGH; + else + conn->pending_sec_level = BT_SECURITY_MEDIUM; + break; + case HCI_LK_UNAUTH_COMBINATION_P192: + case HCI_LK_UNAUTH_COMBINATION_P256: + conn->pending_sec_level = BT_SECURITY_MEDIUM; + break; + case HCI_LK_AUTH_COMBINATION_P192: + conn->pending_sec_level = BT_SECURITY_HIGH; + break; + case HCI_LK_AUTH_COMBINATION_P256: + conn->pending_sec_level = BT_SECURITY_FIPS; + break; + } +} + static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_link_key_req *ev = (void *) skb->data; @@ -3174,6 +3250,8 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (conn) { + clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); + if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 || key->type == HCI_LK_UNAUTH_COMBINATION_P256) && conn->auth_type != 0xff && (conn->auth_type & 0x01)) { @@ -3189,8 +3267,7 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) goto not_found; } - conn->key_type = key->type; - conn->pin_length = key->pin_len; + conn_set_key(conn, key->type, key->pin_len); } bacpy(&cp.bdaddr, &ev->bdaddr); @@ -3220,16 +3297,15 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); - if (conn) { - hci_conn_hold(conn); - conn->disc_timeout = HCI_DISCONN_TIMEOUT; - pin_len = conn->pin_length; + if (!conn) + goto unlock; - if (ev->key_type != HCI_LK_CHANGED_COMBINATION) - conn->key_type = ev->key_type; + hci_conn_hold(conn); + conn->disc_timeout = HCI_DISCONN_TIMEOUT; + hci_conn_drop(conn); - hci_conn_drop(conn); - } + set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); + conn_set_key(conn, ev->key_type, conn->pin_length); if (!test_bit(HCI_MGMT, &hdev->dev_flags)) goto unlock; @@ -3239,6 +3315,12 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) if (!key) goto unlock; + /* Update connection information since adding the key will have + * fixed up the type in the case of changed combination keys. 
+ */ + if (ev->key_type == HCI_LK_CHANGED_COMBINATION) + conn_set_key(conn, key->type, key->pin_len); + mgmt_new_link_key(hdev, key, persistent); /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag @@ -3248,15 +3330,16 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) */ if (key->type == HCI_LK_DEBUG_COMBINATION && !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) { - list_del(&key->list); - kfree(key); - } else if (conn) { - if (persistent) - clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags); - else - set_bit(HCI_CONN_FLUSH_KEY, &conn->flags); + list_del_rcu(&key->list); + kfree_rcu(key, rcu); + goto unlock; } + if (persistent) + clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags); + else + set_bit(HCI_CONN_FLUSH_KEY, &conn->flags); + unlock: hci_dev_unlock(hdev); } @@ -3434,9 +3517,7 @@ static void hci_remote_ext_features_evt(struct hci_dev *hdev, cp.pscan_rep_mode = 0x02; hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) - mgmt_device_connected(hdev, &conn->dst, conn->type, - conn->dst_type, 0, NULL, 0, - conn->dev_class); + mgmt_device_connected(hdev, conn, 0, NULL, 0); if (!hci_outgoing_auth_needed(hdev, conn)) { conn->state = BT_CONNECTED; @@ -3693,7 +3774,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) cp.authentication = conn->auth_type; - if (hci_find_remote_oob_data(hdev, &conn->dst) && + if (hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR) && (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags))) cp.oob_data = 0x01; else @@ -3948,18 +4029,16 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, if (!test_bit(HCI_MGMT, &hdev->dev_flags)) goto unlock; - data = hci_find_remote_oob_data(hdev, &ev->bdaddr); + data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR); if (data) { - if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) { + if (bredr_sc_enabled(hdev)) { struct hci_cp_remote_oob_ext_data_reply cp; bacpy(&cp.bdaddr, &ev->bdaddr); memcpy(cp.hash192, data->hash192, sizeof(cp.hash192)); - memcpy(cp.randomizer192, data->randomizer192, - sizeof(cp.randomizer192)); + memcpy(cp.rand192, data->rand192, sizeof(cp.rand192)); memcpy(cp.hash256, data->hash256, sizeof(cp.hash256)); - memcpy(cp.randomizer256, data->randomizer256, - sizeof(cp.randomizer256)); + memcpy(cp.rand256, data->rand256, sizeof(cp.rand256)); hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY, sizeof(cp), &cp); @@ -3968,8 +4047,7 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, bacpy(&cp.bdaddr, &ev->bdaddr); memcpy(cp.hash, data->hash192, sizeof(cp.hash)); - memcpy(cp.randomizer, data->randomizer192, - sizeof(cp.randomizer)); + memcpy(cp.rand, data->rand192, sizeof(cp.rand)); hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp), &cp); @@ -4214,8 +4292,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) } if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) - mgmt_device_connected(hdev, &conn->dst, conn->type, - conn->dst_type, 0, NULL, 0, NULL); + mgmt_device_connected(hdev, conn, 0, NULL, 0); conn->sec_level = BT_SECURITY_LOW; conn->handle = __le16_to_cpu(ev->handle); @@ -4269,25 +4346,26 @@ static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, } /* This function requires the caller holds hdev->lock */ -static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, - u8 addr_type, u8 adv_type) +static struct hci_conn *check_pending_le_conn(struct 
hci_dev *hdev, + bdaddr_t *addr, + u8 addr_type, u8 adv_type) { struct hci_conn *conn; struct hci_conn_params *params; /* If the event is not connectable don't proceed further */ if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND) - return; + return NULL; /* Ignore if the device is blocked */ if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type)) - return; + return NULL; /* Most controller will fail if we try to create new connections * while we have an existing one in slave role. */ if (hdev->conn_hash.le_num_slave > 0) - return; + return NULL; /* If we're not connectable only connect devices that we have in * our pend_le_conns list. @@ -4295,7 +4373,7 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, addr_type); if (!params) - return; + return NULL; switch (params->auto_connect) { case HCI_AUTO_CONN_DIRECT: @@ -4304,7 +4382,7 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, * incoming connections from slave devices. */ if (adv_type != LE_ADV_DIRECT_IND) - return; + return NULL; break; case HCI_AUTO_CONN_ALWAYS: /* Devices advertising with ADV_IND or ADV_DIRECT_IND @@ -4315,7 +4393,7 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, */ break; default: - return; + return NULL; } conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW, @@ -4328,7 +4406,7 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, * count consistent once the connection is established. */ params->conn = hci_conn_get(conn); - return; + return conn; } switch (PTR_ERR(conn)) { @@ -4341,17 +4419,48 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, break; default: BT_DBG("Failed to connect: err %ld", PTR_ERR(conn)); + return NULL; } + + return NULL; } static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, - u8 bdaddr_type, s8 rssi, u8 *data, u8 len) + u8 bdaddr_type, bdaddr_t *direct_addr, + u8 direct_addr_type, s8 rssi, u8 *data, u8 len) { struct discovery_state *d = &hdev->discovery; struct smp_irk *irk; + struct hci_conn *conn; bool match; u32 flags; + /* If the direct address is present, then this report is from + * an LE Direct Advertising Report event. In that case it is + * important to check whether the address matches the local + * controller address. + */ + if (direct_addr) { + /* Only resolvable random addresses are valid for this + * kind of report and others can be ignored. + */ + if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type)) + return; + + /* If the controller is not using resolvable random + * addresses, then this report can be ignored. + */ + if (!test_bit(HCI_PRIVACY, &hdev->dev_flags)) + return; + + /* If the local IRK of the controller does not match + * with the resolvable random address provided, then + * this report can be ignored.
+ */ + if (!smp_irk_matches(hdev, hdev->irk, direct_addr)) + return; + } + /* Check if we need to convert to identity address */ irk = hci_get_irk(hdev, bdaddr, bdaddr_type); if (irk) { @@ -4360,7 +4469,14 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, } /* Check if we have been requested to connect to this device */ - check_pending_le_conn(hdev, bdaddr, bdaddr_type, type); + conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type); + if (conn && type == LE_ADV_IND) { + /* Store report for later inclusion by + * mgmt_device_connected + */ + memcpy(conn->le_adv_data, data, len); + conn->le_adv_data_len = len; + } /* Passive scanning shouldn't trigger any device found events, * except for devices marked as CONN_REPORT for which we do send @@ -4481,7 +4597,8 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) rssi = ev->data[ev->length]; process_adv_report(hdev, ev->evt_type, &ev->bdaddr, - ev->bdaddr_type, rssi, ev->data, ev->length); + ev->bdaddr_type, NULL, 0, rssi, + ev->data, ev->length); ptr += sizeof(*ev) + ev->length + 1; } @@ -4505,10 +4622,20 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) if (conn == NULL) goto not_found; - ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->role); - if (ltk == NULL) + ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role); + if (!ltk) goto not_found; + if (smp_ltk_is_sc(ltk)) { + /* With SC both EDiv and Rand are set to zero */ + if (ev->ediv || ev->rand) + goto not_found; + } else { + /* For non-SC keys check that EDiv and Rand match */ + if (ev->ediv != ltk->ediv || ev->rand != ltk->rand) + goto not_found; + } + memcpy(cp.ltk, ltk->val, sizeof(ltk->val)); cp.handle = cpu_to_le16(conn->handle); @@ -4526,8 +4653,8 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) */ if (ltk->type == SMP_STK) { set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); - list_del(&ltk->list); - kfree(ltk); + list_del_rcu(&ltk->list); + kfree_rcu(ltk, rcu); } else { clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); } @@ -4612,6 +4739,27 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp); } +static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, + struct sk_buff *skb) +{ + u8 num_reports = skb->data[0]; + void *ptr = &skb->data[1]; + + hci_dev_lock(hdev); + + while (num_reports--) { + struct hci_ev_le_direct_adv_info *ev = ptr; + + process_adv_report(hdev, ev->evt_type, &ev->bdaddr, + ev->bdaddr_type, &ev->direct_addr, + ev->direct_addr_type, ev->rssi, NULL, 0); + + ptr += sizeof(*ev); + } + + hci_dev_unlock(hdev); +} + static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_le_meta *le_ev = (void *) skb->data; @@ -4639,6 +4787,10 @@ static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_le_remote_conn_param_req_evt(hdev, skb); break; + case HCI_EV_LE_DIRECT_ADV_REPORT: + hci_le_direct_adv_report_evt(hdev, skb); + break; + default: break; } @@ -4735,6 +4887,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) hci_cmd_status_evt(hdev, skb); break; + case HCI_EV_HARDWARE_ERROR: + hci_hardware_error_evt(hdev, skb); + break; + case HCI_EV_ROLE_CHANGE: hci_role_change_evt(hdev, skb); break; diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 115f149362ba84d4b24b0cb217b49de3a5cf03bb..2c245fdf319a60022328e8f3918251db526f88f8 100644 --- a/net/bluetooth/hci_sock.c +++
b/net/bluetooth/hci_sock.c @@ -878,7 +878,7 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock, } skb_reset_transport_header(skb); - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); switch (hci_pi(sk)->channel) { case HCI_CHANNEL_RAW: @@ -947,7 +947,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, if (!skb) goto done; - if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { + if (memcpy_from_msg(skb_put(skb, len), msg, len)) { err = -EFAULT; goto drop; } @@ -987,7 +987,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, skb_queue_tail(&hdev->raw_q, skb); queue_work(hdev->workqueue, &hdev->tx_work); } else { - /* Stand-alone HCI commands must be flaged as + /* Stand-alone HCI commands must be flagged as * single-command requests. */ bt_cb(skb)->req.start = true; diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig index 9332bc7aa851fb798533cd5695fd260de68c242f..bc8610b24077eb4bb917824eda7268c9a0cec5bd 100644 --- a/net/bluetooth/hidp/Kconfig +++ b/net/bluetooth/hidp/Kconfig @@ -1,6 +1,6 @@ config BT_HIDP tristate "HIDP protocol support" - depends on BT && INPUT + depends on BT_BREDR && INPUT select HID help HIDP (Human Interface Device Protocol) is a transport layer diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 1b7d605706aa5720b45e056d3a75f235c9c4ab60..cc25d0b74b3609ef800453484eef7e0f55767162 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -736,14 +736,10 @@ static int hidp_setup_hid(struct hidp_session *session, struct hid_device *hid; int err; - session->rd_data = kzalloc(req->rd_size, GFP_KERNEL); - if (!session->rd_data) - return -ENOMEM; + session->rd_data = memdup_user(req->rd_data, req->rd_size); + if (IS_ERR(session->rd_data)) + return PTR_ERR(session->rd_data); - if (copy_from_user(session->rd_data, req->rd_data, req->rd_size)) { - err = -EFAULT; - goto fault; - } session->rd_size = req->rd_size; hid = hid_allocate_device(); diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index b6f9777e057da11105d63deabf9ebd0dc897436c..a2b6dfa38a0cfd7f020c9d6f5a1584a92ae5c6e6 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -46,7 +46,6 @@ bool disable_ertm; static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD; -static u8 l2cap_fixed_chan[8] = { L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS, }; static LIST_HEAD(chan_list); static DEFINE_RWLOCK(chan_list_lock); @@ -424,6 +423,9 @@ struct l2cap_chan *l2cap_chan_create(void) mutex_init(&chan->lock); + /* Set default lock nesting level */ + atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL); + write_lock(&chan_list_lock); list_add(&chan->global_l, &chan_list); write_unlock(&chan_list_lock); @@ -567,7 +569,8 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err) __clear_chan_timer(chan); - BT_DBG("chan %p, conn %p, err %d", chan, conn, err); + BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err, + state_to_string(chan->state)); chan->ops->teardown(chan, err); @@ -836,7 +839,10 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, if (!skb) return; - if (lmp_no_flush_capable(conn->hcon->hdev)) + /* Use NO_FLUSH if supported or we have an LE link (which does + * not support auto-flushing packets) */ + if (lmp_no_flush_capable(conn->hcon->hdev) || + conn->hcon->type == LE_LINK) flags = ACL_START_NO_FLUSH; else flags = ACL_START; @@ -870,8 +876,13 @@ static void 
l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) return; } - if (!test_bit(FLAG_FLUSHABLE, &chan->flags) && - lmp_no_flush_capable(hcon->hdev)) + /* Use NO_FLUSH for LE links (where this is the only option) or + * if the BR/EDR link supports it and flushing has not been + * explicitly requested (through FLAG_FLUSHABLE). + */ + if (hcon->type == LE_LINK || + (!test_bit(FLAG_FLUSHABLE, &chan->flags) && + lmp_no_flush_capable(hcon->hdev))) flags = ACL_START_NO_FLUSH; else flags = ACL_START; @@ -1108,10 +1119,10 @@ static bool __amp_capable(struct l2cap_chan *chan) struct hci_dev *hdev; bool amp_available = false; - if (!conn->hs_enabled) + if (!(conn->local_fixed_chan & L2CAP_FC_A2MP)) return false; - if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP)) + if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP)) return false; read_lock(&hci_dev_list_lock); @@ -2092,8 +2103,7 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, struct sk_buff **frag; int sent = 0; - if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count), - msg->msg_iov, count)) + if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count) return -EFAULT; sent += count; @@ -2113,8 +2123,8 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, *frag = tmp; - if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count), - msg->msg_iov, count)) + if (copy_from_iter(skb_put(*frag, count), count, + &msg->msg_iter) != count) return -EFAULT; sent += count; @@ -3084,12 +3094,14 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) static inline bool __l2cap_ews_supported(struct l2cap_conn *conn) { - return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW; + return ((conn->local_fixed_chan & L2CAP_FC_A2MP) && + (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW)); } static inline bool __l2cap_efs_supported(struct l2cap_conn *conn) { - return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW; + return ((conn->local_fixed_chan & L2CAP_FC_A2MP) && + (conn->feat_mask & L2CAP_FEAT_EXT_FLOW)); } static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan, @@ -3318,7 +3330,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) break; case L2CAP_CONF_EWS: - if (!chan->conn->hs_enabled) + if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP)) return -ECONNREFUSED; set_bit(FLAG_EXT_CTRL, &chan->flags); @@ -3873,9 +3885,7 @@ static int l2cap_connect_req(struct l2cap_conn *conn, hci_dev_lock(hdev); if (test_bit(HCI_MGMT, &hdev->dev_flags) && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags)) - mgmt_device_connected(hdev, &hcon->dst, hcon->type, - hcon->dst_type, 0, NULL, 0, - hcon->dev_class); + mgmt_device_connected(hdev, hcon, 0, NULL, 0); hci_dev_unlock(hdev); l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0); @@ -4084,7 +4094,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, chan->num_conf_req++; } - /* Got Conf Rsp PENDING from remote side and asume we sent + /* Got Conf Rsp PENDING from remote side and assume we sent Conf Rsp PENDING in the code above */ if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) && test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { @@ -4324,7 +4334,7 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, if (!disable_ertm) feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING | L2CAP_FEAT_FCS; - if (conn->hs_enabled) + if (conn->local_fixed_chan & L2CAP_FC_A2MP) feat_mask |= L2CAP_FEAT_EXT_FLOW | L2CAP_FEAT_EXT_WINDOW; @@ -4335,14 +4345,10 @@ static inline int l2cap_information_req(struct 
l2cap_conn *conn, u8 buf[12]; struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; - if (conn->hs_enabled) - l2cap_fixed_chan[0] |= L2CAP_FC_A2MP; - else - l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP; - rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); - memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan)); + rsp->data[0] = conn->local_fixed_chan; + memset(rsp->data + 1, 0, 7); l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf), buf); } else { @@ -4408,7 +4414,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, break; case L2CAP_IT_FIXED_CHAN: - conn->fixed_chan_mask = rsp->data[0]; + conn->remote_fixed_chan = rsp->data[0]; conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; conn->info_ident = 0; @@ -4432,7 +4438,7 @@ static int l2cap_create_channel_req(struct l2cap_conn *conn, if (cmd_len != sizeof(*req)) return -EPROTO; - if (!conn->hs_enabled) + if (!(conn->local_fixed_chan & L2CAP_FC_A2MP)) return -EINVAL; psm = le16_to_cpu(req->psm); @@ -4862,7 +4868,7 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn, BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id); - if (!conn->hs_enabled) + if (!(conn->local_fixed_chan & L2CAP_FC_A2MP)) return -EINVAL; chan = l2cap_get_chan_by_dcid(conn, icid); @@ -5217,9 +5223,10 @@ static int l2cap_le_connect_rsp(struct l2cap_conn *conn, u8 *data) { struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data; + struct hci_conn *hcon = conn->hcon; u16 dcid, mtu, mps, credits, result; struct l2cap_chan *chan; - int err; + int err, sec_level; if (cmd_len < sizeof(*rsp)) return -EPROTO; @@ -5258,6 +5265,26 @@ static int l2cap_le_connect_rsp(struct l2cap_conn *conn, l2cap_chan_ready(chan); break; + case L2CAP_CR_AUTHENTICATION: + case L2CAP_CR_ENCRYPTION: + /* If we already have MITM protection we can't do + * anything. + */ + if (hcon->sec_level > BT_SECURITY_MEDIUM) { + l2cap_chan_del(chan, ECONNREFUSED); + break; + } + + sec_level = hcon->sec_level + 1; + if (chan->sec_level < sec_level) + chan->sec_level = sec_level; + + /* We'll need to send a new Connect Request */ + clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags); + + smp_conn_security(hcon, chan->sec_level); + break; + default: l2cap_chan_del(chan, ECONNREFUSED); break; @@ -5390,7 +5417,8 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn, mutex_lock(&conn->chan_lock); l2cap_chan_lock(pchan); - if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) { + if (!smp_sufficient_security(conn->hcon, pchan->sec_level, + SMP_ALLOW_STK)) { result = L2CAP_CR_AUTHENTICATION; chan = NULL; goto response_unlock; @@ -5494,6 +5522,7 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn, if (credits > max_credits) { BT_ERR("LE credits overflow"); l2cap_send_disconn_req(chan, ECONNRESET); + l2cap_chan_unlock(chan); /* Return 0 so that we don't trigger an unnecessary * command reject packet. 
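With the global l2cap_fixed_chan[] array gone, fixed-channel feature tests combine the per-connection local_fixed_chan mask (initialized in l2cap_conn_add() below) with the remote_fixed_chan mask learned from the information response above. A small sketch of the resulting check; the helper names are hypothetical:

/* A fixed channel is usable only when both sides advertise it. */
static bool l2cap_fc_supported_sketch(struct l2cap_conn *conn, u8 fc_bit)
{
	return (conn->local_fixed_chan & fc_bit) &&
	       (conn->remote_fixed_chan & fc_bit);
}

/* e.g. the A2MP availability test in __amp_capable() reduces to: */
static bool amp_possible_sketch(struct l2cap_conn *conn)
{
	return l2cap_fc_supported_sketch(conn, L2CAP_FC_A2MP);
}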
@@ -6931,9 +6960,15 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon) conn->feat_mask = 0; - if (hcon->type == ACL_LINK) - conn->hs_enabled = test_bit(HCI_HS_ENABLED, - &hcon->hdev->dev_flags); + conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS; + + if (hcon->type == ACL_LINK && + test_bit(HCI_HS_ENABLED, &hcon->hdev->dev_flags)) + conn->local_fixed_chan |= L2CAP_FC_A2MP; + + if (bredr_sc_enabled(hcon->hdev) && + test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) + conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR; mutex_init(&conn->ident_lock); mutex_init(&conn->chan_lock); @@ -7330,7 +7365,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) l2cap_start_connection(chan); else __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); - } else if (chan->state == BT_CONNECT2) { + } else if (chan->state == BT_CONNECT2 && + chan->mode != L2CAP_MODE_LE_FLOWCTL) { struct l2cap_conn_rsp rsp; __u16 res, stat; diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 31f106e61ca20fd15420830e653f81e7b1f87a20..f65caf41953f866fdea55b8eb342fafc9dc8a1c7 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -285,6 +285,12 @@ static int l2cap_sock_listen(struct socket *sock, int backlog) sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; + /* Listening channels need to use nested locking in order not to + * cause lockdep warnings when the created child channels end up + * being locked in the same thread as the parent channel. + */ + atomic_set(&chan->nesting, L2CAP_NESTING_PARENT); + chan->state = BT_LISTEN; sk->sk_state = BT_LISTEN; @@ -301,7 +307,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, long timeo; int err = 0; - lock_sock_nested(sk, SINGLE_DEPTH_NESTING); + lock_sock_nested(sk, L2CAP_NESTING_PARENT); timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); @@ -333,7 +339,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, release_sock(sk); timeo = schedule_timeout(timeo); - lock_sock_nested(sk, SINGLE_DEPTH_NESTING); + lock_sock_nested(sk, L2CAP_NESTING_PARENT); } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); @@ -1096,6 +1102,8 @@ static int l2cap_sock_shutdown(struct socket *sock, int how) chan = l2cap_pi(sk)->chan; conn = chan->conn; + BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); + if (conn) mutex_lock(&conn->chan_lock); @@ -1153,12 +1161,16 @@ static void l2cap_sock_cleanup_listen(struct sock *parent) { struct sock *sk; - BT_DBG("parent %p", parent); + BT_DBG("parent %p state %s", parent, + state_to_string(parent->sk_state)); /* Close not yet accepted channels */ while ((sk = bt_accept_dequeue(parent, NULL))) { struct l2cap_chan *chan = l2cap_pi(sk)->chan; + BT_DBG("child chan %p state %s", chan, + state_to_string(chan->state)); + l2cap_chan_lock(chan); __clear_chan_timer(chan); l2cap_chan_close(chan, ECONNRESET); @@ -1246,7 +1258,16 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err) struct sock *sk = chan->data; struct sock *parent; - lock_sock(sk); + BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); + + /* This callback can be called both for server (BT_LISTEN) + * sockets and for "normal" ones. To avoid lockdep warnings + * with child socket locking (through l2cap_sock_cleanup_listen) + * we need to separate them into distinct nesting levels. The simplest + * way to accomplish this is to inherit the nesting level used + * for the channel.
+ */ + lock_sock_nested(sk, atomic_read(&chan->nesting)); parent = bt_sk(sk)->parent; @@ -1315,13 +1336,6 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan, return skb; } -static int l2cap_sock_memcpy_fromiovec_cb(struct l2cap_chan *chan, - unsigned char *kdata, - struct iovec *iov, int len) -{ - return memcpy_fromiovec(kdata, iov, len); -} - static void l2cap_sock_ready_cb(struct l2cap_chan *chan) { struct sock *sk = chan->data; @@ -1406,7 +1420,6 @@ static const struct l2cap_ops l2cap_chan_ops = { .set_shutdown = l2cap_sock_set_shutdown_cb, .get_sndtimeo = l2cap_sock_get_sndtimeo_cb, .alloc_skb = l2cap_sock_alloc_skb_cb, - .memcpy_fromiovec = l2cap_sock_memcpy_fromiovec_cb, }; static void l2cap_sock_destruct(struct sock *sk) diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index efb71b022ab6520527b3087dbf917ddcb925438e..7384f11613369b0997df0229ecc6d8ea2c80bb5b 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -35,7 +35,7 @@ #include "smp.h" #define MGMT_VERSION 1 -#define MGMT_REVISION 7 +#define MGMT_REVISION 8 static const u16 mgmt_commands[] = { MGMT_OP_READ_INDEX_LIST, @@ -93,6 +93,7 @@ static const u16 mgmt_commands[] = { MGMT_OP_READ_CONFIG_INFO, MGMT_OP_SET_EXTERNAL_CONFIG, MGMT_OP_SET_PUBLIC_ADDRESS, + MGMT_OP_START_SERVICE_DISCOVERY, }; static const u16 mgmt_events[] = { @@ -134,8 +135,10 @@ struct pending_cmd { u16 opcode; int index; void *param; + size_t param_len; struct sock *sk; void *user_data; + void (*cmd_complete)(struct pending_cmd *cmd, u8 status); }; /* HCI to MGMT error code conversion table */ @@ -574,6 +577,7 @@ static u32 get_supported_settings(struct hci_dev *hdev) if (lmp_le_capable(hdev)) { settings |= MGMT_SETTING_LE; settings |= MGMT_SETTING_ADVERTISING; + settings |= MGMT_SETTING_SECURE_CONN; settings |= MGMT_SETTING_PRIVACY; } @@ -1202,14 +1206,13 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, cmd->opcode = opcode; cmd->index = hdev->id; - cmd->param = kmalloc(len, GFP_KERNEL); + cmd->param = kmemdup(data, len, GFP_KERNEL); if (!cmd->param) { kfree(cmd); return NULL; } - if (data) - memcpy(cmd->param, data, len); + cmd->param_len = len; cmd->sk = sk; sock_hold(sk); @@ -1469,6 +1472,32 @@ static void cmd_status_rsp(struct pending_cmd *cmd, void *data) mgmt_pending_remove(cmd); } +static void cmd_complete_rsp(struct pending_cmd *cmd, void *data) +{ + if (cmd->cmd_complete) { + u8 *status = data; + + cmd->cmd_complete(cmd, *status); + mgmt_pending_remove(cmd); + + return; + } + + cmd_status_rsp(cmd, data); +} + +static void generic_cmd_complete(struct pending_cmd *cmd, u8 status) +{ + cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, + cmd->param_len); +} + +static void addr_cmd_complete(struct pending_cmd *cmd, u8 status) +{ + cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, + sizeof(struct mgmt_addr_info)); +} + static u8 mgmt_bredr_support(struct hci_dev *hdev) { if (!lmp_bredr_capable(hdev)) @@ -2725,10 +2754,40 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, } if (cp->addr.type == BDADDR_BREDR) { + /* If disconnection is requested, then look up the + * connection. If the remote device is connected, it + * will be later used to terminate the link. + * + * Setting it to NULL explicitly will cause no + * termination of the link. 
+ */ + if (cp->disconnect) + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, + &cp->addr.bdaddr); + else + conn = NULL; + err = hci_remove_link_key(hdev, &cp->addr.bdaddr); } else { u8 addr_type; + conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, + &cp->addr.bdaddr); + if (conn) { + /* Defer clearing up the connection parameters + * until closing to give a chance of keeping + * them if a repairing happens. + */ + set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags); + + /* If disconnection is not requested, then + * clear the connection variable so that the + * link is not terminated. + */ + if (!cp->disconnect) + conn = NULL; + } + if (cp->addr.type == BDADDR_LE_PUBLIC) addr_type = ADDR_LE_DEV_PUBLIC; else @@ -2736,8 +2795,6 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type); - hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type); - err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type); } @@ -2747,17 +2804,9 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, goto unlock; } - if (cp->disconnect) { - if (cp->addr.type == BDADDR_BREDR) - conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, - &cp->addr.bdaddr); - else - conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, - &cp->addr.bdaddr); - } else { - conn = NULL; - } - + /* If the connection variable is set, then termination of the + * link is requested. + */ if (!conn) { err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0, &rp, sizeof(rp)); @@ -2772,6 +2821,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, goto unlock; } + cmd->cmd_complete = addr_cmd_complete; + dc.handle = cpu_to_le16(conn->handle); dc.reason = 0x13; /* Remote User Terminated Connection */ err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc); @@ -2835,6 +2886,8 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, goto failed; } + cmd->cmd_complete = generic_cmd_complete; + err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM); if (err < 0) mgmt_pending_remove(cmd); @@ -2987,6 +3040,8 @@ static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data, goto failed; } + cmd->cmd_complete = addr_cmd_complete; + bacpy(&reply.bdaddr, &cp->addr.bdaddr); reply.pin_len = cp->pin_len; memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code)); @@ -3062,6 +3117,11 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status) hci_conn_put(conn); mgmt_pending_remove(cmd); + + /* The device is paired so there is no need to remove + * its connection parameters anymore. 
+ */ + clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags); } void mgmt_smp_complete(struct hci_conn *conn, bool complete) @@ -3071,7 +3131,7 @@ void mgmt_smp_complete(struct hci_conn *conn, bool complete) cmd = find_pairing(conn); if (cmd) - pairing_complete(cmd, status); + cmd->cmd_complete(cmd, status); } static void pairing_complete_cb(struct hci_conn *conn, u8 status) @@ -3084,7 +3144,7 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status) if (!cmd) BT_DBG("Unable to find a pending command"); else - pairing_complete(cmd, mgmt_status(status)); + cmd->cmd_complete(cmd, mgmt_status(status)); } static void le_pairing_complete_cb(struct hci_conn *conn, u8 status) @@ -3100,7 +3160,7 @@ static void le_pairing_complete_cb(struct hci_conn *conn, u8 status) if (!cmd) BT_DBG("Unable to find a pending command"); else - pairing_complete(cmd, mgmt_status(status)); + cmd->cmd_complete(cmd, mgmt_status(status)); } static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, @@ -3197,6 +3257,8 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, goto unlock; } + cmd->cmd_complete = pairing_complete; + /* For LE, just connecting isn't a proof that the pairing finished */ if (cp->addr.type == BDADDR_BREDR) { conn->connect_cfm_cb = pairing_complete_cb; @@ -3313,6 +3375,8 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev, goto done; } + cmd->cmd_complete = addr_cmd_complete; + /* Continue with pairing via HCI */ if (hci_op == HCI_OP_USER_PASSKEY_REPLY) { struct hci_cp_user_passkey_reply cp; @@ -3537,7 +3601,7 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev, goto unlock; } - if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) + if (bredr_sc_enabled(hdev)) err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL); else @@ -3564,8 +3628,17 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_add_remote_oob_data *cp = data; u8 status; + if (cp->addr.type != BDADDR_BREDR) { + err = cmd_complete(sk, hdev->id, + MGMT_OP_ADD_REMOTE_OOB_DATA, + MGMT_STATUS_INVALID_PARAMS, + &cp->addr, sizeof(cp->addr)); + goto unlock; + } + err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, - cp->hash, cp->randomizer); + cp->addr.type, cp->hash, + cp->rand, NULL, NULL); if (err < 0) status = MGMT_STATUS_FAILED; else @@ -3575,13 +3648,28 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev, status, &cp->addr, sizeof(cp->addr)); } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) { struct mgmt_cp_add_remote_oob_ext_data *cp = data; + u8 *rand192, *hash192; u8 status; - err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr, - cp->hash192, - cp->randomizer192, - cp->hash256, - cp->randomizer256); + if (cp->addr.type != BDADDR_BREDR) { + err = cmd_complete(sk, hdev->id, + MGMT_OP_ADD_REMOTE_OOB_DATA, + MGMT_STATUS_INVALID_PARAMS, + &cp->addr, sizeof(cp->addr)); + goto unlock; + } + + if (bdaddr_type_is_le(cp->addr.type)) { + rand192 = NULL; + hash192 = NULL; + } else { + rand192 = cp->rand192; + hash192 = cp->hash192; + } + + err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, + cp->addr.type, hash192, rand192, + cp->hash256, cp->rand256); if (err < 0) status = MGMT_STATUS_FAILED; else @@ -3595,6 +3683,7 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev, MGMT_STATUS_INVALID_PARAMS); } +unlock: hci_dev_unlock(hdev); return err; } @@ -3608,14 +3697,26 @@ static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev, BT_DBG("%s", hdev->name); + 
if (cp->addr.type != BDADDR_BREDR) + return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA, + MGMT_STATUS_INVALID_PARAMS, + &cp->addr, sizeof(cp->addr)); + hci_dev_lock(hdev); - err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr); + if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) { + hci_remote_oob_data_clear(hdev); + status = MGMT_STATUS_SUCCESS; + goto done; + } + + err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type); if (err < 0) status = MGMT_STATUS_INVALID_PARAMS; else status = MGMT_STATUS_SUCCESS; +done: err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA, status, &cp->addr, sizeof(cp->addr)); @@ -3623,64 +3724,150 @@ static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev, return err; } -static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status) +static bool trigger_discovery(struct hci_request *req, u8 *status) { - struct pending_cmd *cmd; - u8 type; + struct hci_dev *hdev = req->hdev; + struct hci_cp_le_set_scan_param param_cp; + struct hci_cp_le_set_scan_enable enable_cp; + struct hci_cp_inquiry inq_cp; + /* General inquiry access code (GIAC) */ + u8 lap[3] = { 0x33, 0x8b, 0x9e }; + u8 own_addr_type; int err; - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + switch (hdev->discovery.type) { + case DISCOV_TYPE_BREDR: + *status = mgmt_bredr_support(hdev); + if (*status) + return false; - cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev); - if (!cmd) - return -ENOENT; + if (test_bit(HCI_INQUIRY, &hdev->flags)) { + *status = MGMT_STATUS_BUSY; + return false; + } - type = hdev->discovery.type; + hci_inquiry_cache_flush(hdev); - err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status), - &type, sizeof(type)); - mgmt_pending_remove(cmd); + memset(&inq_cp, 0, sizeof(inq_cp)); + memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap)); + inq_cp.length = DISCOV_BREDR_INQUIRY_LEN; + hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp); + break; - return err; + case DISCOV_TYPE_LE: + case DISCOV_TYPE_INTERLEAVED: + *status = mgmt_le_support(hdev); + if (*status) + return false; + + if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED && + !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { + *status = MGMT_STATUS_NOT_SUPPORTED; + return false; + } + + if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) { + /* Don't let discovery abort an outgoing + * connection attempt that's using directed + * advertising. + */ + if (hci_conn_hash_lookup_state(hdev, LE_LINK, + BT_CONNECT)) { + *status = MGMT_STATUS_REJECTED; + return false; + } + + disable_advertising(req); + } + + /* If controller is scanning, it means the background scanning + * is running. Thus, we should temporarily stop it in order to + * set the discovery scanning parameters. + */ + if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) + hci_req_add_le_scan_disable(req); + + memset(¶m_cp, 0, sizeof(param_cp)); + + /* All active scans will be done with either a resolvable + * private address (when privacy feature has been enabled) + * or unresolvable private address. 
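+ *
+ * (Review aside, not from the original patch: the "true"
+ * passed to hci_update_random_address() below is its
+ * require_privacy flag, and own_addr_type comes back set up
+ * for the SCAN_REQ packets sent while actively scanning. The
+ * DISCOV_LE_SCAN_INT/DISCOV_LE_SCAN_WIN values use the usual
+ * LE units of 0.625 ms, so e.g. a raw value of 0x0060 means
+ * 96 * 0.625 ms = 60 ms.)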
+ */ + err = hci_update_random_address(req, true, &own_addr_type); + if (err < 0) { + *status = MGMT_STATUS_FAILED; + return false; + } + + param_cp.type = LE_SCAN_ACTIVE; + param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT); + param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN); + param_cp.own_address_type = own_addr_type; + hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), + ¶m_cp); + + memset(&enable_cp, 0, sizeof(enable_cp)); + enable_cp.enable = LE_SCAN_ENABLE; + enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; + hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), + &enable_cp); + break; + + default: + *status = MGMT_STATUS_INVALID_PARAMS; + return false; + } + + return true; } static void start_discovery_complete(struct hci_dev *hdev, u8 status) { - unsigned long timeout = 0; + struct pending_cmd *cmd; + unsigned long timeout; BT_DBG("status %d", status); + hci_dev_lock(hdev); + + cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev); + if (!cmd) + cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev); + + if (cmd) { + cmd->cmd_complete(cmd, mgmt_status(status)); + mgmt_pending_remove(cmd); + } + if (status) { - hci_dev_lock(hdev); - mgmt_start_discovery_failed(hdev, status); - hci_dev_unlock(hdev); - return; + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + goto unlock; } - hci_dev_lock(hdev); hci_discovery_set_state(hdev, DISCOVERY_FINDING); - hci_dev_unlock(hdev); switch (hdev->discovery.type) { case DISCOV_TYPE_LE: timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); break; - case DISCOV_TYPE_INTERLEAVED: timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); break; - case DISCOV_TYPE_BREDR: + timeout = 0; break; - default: BT_ERR("Invalid discovery type %d", hdev->discovery.type); + timeout = 0; + break; } - if (!timeout) - return; + if (timeout) + queue_delayed_work(hdev->workqueue, + &hdev->le_scan_disable, timeout); - queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout); +unlock: + hci_dev_unlock(hdev); } static int start_discovery(struct sock *sk, struct hci_dev *hdev, @@ -3688,13 +3875,8 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev, { struct mgmt_cp_start_discovery *cp = data; struct pending_cmd *cmd; - struct hci_cp_le_set_scan_param param_cp; - struct hci_cp_le_set_scan_enable enable_cp; - struct hci_cp_inquiry inq_cp; struct hci_request req; - /* General inquiry access code (GIAC) */ - u8 lap[3] = { 0x33, 0x8b, 0x9e }; - u8 status, own_addr_type; + u8 status; int err; BT_DBG("%s", hdev->name); @@ -3702,176 +3884,188 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev, hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY, - MGMT_STATUS_NOT_POWERED); + err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY, + MGMT_STATUS_NOT_POWERED, + &cp->type, sizeof(cp->type)); goto failed; } - if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) { - err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY, - MGMT_STATUS_BUSY); + if (hdev->discovery.state != DISCOVERY_STOPPED || + test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) { + err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY, + MGMT_STATUS_BUSY, &cp->type, + sizeof(cp->type)); goto failed; } - if (hdev->discovery.state != DISCOVERY_STOPPED) { - err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY, - MGMT_STATUS_BUSY); - goto failed; - } - - cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0); + cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len); if 
(!cmd) { err = -ENOMEM; goto failed; } + cmd->cmd_complete = generic_cmd_complete; + + /* Clear the discovery filter first to free any previously + * allocated memory for the UUID list. + */ + hci_discovery_filter_clear(hdev); + hdev->discovery.type = cp->type; + hdev->discovery.report_invalid_rssi = false; hci_req_init(&req, hdev); - switch (hdev->discovery.type) { - case DISCOV_TYPE_BREDR: - status = mgmt_bredr_support(hdev); - if (status) { - err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY, - status); - mgmt_pending_remove(cmd); - goto failed; - } + if (!trigger_discovery(&req, &status)) { + err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY, + status, &cp->type, sizeof(cp->type)); + mgmt_pending_remove(cmd); + goto failed; + } - if (test_bit(HCI_INQUIRY, &hdev->flags)) { - err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY, - MGMT_STATUS_BUSY); - mgmt_pending_remove(cmd); - goto failed; - } + err = hci_req_run(&req, start_discovery_complete); + if (err < 0) { + mgmt_pending_remove(cmd); + goto failed; + } - hci_inquiry_cache_flush(hdev); + hci_discovery_set_state(hdev, DISCOVERY_STARTING); - memset(&inq_cp, 0, sizeof(inq_cp)); - memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap)); - inq_cp.length = DISCOV_BREDR_INQUIRY_LEN; - hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp); - break; +failed: + hci_dev_unlock(hdev); + return err; +} - case DISCOV_TYPE_LE: - case DISCOV_TYPE_INTERLEAVED: - status = mgmt_le_support(hdev); - if (status) { - err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY, - status); - mgmt_pending_remove(cmd); - goto failed; - } +static void service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status) +{ + cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, 1); +} - if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED && - !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { - err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY, - MGMT_STATUS_NOT_SUPPORTED); - mgmt_pending_remove(cmd); - goto failed; - } +static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_cp_start_service_discovery *cp = data; + struct pending_cmd *cmd; + struct hci_request req; + const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16); + u16 uuid_count, expected_len; + u8 status; + int err; - if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) { - /* Don't let discovery abort an outgoing - * connection attempt that's using directed - * advertising. - */ - if (hci_conn_hash_lookup_state(hdev, LE_LINK, - BT_CONNECT)) { - err = cmd_status(sk, hdev->id, - MGMT_OP_START_DISCOVERY, - MGMT_STATUS_REJECTED); - mgmt_pending_remove(cmd); - goto failed; - } + BT_DBG("%s", hdev->name); - disable_advertising(&req); - } + hci_dev_lock(hdev); - /* If controller is scanning, it means the background scanning - * is running. Thus, we should temporarily stop it in order to - * set the discovery scanning parameters. 
- */ - if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) - hci_req_add_le_scan_disable(&req); + if (!hdev_is_powered(hdev)) { + err = cmd_complete(sk, hdev->id, + MGMT_OP_START_SERVICE_DISCOVERY, + MGMT_STATUS_NOT_POWERED, + &cp->type, sizeof(cp->type)); + goto failed; + } - memset(¶m_cp, 0, sizeof(param_cp)); + if (hdev->discovery.state != DISCOVERY_STOPPED || + test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) { + err = cmd_complete(sk, hdev->id, + MGMT_OP_START_SERVICE_DISCOVERY, + MGMT_STATUS_BUSY, &cp->type, + sizeof(cp->type)); + goto failed; + } - /* All active scans will be done with either a resolvable - * private address (when privacy feature has been enabled) - * or unresolvable private address. - */ - err = hci_update_random_address(&req, true, &own_addr_type); - if (err < 0) { - err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY, - MGMT_STATUS_FAILED); + uuid_count = __le16_to_cpu(cp->uuid_count); + if (uuid_count > max_uuid_count) { + BT_ERR("service_discovery: too big uuid_count value %u", + uuid_count); + err = cmd_complete(sk, hdev->id, + MGMT_OP_START_SERVICE_DISCOVERY, + MGMT_STATUS_INVALID_PARAMS, &cp->type, + sizeof(cp->type)); + goto failed; + } + + expected_len = sizeof(*cp) + uuid_count * 16; + if (expected_len != len) { + BT_ERR("service_discovery: expected %u bytes, got %u bytes", + expected_len, len); + err = cmd_complete(sk, hdev->id, + MGMT_OP_START_SERVICE_DISCOVERY, + MGMT_STATUS_INVALID_PARAMS, &cp->type, + sizeof(cp->type)); + goto failed; + } + + cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY, + hdev, data, len); + if (!cmd) { + err = -ENOMEM; + goto failed; + } + + cmd->cmd_complete = service_discovery_cmd_complete; + + /* Clear the discovery filter first to free any previously + * allocated memory for the UUID list. 
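+ *
+ * (Review aside, not from the original patch: the length
+ * check earlier in this function is worth spelling out.
+ * Every UUID in the filter is carried as a full 128-bit
+ * value, so expected_len = sizeof(*cp) + uuid_count * 16;
+ * a filter of two UUIDs must arrive as exactly
+ * sizeof(*cp) + 32 bytes. The max_uuid_count bound keeps
+ * that u16 arithmetic from wrapping for hostile uuid_count
+ * values.)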
+ */ + hci_discovery_filter_clear(hdev); + + hdev->discovery.type = cp->type; + hdev->discovery.rssi = cp->rssi; + hdev->discovery.uuid_count = uuid_count; + + if (uuid_count > 0) { + hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16, + GFP_KERNEL); + if (!hdev->discovery.uuids) { + err = cmd_complete(sk, hdev->id, + MGMT_OP_START_SERVICE_DISCOVERY, + MGMT_STATUS_FAILED, + &cp->type, sizeof(cp->type)); mgmt_pending_remove(cmd); goto failed; } + } - param_cp.type = LE_SCAN_ACTIVE; - param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT); - param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN); - param_cp.own_address_type = own_addr_type; - hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), - ¶m_cp); - - memset(&enable_cp, 0, sizeof(enable_cp)); - enable_cp.enable = LE_SCAN_ENABLE; - enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; - hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), - &enable_cp); - break; + hci_req_init(&req, hdev); - default: - err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY, - MGMT_STATUS_INVALID_PARAMS); + if (!trigger_discovery(&req, &status)) { + err = cmd_complete(sk, hdev->id, + MGMT_OP_START_SERVICE_DISCOVERY, + status, &cp->type, sizeof(cp->type)); mgmt_pending_remove(cmd); goto failed; } err = hci_req_run(&req, start_discovery_complete); - if (err < 0) + if (err < 0) { mgmt_pending_remove(cmd); - else - hci_discovery_set_state(hdev, DISCOVERY_STARTING); + goto failed; + } + + hci_discovery_set_state(hdev, DISCOVERY_STARTING); failed: hci_dev_unlock(hdev); return err; } -static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status) +static void stop_discovery_complete(struct hci_dev *hdev, u8 status) { struct pending_cmd *cmd; - int err; - - cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev); - if (!cmd) - return -ENOENT; - err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status), - &hdev->discovery.type, sizeof(hdev->discovery.type)); - mgmt_pending_remove(cmd); - - return err; -} - -static void stop_discovery_complete(struct hci_dev *hdev, u8 status) -{ BT_DBG("status %d", status); hci_dev_lock(hdev); - if (status) { - mgmt_stop_discovery_failed(hdev, status); - goto unlock; + cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev); + if (cmd) { + cmd->cmd_complete(cmd, mgmt_status(status)); + mgmt_pending_remove(cmd); } - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + if (!status) + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); -unlock: hci_dev_unlock(hdev); } @@ -3901,12 +4095,14 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data, goto unlock; } - cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0); + cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len); if (!cmd) { err = -ENOMEM; goto unlock; } + cmd->cmd_complete = generic_cmd_complete; + hci_req_init(&req, hdev); hci_stop_discovery(&req); @@ -4506,18 +4702,13 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev, { struct mgmt_mode *cp = data; struct pending_cmd *cmd; - u8 val, status; + u8 val; int err; BT_DBG("request for %s", hdev->name); - status = mgmt_bredr_support(hdev); - if (status) - return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, - status); - - if (!lmp_sc_capable(hdev) && - !test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) + if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) && + !lmp_sc_capable(hdev) && !test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, MGMT_STATUS_NOT_SUPPORTED); @@ -4527,7 +4718,10 @@ static 
int set_secure_conn(struct sock *sk, struct hci_dev *hdev, hci_dev_lock(hdev); - if (!hdev_is_powered(hdev)) { + if (!hdev_is_powered(hdev) || + (!lmp_sc_capable(hdev) && + !test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) || + !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { bool changed; if (cp->val) { @@ -4844,18 +5038,26 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, else addr_type = ADDR_LE_DEV_RANDOM; - if (key->master) - type = SMP_LTK; - else - type = SMP_LTK_SLAVE; - switch (key->type) { case MGMT_LTK_UNAUTHENTICATED: authenticated = 0x00; + type = key->master ? SMP_LTK : SMP_LTK_SLAVE; break; case MGMT_LTK_AUTHENTICATED: authenticated = 0x01; + type = key->master ? SMP_LTK : SMP_LTK_SLAVE; + break; + case MGMT_LTK_P256_UNAUTH: + authenticated = 0x00; + type = SMP_LTK_P256; + break; + case MGMT_LTK_P256_AUTH: + authenticated = 0x01; + type = SMP_LTK_P256; break; + case MGMT_LTK_P256_DEBUG: + authenticated = 0x00; + type = SMP_LTK_P256_DEBUG; default: continue; } @@ -4873,67 +5075,42 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, return err; } -struct cmd_conn_lookup { - struct hci_conn *conn; - bool valid_tx_power; - u8 mgmt_status; -}; - -static void get_conn_info_complete(struct pending_cmd *cmd, void *data) +static void conn_info_cmd_complete(struct pending_cmd *cmd, u8 status) { - struct cmd_conn_lookup *match = data; - struct mgmt_cp_get_conn_info *cp; - struct mgmt_rp_get_conn_info rp; struct hci_conn *conn = cmd->user_data; + struct mgmt_rp_get_conn_info rp; - if (conn != match->conn) - return; - - cp = (struct mgmt_cp_get_conn_info *) cmd->param; + memcpy(&rp.addr, cmd->param, sizeof(rp.addr)); - memset(&rp, 0, sizeof(rp)); - bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); - rp.addr.type = cp->addr.type; - - if (!match->mgmt_status) { + if (status == MGMT_STATUS_SUCCESS) { rp.rssi = conn->rssi; - - if (match->valid_tx_power) { - rp.tx_power = conn->tx_power; - rp.max_tx_power = conn->max_tx_power; - } else { - rp.tx_power = HCI_TX_POWER_INVALID; - rp.max_tx_power = HCI_TX_POWER_INVALID; - } + rp.tx_power = conn->tx_power; + rp.max_tx_power = conn->max_tx_power; + } else { + rp.rssi = HCI_RSSI_INVALID; + rp.tx_power = HCI_TX_POWER_INVALID; + rp.max_tx_power = HCI_TX_POWER_INVALID; } - cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, - match->mgmt_status, &rp, sizeof(rp)); + cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status, + &rp, sizeof(rp)); hci_conn_drop(conn); hci_conn_put(conn); - - mgmt_pending_remove(cmd); } -static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status) +static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status) { struct hci_cp_read_rssi *cp; + struct pending_cmd *cmd; struct hci_conn *conn; - struct cmd_conn_lookup match; u16 handle; + u8 status; - BT_DBG("status 0x%02x", status); + BT_DBG("status 0x%02x", hci_status); hci_dev_lock(hdev); - /* TX power data is valid in case request completed successfully, - * otherwise we assume it's not valid. At the moment we assume that - * either both or none of current and max values are valid to keep code - * simple. - */ - match.valid_tx_power = !status; - /* Commands sent in request are either Read RSSI or Read Transmit Power * Level so we check which one was last sent to retrieve connection * handle. 
Both commands have handle as first parameter so it's safe to @@ -4946,29 +5123,29 @@ static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status) cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI); if (!cp) { cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER); - status = 0; + status = MGMT_STATUS_SUCCESS; + } else { + status = mgmt_status(hci_status); } if (!cp) { - BT_ERR("invalid sent_cmd in response"); + BT_ERR("invalid sent_cmd in conn_info response"); goto unlock; } handle = __le16_to_cpu(cp->handle); conn = hci_conn_hash_lookup_handle(hdev, handle); if (!conn) { - BT_ERR("unknown handle (%d) in response", handle); + BT_ERR("unknown handle (%d) in conn_info response", handle); goto unlock; } - match.conn = conn; - match.mgmt_status = mgmt_status(status); + cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn); + if (!cmd) + goto unlock; - /* Cache refresh is complete, now reply for mgmt request for given - * connection only. - */ - mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev, - get_conn_info_complete, &match); + cmd->cmd_complete(cmd, status); + mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); @@ -5014,6 +5191,12 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, goto unlock; } + if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) { + err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, + MGMT_STATUS_BUSY, &rp, sizeof(rp)); + goto unlock; + } + /* To avoid client trying to guess when to poll again for information we * calculate conn info age as random value between min/max set in hdev. */ @@ -5069,6 +5252,7 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, hci_conn_hold(conn); cmd->user_data = hci_conn_get(conn); + cmd->cmd_complete = conn_info_cmd_complete; conn->conn_info_timestamp = jiffies; } else { @@ -5086,10 +5270,40 @@ unlock: return err; } -static void get_clock_info_complete(struct hci_dev *hdev, u8 status) +static void clock_info_cmd_complete(struct pending_cmd *cmd, u8 status) { - struct mgmt_cp_get_clock_info *cp; + struct hci_conn *conn = cmd->user_data; struct mgmt_rp_get_clock_info rp; + struct hci_dev *hdev; + + memset(&rp, 0, sizeof(rp)); + memcpy(&rp.addr, &cmd->param, sizeof(rp.addr)); + + if (status) + goto complete; + + hdev = hci_dev_get(cmd->index); + if (hdev) { + rp.local_clock = cpu_to_le32(hdev->clock); + hci_dev_put(hdev); + } + + if (conn) { + rp.piconet_clock = cpu_to_le32(conn->clock); + rp.accuracy = cpu_to_le16(conn->clock_accuracy); + } + +complete: + cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, sizeof(rp)); + + if (conn) { + hci_conn_drop(conn); + hci_conn_put(conn); + } +} + +static void get_clock_info_complete(struct hci_dev *hdev, u8 status) +{ struct hci_cp_read_clock *hci_cp; struct pending_cmd *cmd; struct hci_conn *conn; @@ -5113,29 +5327,8 @@ static void get_clock_info_complete(struct hci_dev *hdev, u8 status) if (!cmd) goto unlock; - cp = cmd->param; - - memset(&rp, 0, sizeof(rp)); - memcpy(&rp.addr, &cp->addr, sizeof(rp.addr)); - - if (status) - goto send_rsp; - - rp.local_clock = cpu_to_le32(hdev->clock); - - if (conn) { - rp.piconet_clock = cpu_to_le32(conn->clock); - rp.accuracy = cpu_to_le16(conn->clock_accuracy); - } - -send_rsp: - cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status), - &rp, sizeof(rp)); + cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); - if (conn) { - hci_conn_drop(conn); - hci_conn_put(conn); - } unlock: hci_dev_unlock(hdev); @@ -5191,6 +5384,8 @@ static int 
get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data, goto unlock; } + cmd->cmd_complete = clock_info_cmd_complete; + hci_req_init(&req, hdev); memset(&hci_cp, 0, sizeof(hci_cp)); @@ -5680,6 +5875,7 @@ static const struct mgmt_handler { { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE }, { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE }, { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE }, + { start_service_discovery,true, MGMT_START_SERVICE_DISCOVERY_SIZE }, }; int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) @@ -5701,7 +5897,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) if (!buf) return -ENOMEM; - if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) { + if (memcpy_from_msg(buf, msg, msglen)) { err = -EFAULT; goto done; } @@ -5816,7 +6012,7 @@ void mgmt_index_removed(struct hci_dev *hdev) if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) return; - mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); + mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status); if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL); @@ -5951,7 +6147,7 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered) } mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); - mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered); + mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status_not_powered); if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, @@ -6035,8 +6231,19 @@ void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, static u8 mgmt_ltk_type(struct smp_ltk *ltk) { - if (ltk->authenticated) - return MGMT_LTK_AUTHENTICATED; + switch (ltk->type) { + case SMP_LTK: + case SMP_LTK_SLAVE: + if (ltk->authenticated) + return MGMT_LTK_AUTHENTICATED; + return MGMT_LTK_UNAUTHENTICATED; + case SMP_LTK_P256: + if (ltk->authenticated) + return MGMT_LTK_P256_AUTH; + return MGMT_LTK_P256_UNAUTH; + case SMP_LTK_P256_DEBUG: + return MGMT_LTK_P256_DEBUG; + } return MGMT_LTK_UNAUTHENTICATED; } @@ -6171,26 +6378,36 @@ static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data, return eir_len; } -void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, - u8 addr_type, u32 flags, u8 *name, u8 name_len, - u8 *dev_class) +void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, + u32 flags, u8 *name, u8 name_len) { char buf[512]; struct mgmt_ev_device_connected *ev = (void *) buf; u16 eir_len = 0; - bacpy(&ev->addr.bdaddr, bdaddr); - ev->addr.type = link_to_bdaddr(link_type, addr_type); + bacpy(&ev->addr.bdaddr, &conn->dst); + ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type); ev->flags = __cpu_to_le32(flags); - if (name_len > 0) - eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, - name, name_len); + /* We must ensure that the EIR Data fields are ordered and + * unique. Keep it simple for now and avoid the problem by not + * adding any BR/EDR data to the LE adv. 
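+ *
+ * (Review aside, not from the original patch: each field
+ * that eir_append_data() emits is a length/type/value
+ * triplet where the length octet counts the type octet plus
+ * the data. A complete local name of "abc" therefore encodes
+ * as the five bytes 0x04 0x09 0x61 0x62 0x63, 0x09 being
+ * EIR_NAME_COMPLETE.)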
+ */ + if (conn->le_adv_data_len > 0) { + memcpy(&ev->eir[eir_len], + conn->le_adv_data, conn->le_adv_data_len); + eir_len = conn->le_adv_data_len; + } else { + if (name_len > 0) + eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, + name, name_len); - if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0) - eir_len = eir_append_data(ev->eir, eir_len, - EIR_CLASS_OF_DEV, dev_class, 3); + if (memcmp(conn->dev_class, "\0\0\0", 3) != 0) + eir_len = eir_append_data(ev->eir, eir_len, + EIR_CLASS_OF_DEV, + conn->dev_class, 3); + } ev->eir_len = cpu_to_le16(eir_len); @@ -6200,15 +6417,9 @@ void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, static void disconnect_rsp(struct pending_cmd *cmd, void *data) { - struct mgmt_cp_disconnect *cp = cmd->param; struct sock **sk = data; - struct mgmt_rp_disconnect rp; - - bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); - rp.addr.type = cp->addr.type; - cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp, - sizeof(rp)); + cmd->cmd_complete(cmd, 0); *sk = cmd->sk; sock_hold(*sk); @@ -6220,16 +6431,10 @@ static void unpair_device_rsp(struct pending_cmd *cmd, void *data) { struct hci_dev *hdev = data; struct mgmt_cp_unpair_device *cp = cmd->param; - struct mgmt_rp_unpair_device rp; - - memset(&rp, 0, sizeof(rp)); - bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); - rp.addr.type = cp->addr.type; device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk); - cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp)); - + cmd->cmd_complete(cmd, 0); mgmt_pending_remove(cmd); } @@ -6290,7 +6495,6 @@ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, { u8 bdaddr_type = link_to_bdaddr(link_type, addr_type); struct mgmt_cp_disconnect *cp; - struct mgmt_rp_disconnect rp; struct pending_cmd *cmd; mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, @@ -6308,12 +6512,7 @@ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, if (cp->addr.type != bdaddr_type) return; - bacpy(&rp.addr.bdaddr, bdaddr); - rp.addr.type = bdaddr_type; - - cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, - mgmt_status(status), &rp, sizeof(rp)); - + cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); } @@ -6352,18 +6551,12 @@ void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status) { struct pending_cmd *cmd; - struct mgmt_rp_pin_code_reply rp; cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev); if (!cmd) return; - bacpy(&rp.addr.bdaddr, bdaddr); - rp.addr.type = BDADDR_BREDR; - - cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, - mgmt_status(status), &rp, sizeof(rp)); - + cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); } @@ -6371,18 +6564,12 @@ void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status) { struct pending_cmd *cmd; - struct mgmt_rp_pin_code_reply rp; cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev); if (!cmd) return; - bacpy(&rp.addr.bdaddr, bdaddr); - rp.addr.type = BDADDR_BREDR; - - cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY, - mgmt_status(status), &rp, sizeof(rp)); - + cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); } @@ -6422,21 +6609,15 @@ static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 opcode) { struct pending_cmd *cmd; - struct mgmt_rp_user_confirm_reply rp; - int err; cmd = mgmt_pending_find(opcode, hdev); if (!cmd) return -ENOENT; - bacpy(&rp.addr.bdaddr, bdaddr); - rp.addr.type = 
link_to_bdaddr(link_type, addr_type); - err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status), - &rp, sizeof(rp)); - + cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); - return err; + return 0; } int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, @@ -6693,8 +6874,8 @@ void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status) } void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192, - u8 *randomizer192, u8 *hash256, - u8 *randomizer256, u8 status) + u8 *rand192, u8 *hash256, u8 *rand256, + u8 status) { struct pending_cmd *cmd; @@ -6708,17 +6889,14 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192, cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, mgmt_status(status)); } else { - if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) && - hash256 && randomizer256) { + if (bredr_sc_enabled(hdev) && hash256 && rand256) { struct mgmt_rp_read_local_oob_ext_data rp; memcpy(rp.hash192, hash192, sizeof(rp.hash192)); - memcpy(rp.randomizer192, randomizer192, - sizeof(rp.randomizer192)); + memcpy(rp.rand192, rand192, sizeof(rp.rand192)); memcpy(rp.hash256, hash256, sizeof(rp.hash256)); - memcpy(rp.randomizer256, randomizer256, - sizeof(rp.randomizer256)); + memcpy(rp.rand256, rand256, sizeof(rp.rand256)); cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 0, @@ -6727,8 +6905,7 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192, struct mgmt_rp_read_local_oob_data rp; memcpy(rp.hash, hash192, sizeof(rp.hash)); - memcpy(rp.randomizer, randomizer192, - sizeof(rp.randomizer)); + memcpy(rp.rand, rand192, sizeof(rp.rand)); cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 0, @@ -6739,6 +6916,73 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192, mgmt_pending_remove(cmd); } +static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16]) +{ + int i; + + for (i = 0; i < uuid_count; i++) { + if (!memcmp(uuid, uuids[i], 16)) + return true; + } + + return false; +} + +static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16]) +{ + u16 parsed = 0; + + while (parsed < eir_len) { + u8 field_len = eir[0]; + u8 uuid[16]; + int i; + + if (field_len == 0) + break; + + if (eir_len - parsed < field_len + 1) + break; + + switch (eir[1]) { + case EIR_UUID16_ALL: + case EIR_UUID16_SOME: + for (i = 0; i + 3 <= field_len; i += 2) { + memcpy(uuid, bluetooth_base_uuid, 16); + uuid[13] = eir[i + 3]; + uuid[12] = eir[i + 2]; + if (has_uuid(uuid, uuid_count, uuids)) + return true; + } + break; + case EIR_UUID32_ALL: + case EIR_UUID32_SOME: + for (i = 0; i + 5 <= field_len; i += 4) { + memcpy(uuid, bluetooth_base_uuid, 16); + uuid[15] = eir[i + 5]; + uuid[14] = eir[i + 4]; + uuid[13] = eir[i + 3]; + uuid[12] = eir[i + 2]; + if (has_uuid(uuid, uuid_count, uuids)) + return true; + } + break; + case EIR_UUID128_ALL: + case EIR_UUID128_SOME: + for (i = 0; i + 17 <= field_len; i += 16) { + memcpy(uuid, eir + i + 2, 16); + if (has_uuid(uuid, uuid_count, uuids)) + return true; + } + break; + } + + parsed += field_len + 1; + eir += field_len + 1; + } + + return false; +} + void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len) @@ -6746,6 +6990,7 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, char buf[512]; struct mgmt_ev_device_found *ev = (void *) buf; size_t 
ev_size; + bool match; /* Don't send events for a non-kernel initiated discovery. With * LE one exception is if we have pend_le_reports > 0 in which @@ -6758,6 +7003,18 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, return; } + /* When using service discovery with a RSSI threshold, then check + * if such a RSSI threshold is specified. If a RSSI threshold has + * been specified, then all results with a RSSI smaller than the + * RSSI threshold will be dropped. + * + * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry, + * the results are also dropped. + */ + if (hdev->discovery.rssi != HCI_RSSI_INVALID && + (rssi < hdev->discovery.rssi || rssi == HCI_RSSI_INVALID)) + return; + /* Make sure that the buffer is big enough. The 5 extra bytes * are for the potential CoD field. */ @@ -6766,20 +7023,75 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, memset(buf, 0, sizeof(buf)); + /* In case of device discovery with BR/EDR devices (pre 1.2), the + * RSSI value was reported as 0 when not available. This behavior + * is kept when using device discovery. This is required for full + * backwards compatibility with the API. + * + * However when using service discovery, the value 127 will be + * returned when the RSSI is not available. + */ + if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi) + rssi = 0; + bacpy(&ev->addr.bdaddr, bdaddr); ev->addr.type = link_to_bdaddr(link_type, addr_type); ev->rssi = rssi; ev->flags = cpu_to_le32(flags); - if (eir_len > 0) + if (eir_len > 0) { + /* When using service discovery and a list of UUID is + * provided, results with no matching UUID should be + * dropped. In case there is a match the result is + * kept and checking possible scan response data + * will be skipped. + */ + if (hdev->discovery.uuid_count > 0) { + match = eir_has_uuids(eir, eir_len, + hdev->discovery.uuid_count, + hdev->discovery.uuids); + if (!match) + return; + } + + /* Copy EIR or advertising data into event */ memcpy(ev->eir, eir, eir_len); + } else { + /* When using service discovery and a list of UUID is + * provided, results with empty EIR or advertising data + * should be dropped since they do not match any UUID. + */ + if (hdev->discovery.uuid_count > 0) + return; + } if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV)) eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, dev_class, 3); - if (scan_rsp_len > 0) + if (scan_rsp_len > 0) { + /* When using service discovery and a list of UUID is + * provided, results with no matching UUID should be + * dropped if there is no previous match from the + * advertising data. + */ + if (hdev->discovery.uuid_count > 0) { + if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len, + hdev->discovery.uuid_count, + hdev->discovery.uuids)) + return; + } + + /* Append scan response data to event */ memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len); + } else { + /* When using service discovery and a list of UUID is + * provided, results with empty scan response and no + * previous matched advertising data should be dropped. 
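+ *
+ * (Review aside, not from the original patch: taken
+ * together, the three checks report a result only when the
+ * EIR or advertising data, or failing that the scan
+ * response, carries at least one UUID from the filter list.
+ * eir_has_uuids() above widens 16- and 32-bit UUIDs to
+ * 128-bit form by patching them into the Bluetooth base UUID
+ * 00000000-0000-1000-8000-00805F9B34FB before comparing.)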
+ */ + if (hdev->discovery.uuid_count > 0 && !match) + return; + } ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len); ev_size = sizeof(*ev) + eir_len + scan_rsp_len; @@ -6813,23 +7125,9 @@ void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, void mgmt_discovering(struct hci_dev *hdev, u8 discovering) { struct mgmt_ev_discovering ev; - struct pending_cmd *cmd; BT_DBG("%s discovering %u", hdev->name, discovering); - if (discovering) - cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev); - else - cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev); - - if (cmd != NULL) { - u8 type = hdev->discovery.type; - - cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type, - sizeof(type)); - mgmt_pending_remove(cmd); - } - memset(&ev, 0, sizeof(ev)); ev.type = hdev->discovery.type; ev.discovering = discovering; diff --git a/net/bluetooth/rfcomm/Kconfig b/net/bluetooth/rfcomm/Kconfig index 18d352ea2bc7e2a38672fda575a602b6a0ccab0d..335df751522064192b4a83270a657724482065b7 100644 --- a/net/bluetooth/rfcomm/Kconfig +++ b/net/bluetooth/rfcomm/Kconfig @@ -1,6 +1,6 @@ config BT_RFCOMM tristate "RFCOMM protocol support" - depends on BT + depends on BT_BREDR help RFCOMM provides connection oriented stream transport. RFCOMM support is required for Dialup Networking, OBEX and other Bluetooth diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 410dd5e76c41bff9111f6c32fbf8aff536974ddd..73f8c75abe6ec40d5376bdeb5114ab99f2e737f1 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -78,8 +78,10 @@ static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s); #define __get_type(b) ((b & 0xef)) #define __test_ea(b) ((b & 0x01)) -#define __test_cr(b) ((b & 0x02)) -#define __test_pf(b) ((b & 0x10)) +#define __test_cr(b) (!!(b & 0x02)) +#define __test_pf(b) (!!(b & 0x10)) + +#define __session_dir(s) ((s)->initiator ? 
0x00 : 0x01) #define __addr(cr, dlci) (((dlci & 0x3f) << 2) | (cr << 1) | 0x01) #define __ctrl(type, pf) (((type & 0xef) | (pf << 4))) @@ -388,7 +390,7 @@ static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, return err; } - dlci = __dlci(!s->initiator, channel); + dlci = __dlci(__session_dir(s), channel); /* Check if DLCI already exists */ if (rfcomm_dlc_get(s, dlci)) @@ -543,7 +545,7 @@ struct rfcomm_dlc *rfcomm_dlc_exists(bdaddr_t *src, bdaddr_t *dst, u8 channel) rfcomm_lock(); s = rfcomm_session_get(src, dst); if (s) { - dlci = __dlci(!s->initiator, channel); + dlci = __dlci(__session_dir(s), channel); dlc = rfcomm_dlc_get(s, dlci); } rfcomm_unlock(); @@ -904,7 +906,7 @@ static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type) hdr->len = __len8(sizeof(*mcc) + 1); mcc = (void *) ptr; ptr += sizeof(*mcc); - mcc->type = __mcc_type(cr, RFCOMM_NSC); + mcc->type = __mcc_type(0, RFCOMM_NSC); mcc->len = __len8(1); /* Type that we didn't like */ diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 8bbbb5ec468c373f86c6caafe9187f3684775915..2348176401a0b19ad3b5e9129999381f43da50a0 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -588,7 +588,7 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock, } skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE); - err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); + err = memcpy_from_msg(skb_put(skb, size), msg, size); if (err) { kfree_skb(skb); if (sent == 0) diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 7ee9e4ab00f882f827f7b63361dc82a24ca72a23..30e5ea3f1ad311388bf7f95aec8f70cb88e8fab9 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -285,7 +285,7 @@ static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len) if (!skb) return err; - if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { + if (memcpy_from_msg(skb_put(skb, len), msg, len)) { kfree_skb(skb); return -EFAULT; } diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index f09b6b65cf6b22079d6ce60d53f2613ccf6a7745..6a46252fe66f39fb1cc490987d6c14d2ec955dfa 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -29,14 +29,34 @@ #include #include +#include "ecc.h" #include "smp.h" +/* Low-level debug macros to be used for stuff that we don't want + * accidentially in dmesg, i.e. the values of the various crypto keys + * and the inputs & outputs of crypto functions. + */ +#ifdef DEBUG +#define SMP_DBG(fmt, ...) printk(KERN_DEBUG "%s: " fmt, __func__, \ + ##__VA_ARGS__) +#else +#define SMP_DBG(fmt, ...) no_printk(KERN_DEBUG "%s: " fmt, __func__, \ + ##__VA_ARGS__) +#endif + #define SMP_ALLOW_CMD(smp, code) set_bit(code, &smp->allow_cmd) +/* Keys which are not distributed with Secure Connections */ +#define SMP_SC_NO_DIST (SMP_DIST_ENC_KEY | SMP_DIST_LINK_KEY); + #define SMP_TIMEOUT msecs_to_jiffies(30000) -#define AUTH_REQ_MASK 0x07 -#define KEY_DIST_MASK 0x07 +#define AUTH_REQ_MASK(dev) (test_bit(HCI_SC_ENABLED, &(dev)->dev_flags) ? 
\ + 0x1f : 0x07) +#define KEY_DIST_MASK 0x07 + +/* Maximum message length that can be passed to aes_cmac */ +#define CMAC_MSG_MAX 80 enum { SMP_FLAG_TK_VALID, @@ -44,6 +64,12 @@ enum { SMP_FLAG_MITM_AUTH, SMP_FLAG_COMPLETE, SMP_FLAG_INITIATOR, + SMP_FLAG_SC, + SMP_FLAG_REMOTE_PK, + SMP_FLAG_DEBUG_KEY, + SMP_FLAG_WAIT_USER, + SMP_FLAG_DHKEY_PENDING, + SMP_FLAG_OOB, }; struct smp_chan { @@ -57,6 +83,7 @@ struct smp_chan { u8 rrnd[16]; /* SMP Pairing Random (remote) */ u8 pcnf[16]; /* SMP Pairing Confirm */ u8 tk[16]; /* SMP Temporary Key */ + u8 rr[16]; u8 enc_key_size; u8 remote_key_dist; bdaddr_t id_addr; @@ -67,9 +94,43 @@ struct smp_chan { struct smp_ltk *ltk; struct smp_ltk *slave_ltk; struct smp_irk *remote_irk; + u8 *link_key; unsigned long flags; + u8 method; + u8 passkey_round; + + /* Secure Connections variables */ + u8 local_pk[64]; + u8 local_sk[32]; + u8 remote_pk[64]; + u8 dhkey[32]; + u8 mackey[16]; struct crypto_blkcipher *tfm_aes; + struct crypto_hash *tfm_cmac; +}; + +/* These debug key values are defined in the SMP section of the core + * specification. debug_pk is the public debug key and debug_sk the + * private debug key. + */ +static const u8 debug_pk[64] = { + 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc, + 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef, + 0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e, + 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20, + + 0x8b, 0xd2, 0x89, 0x15, 0xd0, 0x8e, 0x1c, 0x74, + 0x24, 0x30, 0xed, 0x8f, 0xc2, 0x45, 0x63, 0x76, + 0x5c, 0x15, 0x52, 0x5a, 0xbf, 0x9a, 0x32, 0x63, + 0x6d, 0xeb, 0x2a, 0x65, 0x49, 0x9c, 0x80, 0xdc, +}; + +static const u8 debug_sk[32] = { + 0xbd, 0x1a, 0x3c, 0xcd, 0xa6, 0xb8, 0x99, 0x58, + 0x99, 0xb7, 0x40, 0xeb, 0x7b, 0x60, 0xff, 0x4a, + 0x50, 0x3f, 0x10, 0xd2, 0xe3, 0xb3, 0xc9, 0x74, + 0x38, 0x5f, 0xc5, 0xa3, 0xd4, 0xf6, 0x49, 0x3f, }; static inline void swap_buf(const u8 *src, u8 *dst, size_t len) @@ -80,14 +141,22 @@ static inline void swap_buf(const u8 *src, u8 *dst, size_t len) dst[len - 1 - i] = src[i]; } -static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r) +/* The following functions map to the LE SC SMP crypto functions + * AES-CMAC, f4, f5, f6, g2 and h6. 
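+ *
+ * (Review aside, not from the original patch: in the
+ * notation of the specification,
+ *
+ *   f4(U, V, X, Z) = AES-CMAC_X(U || V || Z)
+ *   f5: T = AES-CMAC_SALT(W), then MacKey and LTK are
+ *       AES-CMAC_T(Counter || "btle" || N1 || N2 || A1 || A2
+ *       || 256) with Counter = 0 and 1 respectively
+ *   g2(U, V, X, Y) = AES-CMAC_X(U || V || Y) mod 2^32, and
+ *       the passkey is g2 mod 10^6
+ *   h6(W, keyID) = AES-CMAC_W(keyID)
+ *
+ * The implementations below build their messages least
+ * significant octet first, which is why aes_cmac() swaps key
+ * and message before calling into the crypto layer.)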
+ */ + +static int aes_cmac(struct crypto_hash *tfm, const u8 k[16], const u8 *m, + size_t len, u8 mac[16]) { - struct blkcipher_desc desc; + uint8_t tmp[16], mac_msb[16], msg_msb[CMAC_MSG_MAX]; + struct hash_desc desc; struct scatterlist sg; - uint8_t tmp[16], data[16]; int err; - if (tfm == NULL) { + if (len > CMAC_MSG_MAX) + return -EFBIG; + + if (!tfm) { BT_ERR("tfm %p", tfm); return -EINVAL; } @@ -95,112 +164,237 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r) desc.tfm = tfm; desc.flags = 0; - /* The most significant octet of key corresponds to k[0] */ + crypto_hash_init(&desc); + + /* Swap key and message from LSB to MSB */ swap_buf(k, tmp, 16); + swap_buf(m, msg_msb, len); - err = crypto_blkcipher_setkey(tfm, tmp, 16); + SMP_DBG("msg (len %zu) %*phN", len, (int) len, m); + SMP_DBG("key %16phN", k); + + err = crypto_hash_setkey(tfm, tmp, 16); if (err) { BT_ERR("cipher setkey failed: %d", err); return err; } - /* Most significant octet of plaintextData corresponds to data[0] */ - swap_buf(r, data, 16); + sg_init_one(&sg, msg_msb, len); - sg_init_one(&sg, data, 16); + err = crypto_hash_update(&desc, &sg, len); + if (err) { + BT_ERR("Hash update error %d", err); + return err; + } - err = crypto_blkcipher_encrypt(&desc, &sg, &sg, 16); + err = crypto_hash_final(&desc, mac_msb); + if (err) { + BT_ERR("Hash final error %d", err); + return err; + } + + swap_buf(mac_msb, mac, 16); + + SMP_DBG("mac %16phN", mac); + + return 0; +} + +static int smp_f4(struct crypto_hash *tfm_cmac, const u8 u[32], const u8 v[32], + const u8 x[16], u8 z, u8 res[16]) +{ + u8 m[65]; + int err; + + SMP_DBG("u %32phN", u); + SMP_DBG("v %32phN", v); + SMP_DBG("x %16phN z %02x", x, z); + + m[0] = z; + memcpy(m + 1, v, 32); + memcpy(m + 33, u, 32); + + err = aes_cmac(tfm_cmac, x, m, sizeof(m), res); if (err) - BT_ERR("Encrypt data error %d", err); + return err; - /* Most significant octet of encryptedData corresponds to data[0] */ - swap_buf(data, r, 16); + SMP_DBG("res %16phN", res); return err; } -static int smp_ah(struct crypto_blkcipher *tfm, u8 irk[16], u8 r[3], u8 res[3]) +static int smp_f5(struct crypto_hash *tfm_cmac, u8 w[32], u8 n1[16], u8 n2[16], + u8 a1[7], u8 a2[7], u8 mackey[16], u8 ltk[16]) { - u8 _res[16]; + /* The btle, salt and length "magic" values are as defined in + * the SMP section of the Bluetooth core specification. In ASCII + * the btle value ends up being 'btle'. The salt is just a + * random number whereas length is the value 256 in little + * endian format. + */ + const u8 btle[4] = { 0x65, 0x6c, 0x74, 0x62 }; + const u8 salt[16] = { 0xbe, 0x83, 0x60, 0x5a, 0xdb, 0x0b, 0x37, 0x60, + 0x38, 0xa5, 0xf5, 0xaa, 0x91, 0x83, 0x88, 0x6c }; + const u8 length[2] = { 0x00, 0x01 }; + u8 m[53], t[16]; int err; - /* r' = padding || r */ - memcpy(_res, r, 3); - memset(_res + 3, 0, 13); + SMP_DBG("w %32phN", w); + SMP_DBG("n1 %16phN n2 %16phN", n1, n2); + SMP_DBG("a1 %7phN a2 %7phN", a1, a2); - err = smp_e(tfm, irk, _res); - if (err) { - BT_ERR("Encrypt error"); + err = aes_cmac(tfm_cmac, salt, w, 32, t); + if (err) return err; - } - /* The output of the random address function ah is: - * ah(h, r) = e(k, r') mod 2^24 - * The output of the security function e is then truncated to 24 bits - * by taking the least significant 24 bits of the output of e as the - * result of ah. 
- */ - memcpy(res, _res, 3); + SMP_DBG("t %16phN", t); + + memcpy(m, length, 2); + memcpy(m + 2, a2, 7); + memcpy(m + 9, a1, 7); + memcpy(m + 16, n2, 16); + memcpy(m + 32, n1, 16); + memcpy(m + 48, btle, 4); + + m[52] = 0; /* Counter */ + + err = aes_cmac(tfm_cmac, t, m, sizeof(m), mackey); + if (err) + return err; + + SMP_DBG("mackey %16phN", mackey); + + m[52] = 1; /* Counter */ + + err = aes_cmac(tfm_cmac, t, m, sizeof(m), ltk); + if (err) + return err; + + SMP_DBG("ltk %16phN", ltk); return 0; } -bool smp_irk_matches(struct hci_dev *hdev, u8 irk[16], bdaddr_t *bdaddr) +static int smp_f6(struct crypto_hash *tfm_cmac, const u8 w[16], + const u8 n1[16], u8 n2[16], const u8 r[16], + const u8 io_cap[3], const u8 a1[7], const u8 a2[7], + u8 res[16]) { - struct l2cap_chan *chan = hdev->smp_data; - struct crypto_blkcipher *tfm; - u8 hash[3]; + u8 m[65]; int err; - if (!chan || !chan->data) - return false; + SMP_DBG("w %16phN", w); + SMP_DBG("n1 %16phN n2 %16phN", n1, n2); + SMP_DBG("r %16phN io_cap %3phN a1 %7phN a2 %7phN", r, io_cap, a1, a2); - tfm = chan->data; + memcpy(m, a2, 7); + memcpy(m + 7, a1, 7); + memcpy(m + 14, io_cap, 3); + memcpy(m + 17, r, 16); + memcpy(m + 33, n2, 16); + memcpy(m + 49, n1, 16); - BT_DBG("RPA %pMR IRK %*phN", bdaddr, 16, irk); + err = aes_cmac(tfm_cmac, w, m, sizeof(m), res); + if (err) + return err; - err = smp_ah(tfm, irk, &bdaddr->b[3], hash); + BT_DBG("res %16phN", res); + + return err; +} + +static int smp_g2(struct crypto_hash *tfm_cmac, const u8 u[32], const u8 v[32], + const u8 x[16], const u8 y[16], u32 *val) +{ + u8 m[80], tmp[16]; + int err; + + SMP_DBG("u %32phN", u); + SMP_DBG("v %32phN", v); + SMP_DBG("x %16phN y %16phN", x, y); + + memcpy(m, y, 16); + memcpy(m + 16, v, 32); + memcpy(m + 48, u, 32); + + err = aes_cmac(tfm_cmac, x, m, sizeof(m), tmp); if (err) - return false; + return err; - return !memcmp(bdaddr->b, hash, 3); + *val = get_unaligned_le32(tmp); + *val %= 1000000; + + SMP_DBG("val %06u", *val); + + return 0; } -int smp_generate_rpa(struct hci_dev *hdev, u8 irk[16], bdaddr_t *rpa) +static int smp_h6(struct crypto_hash *tfm_cmac, const u8 w[16], + const u8 key_id[4], u8 res[16]) { - struct l2cap_chan *chan = hdev->smp_data; - struct crypto_blkcipher *tfm; int err; - if (!chan || !chan->data) - return -EOPNOTSUPP; + SMP_DBG("w %16phN key_id %4phN", w, key_id); - tfm = chan->data; + err = aes_cmac(tfm_cmac, w, key_id, 4, res); + if (err) + return err; - get_random_bytes(&rpa->b[3], 3); + SMP_DBG("res %16phN", res); - rpa->b[5] &= 0x3f; /* Clear two most significant bits */ - rpa->b[5] |= 0x40; /* Set second most significant bit */ + return err; +} - err = smp_ah(tfm, irk, &rpa->b[3], rpa->b); - if (err < 0) +/* The following functions map to the legacy SMP crypto functions e, c1, + * s1 and ah. 
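+ *
+ * (Review aside, not from the original patch: in the
+ * notation of the specification,
+ *
+ *   c1(k, r, preq, pres, iat, rat, ia, ra)
+ *       = e(k, e(k, r XOR p1) XOR p2)
+ *     with p1 = pres || preq || rat || iat
+ *     and  p2 = padding || ia || ra
+ *   s1(k, r1, r2) = e(k, r1' || r2'), r1' and r2' being the
+ *       least significant 64 bits of r1 and r2
+ *   ah(k, r) = e(k, r') mod 2^24
+ *
+ * As with the SC functions above, all buffers are handled
+ * least significant octet first.)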
+ */ + +static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r) +{ + struct blkcipher_desc desc; + struct scatterlist sg; + uint8_t tmp[16], data[16]; + int err; + + if (!tfm) { + BT_ERR("tfm %p", tfm); + return -EINVAL; + } + + desc.tfm = tfm; + desc.flags = 0; + + /* The most significant octet of key corresponds to k[0] */ + swap_buf(k, tmp, 16); + + err = crypto_blkcipher_setkey(tfm, tmp, 16); + if (err) { + BT_ERR("cipher setkey failed: %d", err); return err; + } - BT_DBG("RPA %pMR", rpa); + /* Most significant octet of plaintextData corresponds to data[0] */ + swap_buf(r, data, 16); - return 0; + sg_init_one(&sg, data, 16); + + err = crypto_blkcipher_encrypt(&desc, &sg, &sg, 16); + if (err) + BT_ERR("Encrypt data error %d", err); + + /* Most significant octet of encryptedData corresponds to data[0] */ + swap_buf(data, r, 16); + + return err; } -static int smp_c1(struct smp_chan *smp, u8 k[16], u8 r[16], u8 preq[7], - u8 pres[7], u8 _iat, bdaddr_t *ia, u8 _rat, bdaddr_t *ra, - u8 res[16]) +static int smp_c1(struct crypto_blkcipher *tfm_aes, const u8 k[16], + const u8 r[16], const u8 preq[7], const u8 pres[7], u8 _iat, + const bdaddr_t *ia, u8 _rat, const bdaddr_t *ra, u8 res[16]) { - struct hci_dev *hdev = smp->conn->hcon->hdev; u8 p1[16], p2[16]; int err; - BT_DBG("%s", hdev->name); - memset(p1, 0, 16); /* p1 = pres || preq || _rat || _iat */ @@ -218,7 +412,7 @@ static int smp_c1(struct smp_chan *smp, u8 k[16], u8 r[16], u8 preq[7], u128_xor((u128 *) res, (u128 *) r, (u128 *) p1); /* res = e(k, res) */ - err = smp_e(smp->tfm_aes, k, res); + err = smp_e(tfm_aes, k, res); if (err) { BT_ERR("Encrypt data error"); return err; @@ -228,32 +422,103 @@ static int smp_c1(struct smp_chan *smp, u8 k[16], u8 r[16], u8 preq[7], u128_xor((u128 *) res, (u128 *) res, (u128 *) p2); /* res = e(k, res) */ - err = smp_e(smp->tfm_aes, k, res); + err = smp_e(tfm_aes, k, res); if (err) BT_ERR("Encrypt data error"); return err; } -static int smp_s1(struct smp_chan *smp, u8 k[16], u8 r1[16], u8 r2[16], - u8 _r[16]) +static int smp_s1(struct crypto_blkcipher *tfm_aes, const u8 k[16], + const u8 r1[16], const u8 r2[16], u8 _r[16]) { - struct hci_dev *hdev = smp->conn->hcon->hdev; int err; - BT_DBG("%s", hdev->name); - /* Just least significant octets from r1 and r2 are considered */ memcpy(_r, r2, 8); memcpy(_r + 8, r1, 8); - err = smp_e(smp->tfm_aes, k, _r); + err = smp_e(tfm_aes, k, _r); if (err) BT_ERR("Encrypt data error"); return err; } +static int smp_ah(struct crypto_blkcipher *tfm, const u8 irk[16], + const u8 r[3], u8 res[3]) +{ + u8 _res[16]; + int err; + + /* r' = padding || r */ + memcpy(_res, r, 3); + memset(_res + 3, 0, 13); + + err = smp_e(tfm, irk, _res); + if (err) { + BT_ERR("Encrypt error"); + return err; + } + + /* The output of the random address function ah is: + * ah(h, r) = e(k, r') mod 2^24 + * The output of the security function e is then truncated to 24 bits + * by taking the least significant 24 bits of the output of e as the + * result of ah. 
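+ *
+ * (Review aside, not from the original patch: this is what
+ * makes resolvable private addresses resolvable. An RPA is
+ * prand (24 bits, two most significant bits fixed to 01)
+ * followed by hash = ah(IRK, prand). Since bdaddr_t stores
+ * the address least significant octet first, b[3..5] carry
+ * prand and b[0..2] the hash, which is why smp_irk_matches()
+ * below recomputes ah() over &bdaddr->b[3] and compares the
+ * result against the first three octets.)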
+ */ + memcpy(res, _res, 3); + + return 0; +} + +bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16], + const bdaddr_t *bdaddr) +{ + struct l2cap_chan *chan = hdev->smp_data; + struct crypto_blkcipher *tfm; + u8 hash[3]; + int err; + + if (!chan || !chan->data) + return false; + + tfm = chan->data; + + BT_DBG("RPA %pMR IRK %*phN", bdaddr, 16, irk); + + err = smp_ah(tfm, irk, &bdaddr->b[3], hash); + if (err) + return false; + + return !memcmp(bdaddr->b, hash, 3); +} + +int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa) +{ + struct l2cap_chan *chan = hdev->smp_data; + struct crypto_blkcipher *tfm; + int err; + + if (!chan || !chan->data) + return -EOPNOTSUPP; + + tfm = chan->data; + + get_random_bytes(&rpa->b[3], 3); + + rpa->b[5] &= 0x3f; /* Clear two most significant bits */ + rpa->b[5] |= 0x40; /* Set second most significant bit */ + + err = smp_ah(tfm, irk, &rpa->b[3], rpa->b); + if (err < 0) + return err; + + BT_DBG("RPA %pMR", rpa); + + return 0; +} + static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data) { struct l2cap_chan *chan = conn->smp; @@ -274,8 +539,7 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data) memset(&msg, 0, sizeof(msg)); - msg.msg_iov = (struct iovec *) &iv; - msg.msg_iovlen = 2; + iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iv, 2, 1 + len); l2cap_chan_send(chan, &msg, 1 + len); @@ -288,17 +552,22 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data) schedule_delayed_work(&smp->security_timer, SMP_TIMEOUT); } -static __u8 authreq_to_seclevel(__u8 authreq) +static u8 authreq_to_seclevel(u8 authreq) { - if (authreq & SMP_AUTH_MITM) - return BT_SECURITY_HIGH; - else + if (authreq & SMP_AUTH_MITM) { + if (authreq & SMP_AUTH_SC) + return BT_SECURITY_FIPS; + else + return BT_SECURITY_HIGH; + } else { return BT_SECURITY_MEDIUM; + } } static __u8 seclevel_to_authreq(__u8 sec_level) { switch (sec_level) { + case BT_SECURITY_FIPS: case BT_SECURITY_HIGH: return SMP_AUTH_MITM | SMP_AUTH_BONDING; case BT_SECURITY_MEDIUM: @@ -316,7 +585,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn, struct smp_chan *smp = chan->data; struct hci_conn *hcon = conn->hcon; struct hci_dev *hdev = hcon->hdev; - u8 local_dist = 0, remote_dist = 0; + u8 local_dist = 0, remote_dist = 0, oob_flag = SMP_OOB_NOT_PRESENT; if (test_bit(HCI_BONDABLE, &conn->hcon->hdev->dev_flags)) { local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; @@ -332,24 +601,52 @@ static void build_pairing_cmd(struct l2cap_conn *conn, if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) local_dist |= SMP_DIST_ID_KEY; + if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) && + (authreq & SMP_AUTH_SC)) { + struct oob_data *oob_data; + u8 bdaddr_type; + + if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { + local_dist |= SMP_DIST_LINK_KEY; + remote_dist |= SMP_DIST_LINK_KEY; + } + + if (hcon->dst_type == ADDR_LE_DEV_PUBLIC) + bdaddr_type = BDADDR_LE_PUBLIC; + else + bdaddr_type = BDADDR_LE_RANDOM; + + oob_data = hci_find_remote_oob_data(hdev, &hcon->dst, + bdaddr_type); + if (oob_data) { + set_bit(SMP_FLAG_OOB, &smp->flags); + oob_flag = SMP_OOB_PRESENT; + memcpy(smp->rr, oob_data->rand256, 16); + memcpy(smp->pcnf, oob_data->hash256, 16); + } + + } else { + authreq &= ~SMP_AUTH_SC; + } + if (rsp == NULL) { req->io_capability = conn->hcon->io_capability; - req->oob_flag = SMP_OOB_NOT_PRESENT; + req->oob_flag = oob_flag; req->max_key_size = SMP_MAX_ENC_KEY_SIZE; req->init_key_dist = local_dist; req->resp_key_dist = remote_dist; 
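+	/* (Review aside, not from the original patch: the
+	 * AUTH_REQ_MASK(hdev) change just below widens the mask
+	 * from 0x07 to 0x1f when SC is enabled so that the SC
+	 * (0x08) and keypress (0x10) AuthReq bits survive. An
+	 * authreq of bonding | MITM | SC = 0x0d masked with 0x07
+	 * would lose the SC bit, giving 0x05, while 0x1f keeps
+	 * 0x0d intact.)
+	 */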
- req->auth_req = (authreq & AUTH_REQ_MASK); + req->auth_req = (authreq & AUTH_REQ_MASK(hdev)); smp->remote_key_dist = remote_dist; return; } rsp->io_capability = conn->hcon->io_capability; - rsp->oob_flag = SMP_OOB_NOT_PRESENT; + rsp->oob_flag = oob_flag; rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE; rsp->init_key_dist = req->init_key_dist & remote_dist; rsp->resp_key_dist = req->resp_key_dist & local_dist; - rsp->auth_req = (authreq & AUTH_REQ_MASK); + rsp->auth_req = (authreq & AUTH_REQ_MASK(hdev)); smp->remote_key_dist = rsp->init_key_dist; } @@ -372,6 +669,7 @@ static void smp_chan_destroy(struct l2cap_conn *conn) { struct l2cap_chan *chan = conn->smp; struct smp_chan *smp = chan->data; + struct hci_conn *hcon = conn->hcon; bool complete; BUG_ON(!smp); @@ -379,34 +677,46 @@ static void smp_chan_destroy(struct l2cap_conn *conn) cancel_delayed_work_sync(&smp->security_timer); complete = test_bit(SMP_FLAG_COMPLETE, &smp->flags); - mgmt_smp_complete(conn->hcon, complete); + mgmt_smp_complete(hcon, complete); kfree(smp->csrk); kfree(smp->slave_csrk); + kfree(smp->link_key); crypto_free_blkcipher(smp->tfm_aes); + crypto_free_hash(smp->tfm_cmac); + + /* Ensure that we don't leave any debug key around if debug key + * support hasn't been explicitly enabled. + */ + if (smp->ltk && smp->ltk->type == SMP_LTK_P256_DEBUG && + !test_bit(HCI_KEEP_DEBUG_KEYS, &hcon->hdev->dev_flags)) { + list_del_rcu(&smp->ltk->list); + kfree_rcu(smp->ltk, rcu); + smp->ltk = NULL; + } /* If pairing failed clean up any keys we might have */ if (!complete) { if (smp->ltk) { - list_del(&smp->ltk->list); - kfree(smp->ltk); + list_del_rcu(&smp->ltk->list); + kfree_rcu(smp->ltk, rcu); } if (smp->slave_ltk) { - list_del(&smp->slave_ltk->list); - kfree(smp->slave_ltk); + list_del_rcu(&smp->slave_ltk->list); + kfree_rcu(smp->slave_ltk, rcu); } if (smp->remote_irk) { - list_del(&smp->remote_irk->list); - kfree(smp->remote_irk); + list_del_rcu(&smp->remote_irk->list); + kfree_rcu(smp->remote_irk, rcu); } } chan->data = NULL; kfree(smp); - hci_conn_drop(conn->hcon); + hci_conn_drop(hcon); } static void smp_failure(struct l2cap_conn *conn, u8 reason) @@ -430,6 +740,7 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason) #define REQ_PASSKEY 0x02 #define CFM_PASSKEY 0x03 #define REQ_OOB 0x04 +#define DSP_PASSKEY 0x05 #define OVERLAP 0xFF static const u8 gen_method[5][5] = { @@ -440,6 +751,14 @@ static const u8 gen_method[5][5] = { { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP }, }; +static const u8 sc_method[5][5] = { + { JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY }, + { JUST_WORKS, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY }, + { DSP_PASSKEY, DSP_PASSKEY, REQ_PASSKEY, JUST_WORKS, DSP_PASSKEY }, + { JUST_WORKS, JUST_CFM, JUST_WORKS, JUST_WORKS, JUST_CFM }, + { DSP_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY }, +}; + static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io) { /* If either side has unknown io_caps, use JUST_CFM (which gets @@ -449,6 +768,9 @@ static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io) remote_io > SMP_IO_KEYBOARD_DISPLAY) return JUST_CFM; + if (test_bit(SMP_FLAG_SC, &smp->flags)) + return sc_method[remote_io][local_io]; + return gen_method[remote_io][local_io]; } @@ -458,7 +780,6 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, struct hci_conn *hcon = conn->hcon; struct l2cap_chan *chan = conn->smp; struct smp_chan *smp = chan->data; - u8 method; u32 passkey = 0; int ret = 0; @@ -475,26 
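The gen_method[][] and sc_method[][] matrices above are indexed as [remote_io][local_io] using the SMP IO-capability encoding 0x00..0x04 (DisplayOnly, DisplayYesNo, KeyboardOnly, NoInputNoOutput, KeyboardDisplay). A standalone sketch of the Secure Connections lookup, table values copied from the patch; the IO_* names are local to the example:

#include <stdint.h>
#include <stdio.h>

#define JUST_WORKS	0x00
#define JUST_CFM	0x01
#define REQ_PASSKEY	0x02
#define CFM_PASSKEY	0x03
#define REQ_OOB		0x04
#define DSP_PASSKEY	0x05
#define OVERLAP		0xff

#define IO_DISPLAY_ONLY		0x00
#define IO_DISPLAY_YESNO	0x01
#define IO_KEYBOARD_ONLY	0x02
#define IO_NO_INPUT_OUTPUT	0x03
#define IO_KEYBOARD_DISPLAY	0x04

static const uint8_t sc_method[5][5] = {
	{ JUST_WORKS,  JUST_CFM,    REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY },
	{ JUST_WORKS,  CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY },
	{ DSP_PASSKEY, DSP_PASSKEY, REQ_PASSKEY, JUST_WORKS, DSP_PASSKEY },
	{ JUST_WORKS,  JUST_CFM,    JUST_WORKS,  JUST_WORKS, JUST_CFM },
	{ DSP_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY },
};

static uint8_t pick_method(uint8_t local_io, uint8_t remote_io)
{
	/* Unknown capabilities fall back to JUST_CFM, as in
	 * get_auth_method() above. */
	if (local_io > IO_KEYBOARD_DISPLAY || remote_io > IO_KEYBOARD_DISPLAY)
		return JUST_CFM;

	return sc_method[remote_io][local_io];
}

int main(void)
{
	/* Display-only local device against keyboard-only remote: the
	 * display shows the passkey, the keyboard side types it in. */
	printf("method 0x%02x\n",
	       (unsigned)pick_method(IO_DISPLAY_ONLY, IO_KEYBOARD_ONLY));
	return 0;
}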
+796,28 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, * table. */ if (!(auth & SMP_AUTH_MITM)) - method = JUST_CFM; + smp->method = JUST_CFM; else - method = get_auth_method(smp, local_io, remote_io); + smp->method = get_auth_method(smp, local_io, remote_io); /* Don't confirm locally initiated pairing attempts */ - if (method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR, &smp->flags)) - method = JUST_WORKS; + if (smp->method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR, + &smp->flags)) + smp->method = JUST_WORKS; /* Don't bother user space with no IO capabilities */ - if (method == JUST_CFM && hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT) - method = JUST_WORKS; + if (smp->method == JUST_CFM && + hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT) + smp->method = JUST_WORKS; /* If Just Works, Continue with Zero TK */ - if (method == JUST_WORKS) { + if (smp->method == JUST_WORKS) { set_bit(SMP_FLAG_TK_VALID, &smp->flags); return 0; } /* Not Just Works/Confirm results in MITM Authentication */ - if (method != JUST_CFM) { + if (smp->method != JUST_CFM) { set_bit(SMP_FLAG_MITM_AUTH, &smp->flags); if (hcon->pending_sec_level < BT_SECURITY_HIGH) hcon->pending_sec_level = BT_SECURITY_HIGH; @@ -503,15 +826,15 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, /* If both devices have Keyboard-Display I/O, the master * Confirms and the slave Enters the passkey. */ - if (method == OVERLAP) { + if (smp->method == OVERLAP) { if (hcon->role == HCI_ROLE_MASTER) - method = CFM_PASSKEY; + smp->method = CFM_PASSKEY; else - method = REQ_PASSKEY; + smp->method = REQ_PASSKEY; } /* Generate random passkey. */ - if (method == CFM_PASSKEY) { + if (smp->method == CFM_PASSKEY) { memset(smp->tk, 0, sizeof(smp->tk)); get_random_bytes(&passkey, sizeof(passkey)); passkey %= 1000000; @@ -520,12 +843,10 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, set_bit(SMP_FLAG_TK_VALID, &smp->flags); } - hci_dev_lock(hcon->hdev); - - if (method == REQ_PASSKEY) + if (smp->method == REQ_PASSKEY) ret = mgmt_user_passkey_request(hcon->hdev, &hcon->dst, hcon->type, hcon->dst_type); - else if (method == JUST_CFM) + else if (smp->method == JUST_CFM) ret = mgmt_user_confirm_request(hcon->hdev, &hcon->dst, hcon->type, hcon->dst_type, passkey, 1); @@ -534,8 +855,6 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, hcon->type, hcon->dst_type, passkey, 0); - hci_dev_unlock(hcon->hdev); - return ret; } @@ -547,7 +866,7 @@ static u8 smp_confirm(struct smp_chan *smp) BT_DBG("conn %p", conn); - ret = smp_c1(smp, smp->tk, smp->prnd, smp->preq, smp->prsp, + ret = smp_c1(smp->tfm_aes, smp->tk, smp->prnd, smp->preq, smp->prsp, conn->hcon->init_addr_type, &conn->hcon->init_addr, conn->hcon->resp_addr_type, &conn->hcon->resp_addr, cp.confirm_val); @@ -578,7 +897,7 @@ static u8 smp_random(struct smp_chan *smp) BT_DBG("conn %p %s", conn, conn->hcon->out ?
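In the CFM_PASSKEY branch above, tk_request() draws 32 random bits and reduces them modulo 1000000, so the TK is always a six-digit decimal passkey. A userspace model of that step (the plain modulo carries a negligible bias, accepted here exactly as in the kernel code):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Model of the CFM_PASSKEY branch above: random bits reduced to the
 * 000000..999999 range a user can read off a display. The kernel uses
 * get_random_bytes(); rand() only keeps the sketch self-contained. */
static uint32_t gen_passkey(void)
{
	return (uint32_t)rand() % 1000000;
}

int main(void)
{
	printf("passkey: %06u\n", gen_passkey());
	return 0;
}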
"master" : "slave"); - ret = smp_c1(smp, smp->tk, smp->rrnd, smp->preq, smp->prsp, + ret = smp_c1(smp->tfm_aes, smp->tk, smp->rrnd, smp->preq, smp->prsp, hcon->init_addr_type, &hcon->init_addr, hcon->resp_addr_type, &hcon->resp_addr, confirm); if (ret) @@ -594,7 +913,7 @@ static u8 smp_random(struct smp_chan *smp) __le64 rand = 0; __le16 ediv = 0; - smp_s1(smp, smp->tk, smp->rrnd, smp->prnd, stk); + smp_s1(smp->tfm_aes, smp->tk, smp->rrnd, smp->prnd, stk); memset(stk + smp->enc_key_size, 0, SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size); @@ -613,7 +932,7 @@ static u8 smp_random(struct smp_chan *smp) smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), smp->prnd); - smp_s1(smp, smp->tk, smp->prnd, smp->rrnd, stk); + smp_s1(smp->tfm_aes, smp->tk, smp->prnd, smp->rrnd, stk); memset(stk + smp->enc_key_size, 0, SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size); @@ -648,11 +967,13 @@ static void smp_notify_keys(struct l2cap_conn *conn) mgmt_new_irk(hdev, smp->remote_irk); /* Now that user space can be considered to know the * identity address track the connection based on it - * from now on. + * from now on (assuming this is an LE link). */ - bacpy(&hcon->dst, &smp->remote_irk->bdaddr); - hcon->dst_type = smp->remote_irk->addr_type; - queue_work(hdev->workqueue, &conn->id_addr_update_work); + if (hcon->type == LE_LINK) { + bacpy(&hcon->dst, &smp->remote_irk->bdaddr); + hcon->dst_type = smp->remote_irk->addr_type; + queue_work(hdev->workqueue, &conn->id_addr_update_work); + } /* When receiving an indentity resolving key for * a remote device that does not use a resolvable @@ -665,19 +986,29 @@ static void smp_notify_keys(struct l2cap_conn *conn) * just remove it. */ if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) { - list_del(&smp->remote_irk->list); - kfree(smp->remote_irk); + list_del_rcu(&smp->remote_irk->list); + kfree_rcu(smp->remote_irk, rcu); smp->remote_irk = NULL; } } - /* The LTKs and CSRKs should be persistent only if both sides - * had the bonding bit set in their authentication requests. - */ - persistent = !!((req->auth_req & rsp->auth_req) & SMP_AUTH_BONDING); - - if (smp->csrk) { - smp->csrk->bdaddr_type = hcon->dst_type; + if (hcon->type == ACL_LINK) { + if (hcon->key_type == HCI_LK_DEBUG_COMBINATION) + persistent = false; + else + persistent = !test_bit(HCI_CONN_FLUSH_KEY, + &hcon->flags); + } else { + /* The LTKs and CSRKs should be persistent only if both sides + * had the bonding bit set in their authentication requests. + */ + persistent = !!((req->auth_req & rsp->auth_req) & + SMP_AUTH_BONDING); + } + + + if (smp->csrk) { + smp->csrk->bdaddr_type = hcon->dst_type; bacpy(&smp->csrk->bdaddr, &hcon->dst); mgmt_new_csrk(hdev, smp->csrk, persistent); } @@ -699,6 +1030,81 @@ static void smp_notify_keys(struct l2cap_conn *conn) bacpy(&smp->slave_ltk->bdaddr, &hcon->dst); mgmt_new_ltk(hdev, smp->slave_ltk, persistent); } + + if (smp->link_key) { + struct link_key *key; + u8 type; + + if (test_bit(SMP_FLAG_DEBUG_KEY, &smp->flags)) + type = HCI_LK_DEBUG_COMBINATION; + else if (hcon->sec_level == BT_SECURITY_FIPS) + type = HCI_LK_AUTH_COMBINATION_P256; + else + type = HCI_LK_UNAUTH_COMBINATION_P256; + + key = hci_add_link_key(hdev, smp->conn->hcon, &hcon->dst, + smp->link_key, type, 0, &persistent); + if (key) { + mgmt_new_link_key(hdev, key, persistent); + + /* Don't keep debug keys around if the relevant + * flag is not set. 
+ */ + if (!test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags) && + key->type == HCI_LK_DEBUG_COMBINATION) { + list_del_rcu(&key->list); + kfree_rcu(key, rcu); + } + } + } +} + +static void sc_add_ltk(struct smp_chan *smp) +{ + struct hci_conn *hcon = smp->conn->hcon; + u8 key_type, auth; + + if (test_bit(SMP_FLAG_DEBUG_KEY, &smp->flags)) + key_type = SMP_LTK_P256_DEBUG; + else + key_type = SMP_LTK_P256; + + if (hcon->pending_sec_level == BT_SECURITY_FIPS) + auth = 1; + else + auth = 0; + + memset(smp->tk + smp->enc_key_size, 0, + SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size); + + smp->ltk = hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, + key_type, auth, smp->tk, smp->enc_key_size, + 0, 0); +} + +static void sc_generate_link_key(struct smp_chan *smp) +{ + /* These constants are as specified in the core specification. + * In ASCII they spell out to 'tmp1' and 'lebr'. + */ + const u8 tmp1[4] = { 0x31, 0x70, 0x6d, 0x74 }; + const u8 lebr[4] = { 0x72, 0x62, 0x65, 0x6c }; + + smp->link_key = kzalloc(16, GFP_KERNEL); + if (!smp->link_key) + return; + + if (smp_h6(smp->tfm_cmac, smp->tk, tmp1, smp->link_key)) { + kfree(smp->link_key); + smp->link_key = NULL; + return; + } + + if (smp_h6(smp->tfm_cmac, smp->link_key, lebr, smp->link_key)) { + kfree(smp->link_key); + smp->link_key = NULL; + return; + } } static void smp_allow_key_dist(struct smp_chan *smp) @@ -715,6 +1121,35 @@ static void smp_allow_key_dist(struct smp_chan *smp) SMP_ALLOW_CMD(smp, SMP_CMD_SIGN_INFO); } +static void sc_generate_ltk(struct smp_chan *smp) +{ + /* These constants are as specified in the core specification. + * In ASCII they spell out to 'tmp2' and 'brle'. + */ + const u8 tmp2[4] = { 0x32, 0x70, 0x6d, 0x74 }; + const u8 brle[4] = { 0x65, 0x6c, 0x72, 0x62 }; + struct hci_conn *hcon = smp->conn->hcon; + struct hci_dev *hdev = hcon->hdev; + struct link_key *key; + + key = hci_find_link_key(hdev, &hcon->dst); + if (!key) { + BT_ERR("%s No Link Key found to generate LTK", hdev->name); + return; + } + + if (key->type == HCI_LK_DEBUG_COMBINATION) + set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags); + + if (smp_h6(smp->tfm_cmac, key->val, tmp2, smp->tk)) + return; + + if (smp_h6(smp->tfm_cmac, smp->tk, brle, smp->tk)) + return; + + sc_add_ltk(smp); +} + static void smp_distribute_keys(struct smp_chan *smp) { struct smp_cmd_pairing *req, *rsp; @@ -743,6 +1178,16 @@ static void smp_distribute_keys(struct smp_chan *smp) *keydist &= req->resp_key_dist; } + if (test_bit(SMP_FLAG_SC, &smp->flags)) { + if (hcon->type == LE_LINK && (*keydist & SMP_DIST_LINK_KEY)) + sc_generate_link_key(smp); + if (hcon->type == ACL_LINK && (*keydist & SMP_DIST_ENC_KEY)) + sc_generate_ltk(smp); + + /* Clear the keys which are generated but not distributed */ + *keydist &= ~SMP_SC_NO_DIST; + } + BT_DBG("keydist 0x%x", *keydist); if (*keydist & SMP_DIST_ENC_KEY) { @@ -854,6 +1299,14 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) return NULL; } + smp->tfm_cmac = crypto_alloc_hash("cmac(aes)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(smp->tfm_cmac)) { + BT_ERR("Unable to create CMAC crypto context"); + crypto_free_blkcipher(smp->tfm_aes); + kfree(smp); + return NULL; + } + smp->conn = conn; chan->data = smp; @@ -866,6 +1319,213 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) return smp; } +static int sc_mackey_and_ltk(struct smp_chan *smp, u8 mackey[16], u8 ltk[16]) +{ + struct hci_conn *hcon = smp->conn->hcon; + u8 *na, *nb, a[7], b[7]; + + if (hcon->out) { + na = smp->prnd; + nb = smp->rrnd; + } else { + na = smp->rrnd; + nb = 
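sc_generate_link_key() above derives a BR/EDR link key from the LE LTK in two h6 steps, ILK = h6(LTK, "tmp1") and LK = h6(ILK, "lebr"), with the 4-byte ASCII constants stored little-endian (hence the reversed byte arrays). A sketch of the chain; h6() is AES-CMAC keyed by its first argument in the real code and is stubbed here only so the example stands alone. sc_generate_ltk() below runs the same chain the other way with "tmp2"/"brle":

#include <stdint.h>

static const uint8_t tmp1[4] = { 0x31, 0x70, 0x6d, 0x74 };	/* "tmp1" */
static const uint8_t lebr[4] = { 0x72, 0x62, 0x65, 0x6c };	/* "lebr" */

/* Stub: real h6(W, keyID) = AES-CMAC_W(keyID). Illustration only. */
static int h6(const uint8_t w[16], const uint8_t key_id[4], uint8_t res[16])
{
	int i;

	for (i = 0; i < 16; i++)
		res[i] = w[i] ^ key_id[i % 4];
	return 0;
}

/* LE LTK -> BR/EDR link key, mirroring sc_generate_link_key(). */
static int ltk_to_link_key(const uint8_t ltk[16], uint8_t link_key[16])
{
	uint8_t ilk[16];	/* intermediate link key */

	if (h6(ltk, tmp1, ilk))
		return -1;

	return h6(ilk, lebr, link_key);
}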
smp->prnd; + } + + memcpy(a, &hcon->init_addr, 6); + memcpy(b, &hcon->resp_addr, 6); + a[6] = hcon->init_addr_type; + b[6] = hcon->resp_addr_type; + + return smp_f5(smp->tfm_cmac, smp->dhkey, na, nb, a, b, mackey, ltk); +} + +static void sc_dhkey_check(struct smp_chan *smp) +{ + struct hci_conn *hcon = smp->conn->hcon; + struct smp_cmd_dhkey_check check; + u8 a[7], b[7], *local_addr, *remote_addr; + u8 io_cap[3], r[16]; + + memcpy(a, &hcon->init_addr, 6); + memcpy(b, &hcon->resp_addr, 6); + a[6] = hcon->init_addr_type; + b[6] = hcon->resp_addr_type; + + if (hcon->out) { + local_addr = a; + remote_addr = b; + memcpy(io_cap, &smp->preq[1], 3); + } else { + local_addr = b; + remote_addr = a; + memcpy(io_cap, &smp->prsp[1], 3); + } + + memset(r, 0, sizeof(r)); + + if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY) + put_unaligned_le32(hcon->passkey_notify, r); + + if (smp->method == REQ_OOB) + memcpy(r, smp->rr, 16); + + smp_f6(smp->tfm_cmac, smp->mackey, smp->prnd, smp->rrnd, r, io_cap, + local_addr, remote_addr, check.e); + + smp_send_cmd(smp->conn, SMP_CMD_DHKEY_CHECK, sizeof(check), &check); +} + +static u8 sc_passkey_send_confirm(struct smp_chan *smp) +{ + struct l2cap_conn *conn = smp->conn; + struct hci_conn *hcon = conn->hcon; + struct smp_cmd_pairing_confirm cfm; + u8 r; + + r = ((hcon->passkey_notify >> smp->passkey_round) & 0x01); + r |= 0x80; + + get_random_bytes(smp->prnd, sizeof(smp->prnd)); + + if (smp_f4(smp->tfm_cmac, smp->local_pk, smp->remote_pk, smp->prnd, r, + cfm.confirm_val)) + return SMP_UNSPECIFIED; + + smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cfm), &cfm); + + return 0; +} + +static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op) +{ + struct l2cap_conn *conn = smp->conn; + struct hci_conn *hcon = conn->hcon; + struct hci_dev *hdev = hcon->hdev; + u8 cfm[16], r; + + /* Ignore the PDU if we've already done 20 rounds (0 - 19) */ + if (smp->passkey_round >= 20) + return 0; + + switch (smp_op) { + case SMP_CMD_PAIRING_RANDOM: + r = ((hcon->passkey_notify >> smp->passkey_round) & 0x01); + r |= 0x80; + + if (smp_f4(smp->tfm_cmac, smp->remote_pk, smp->local_pk, + smp->rrnd, r, cfm)) + return SMP_UNSPECIFIED; + + if (memcmp(smp->pcnf, cfm, 16)) + return SMP_CONFIRM_FAILED; + + smp->passkey_round++; + + if (smp->passkey_round == 20) { + /* Generate MacKey and LTK */ + if (sc_mackey_and_ltk(smp, smp->mackey, smp->tk)) + return SMP_UNSPECIFIED; + } + + /* The round is only complete when the initiator + * receives pairing random. 
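sc_dhkey_check() above zeroes the 16-byte r input of f6 and only overwrites it for the passkey and OOB methods: the six-digit passkey goes little-endian into the first four bytes, while OOB copies the peer's 16-byte random. The selection in isolation (the enum is local to the example):

#include <stdint.h>
#include <string.h>

enum sc_method_kind { JW_OR_NUMCMP, PASSKEY_ENTRY, OOB_METHOD };

/* Build the 16-byte r input of f6 the way sc_dhkey_check() does. */
static void build_dhkey_check_r(enum sc_method_kind m, uint32_t passkey,
				const uint8_t oob_rand[16], uint8_t r[16])
{
	memset(r, 0, 16);		/* Just Works / numeric comparison */

	if (m == PASSKEY_ENTRY) {
		/* put_unaligned_le32() equivalent */
		r[0] = passkey & 0xff;
		r[1] = (passkey >> 8) & 0xff;
		r[2] = (passkey >> 16) & 0xff;
		r[3] = (passkey >> 24) & 0xff;
	} else if (m == OOB_METHOD) {
		memcpy(r, oob_rand, 16);	/* peer's OOB random */
	}
}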
+ */ + if (!hcon->out) { + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, + sizeof(smp->prnd), smp->prnd); + if (smp->passkey_round == 20) + SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); + else + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); + return 0; + } + + /* Start the next round */ + if (smp->passkey_round != 20) + return sc_passkey_round(smp, 0); + + /* Passkey rounds are complete - start DHKey Check */ + sc_dhkey_check(smp); + SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); + + break; + + case SMP_CMD_PAIRING_CONFIRM: + if (test_bit(SMP_FLAG_WAIT_USER, &smp->flags)) { + set_bit(SMP_FLAG_CFM_PENDING, &smp->flags); + return 0; + } + + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); + + if (hcon->out) { + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, + sizeof(smp->prnd), smp->prnd); + return 0; + } + + return sc_passkey_send_confirm(smp); + + case SMP_CMD_PUBLIC_KEY: + default: + /* Initiating device starts the round */ + if (!hcon->out) + return 0; + + BT_DBG("%s Starting passkey round %u", hdev->name, + smp->passkey_round + 1); + + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); + + return sc_passkey_send_confirm(smp); + } + + return 0; +} + +static int sc_user_reply(struct smp_chan *smp, u16 mgmt_op, __le32 passkey) +{ + struct l2cap_conn *conn = smp->conn; + struct hci_conn *hcon = conn->hcon; + u8 smp_op; + + clear_bit(SMP_FLAG_WAIT_USER, &smp->flags); + + switch (mgmt_op) { + case MGMT_OP_USER_PASSKEY_NEG_REPLY: + smp_failure(smp->conn, SMP_PASSKEY_ENTRY_FAILED); + return 0; + case MGMT_OP_USER_CONFIRM_NEG_REPLY: + smp_failure(smp->conn, SMP_NUMERIC_COMP_FAILED); + return 0; + case MGMT_OP_USER_PASSKEY_REPLY: + hcon->passkey_notify = le32_to_cpu(passkey); + smp->passkey_round = 0; + + if (test_and_clear_bit(SMP_FLAG_CFM_PENDING, &smp->flags)) + smp_op = SMP_CMD_PAIRING_CONFIRM; + else + smp_op = 0; + + if (sc_passkey_round(smp, smp_op)) + return -EIO; + + return 0; + } + + /* Initiator sends DHKey check first */ + if (hcon->out) { + sc_dhkey_check(smp); + SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); + } else if (test_and_clear_bit(SMP_FLAG_DHKEY_PENDING, &smp->flags)) { + sc_dhkey_check(smp); + sc_add_ltk(smp); + } + + return 0; +} + int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey) { struct l2cap_conn *conn = hcon->l2cap_data; @@ -891,6 +1551,11 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey) smp = chan->data; + if (test_bit(SMP_FLAG_SC, &smp->flags)) { + err = sc_user_reply(smp, mgmt_op, passkey); + goto unlock; + } + switch (mgmt_op) { case MGMT_OP_USER_PASSKEY_REPLY: value = le32_to_cpu(passkey); @@ -926,6 +1591,46 @@ unlock: return err; } +static void build_bredr_pairing_cmd(struct smp_chan *smp, + struct smp_cmd_pairing *req, + struct smp_cmd_pairing *rsp) +{ + struct l2cap_conn *conn = smp->conn; + struct hci_dev *hdev = conn->hcon->hdev; + u8 local_dist = 0, remote_dist = 0; + + if (test_bit(HCI_BONDABLE, &hdev->dev_flags)) { + local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; + remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; + } + + if (test_bit(HCI_RPA_RESOLVING, &hdev->dev_flags)) + remote_dist |= SMP_DIST_ID_KEY; + + if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) + local_dist |= SMP_DIST_ID_KEY; + + if (!rsp) { + memset(req, 0, sizeof(*req)); + + req->init_key_dist = local_dist; + req->resp_key_dist = remote_dist; + req->max_key_size = SMP_MAX_ENC_KEY_SIZE; + + smp->remote_key_dist = remote_dist; + + return; + } + + memset(rsp, 0, sizeof(*rsp)); + + rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE; + rsp->init_key_dist = req->init_key_dist & 
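sc_passkey_round() above runs exactly 20 confirm/random rounds because a six-digit passkey needs at most 20 bits (1000000 <= 2^20); each round commits one bit of the passkey, padded with 0x80 as in the "r |= 0x80" above. The per-round value on its own:

#include <assert.h>
#include <stdint.h>

/* One bit of the passkey is authenticated per round; 20 rounds cover
 * the whole 0..999999 range since 1000000 <= 2^20. */
static uint8_t passkey_round_bit(uint32_t passkey, int round)
{
	assert(round >= 0 && round < 20);

	/* Matches "r |= 0x80" in sc_passkey_round() above. */
	return (uint8_t)(((passkey >> round) & 0x01) | 0x80);
}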
remote_dist; + rsp->resp_key_dist = req->resp_key_dist & local_dist; + + smp->remote_key_dist = rsp->init_key_dist; +} + static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_pairing rsp, *req = (void *) skb->data; @@ -952,16 +1657,49 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) return SMP_UNSPECIFIED; /* We didn't start the pairing, so match remote */ - auth = req->auth_req & AUTH_REQ_MASK; + auth = req->auth_req & AUTH_REQ_MASK(hdev); if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) && (auth & SMP_AUTH_BONDING)) return SMP_PAIRING_NOTSUPP; + if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && !(auth & SMP_AUTH_SC)) + return SMP_AUTH_REQUIREMENTS; + smp->preq[0] = SMP_CMD_PAIRING_REQ; memcpy(&smp->preq[1], req, sizeof(*req)); skb_pull(skb, sizeof(*req)); + /* SMP over BR/EDR requires special treatment */ + if (conn->hcon->type == ACL_LINK) { + /* We must have a BR/EDR SC link */ + if (!test_bit(HCI_CONN_AES_CCM, &conn->hcon->flags)) + return SMP_CROSS_TRANSP_NOT_ALLOWED; + + set_bit(SMP_FLAG_SC, &smp->flags); + + build_bredr_pairing_cmd(smp, req, &rsp); + + key_size = min(req->max_key_size, rsp.max_key_size); + if (check_enc_key_size(conn, key_size)) + return SMP_ENC_KEY_SIZE; + + /* Clear bits which are generated but not distributed */ + smp->remote_key_dist &= ~SMP_SC_NO_DIST; + + smp->prsp[0] = SMP_CMD_PAIRING_RSP; + memcpy(&smp->prsp[1], &rsp, sizeof(rsp)); + smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp); + + smp_distribute_keys(smp); + return 0; + } + + build_pairing_cmd(conn, req, &rsp, auth); + + if (rsp.auth_req & SMP_AUTH_SC) + set_bit(SMP_FLAG_SC, &smp->flags); + if (conn->hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT) sec_level = BT_SECURITY_MEDIUM; else @@ -970,7 +1708,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) if (sec_level > conn->hcon->pending_sec_level) conn->hcon->pending_sec_level = sec_level; - /* If we need MITM check that it can be acheived */ + /* If we need MITM check that it can be achieved */ if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) { u8 method; @@ -980,8 +1718,6 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) return SMP_AUTH_REQUIREMENTS; } - build_pairing_cmd(conn, req, &rsp, auth); - key_size = min(req->max_key_size, rsp.max_key_size); if (check_enc_key_size(conn, key_size)) return SMP_ENC_KEY_SIZE; @@ -992,7 +1728,18 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) memcpy(&smp->prsp[1], &rsp, sizeof(rsp)); smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp); - SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); + + clear_bit(SMP_FLAG_INITIATOR, &smp->flags); + + if (test_bit(SMP_FLAG_SC, &smp->flags)) { + SMP_ALLOW_CMD(smp, SMP_CMD_PUBLIC_KEY); + /* Clear bits which are generated but not distributed */ + smp->remote_key_dist &= ~SMP_SC_NO_DIST; + /* Wait for Public Key from Initiating Device */ + return 0; + } else { + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); + } /* Request setup of TK */ ret = tk_request(conn, 0, auth, rsp.io_capability, req->io_capability); @@ -1002,11 +1749,46 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) return 0; } +static u8 sc_send_public_key(struct smp_chan *smp) +{ + struct hci_dev *hdev = smp->conn->hcon->hdev; + + BT_DBG(""); + + if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags)) { + BT_DBG("Using debug keys"); + memcpy(smp->local_pk, debug_pk, 64); + memcpy(smp->local_sk, debug_sk, 32); + 
set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags); + } else { + while (true) { + /* Generate local key pair for Secure Connections */ + if (!ecc_make_key(smp->local_pk, smp->local_sk)) + return SMP_UNSPECIFIED; + + /* This is unlikely, but we need to check that + * we didn't accidentally generate a debug key. + */ + if (memcmp(smp->local_sk, debug_sk, 32)) + break; + } + } + + SMP_DBG("Local Public Key X: %32phN", smp->local_pk); + SMP_DBG("Local Public Key Y: %32phN", &smp->local_pk[32]); + SMP_DBG("Local Private Key: %32phN", smp->local_sk); + + smp_send_cmd(smp->conn, SMP_CMD_PUBLIC_KEY, 64, smp->local_pk); + + return 0; +} + static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_pairing *req, *rsp = (void *) skb->data; struct l2cap_chan *chan = conn->smp; struct smp_chan *smp = chan->data; + struct hci_dev *hdev = conn->hcon->hdev; u8 key_size, auth; int ret; @@ -1026,9 +1808,33 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) if (check_enc_key_size(conn, key_size)) return SMP_ENC_KEY_SIZE; - auth = rsp->auth_req & AUTH_REQ_MASK; + auth = rsp->auth_req & AUTH_REQ_MASK(hdev); + + if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && !(auth & SMP_AUTH_SC)) + return SMP_AUTH_REQUIREMENTS; + + smp->prsp[0] = SMP_CMD_PAIRING_RSP; + memcpy(&smp->prsp[1], rsp, sizeof(*rsp)); + + /* Update remote key distribution in case the remote cleared + * some bits that we had enabled in our request. + */ + smp->remote_key_dist &= rsp->resp_key_dist; + + /* For BR/EDR this means we're done and can start phase 3 */ + if (conn->hcon->type == ACL_LINK) { + /* Clear bits which are generated but not distributed */ + smp->remote_key_dist &= ~SMP_SC_NO_DIST; + smp_distribute_keys(smp); + return 0; + } - /* If we need MITM check that it can be acheived */ + if ((req->auth_req & SMP_AUTH_SC) && (auth & SMP_AUTH_SC)) + set_bit(SMP_FLAG_SC, &smp->flags); + else if (conn->hcon->pending_sec_level > BT_SECURITY_HIGH) + conn->hcon->pending_sec_level = BT_SECURITY_HIGH; + + /* If we need MITM check that it can be achieved */ if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) { u8 method; @@ -1040,14 +1846,18 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) get_random_bytes(smp->prnd, sizeof(smp->prnd)); - smp->prsp[0] = SMP_CMD_PAIRING_RSP; - memcpy(&smp->prsp[1], rsp, sizeof(*rsp)); - /* Update remote key distribution in case the remote cleared * some bits that we had enabled in our request.
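The generation loop in sc_send_public_key() above exists only to guarantee that a freshly generated private key is never byte-identical to the published SMP debug key. A sketch of that retry pattern; ecc_make_key() and the debug key value are stand-ins (the real ones come from the ECC code added elsewhere in this series, and the spec's debug key bytes are elided):

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Stand-ins: the kernel's ecc_make_key() comes from the ECC code added
 * elsewhere in this series; the spec's debug key bytes are elided. */
static const uint8_t debug_sk[32] = { 0 };

static bool ecc_make_key(uint8_t public_key[64], uint8_t private_key[32])
{
	int i;

	for (i = 0; i < 32; i++)
		private_key[i] = (uint8_t)rand();
	memset(public_key, 0, 64);	/* real code derives pk from sk */
	return true;
}

/* Mirror of the retry loop above: regenerate until the private key
 * differs from the published debug key. */
static int make_non_debug_keypair(uint8_t pk[64], uint8_t sk[32])
{
	do {
		if (!ecc_make_key(pk, sk))
			return -1;	/* mapped to SMP_UNSPECIFIED above */
	} while (memcmp(sk, debug_sk, 32) == 0);

	return 0;
}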
*/ smp->remote_key_dist &= rsp->resp_key_dist; + if (test_bit(SMP_FLAG_SC, &smp->flags)) { + /* Clear bits which are generated but not distributed */ + smp->remote_key_dist &= ~SMP_SC_NO_DIST; + SMP_ALLOW_CMD(smp, SMP_CMD_PUBLIC_KEY); + return sc_send_public_key(smp); + } + auth |= req->auth_req; ret = tk_request(conn, 0, auth, req->io_capability, rsp->io_capability); @@ -1063,6 +1873,28 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) return 0; } +static u8 sc_check_confirm(struct smp_chan *smp) +{ + struct l2cap_conn *conn = smp->conn; + + BT_DBG(""); + + /* Public Key exchange must happen before any other steps */ + if (!test_bit(SMP_FLAG_REMOTE_PK, &smp->flags)) + return SMP_UNSPECIFIED; + + if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY) + return sc_passkey_round(smp, SMP_CMD_PAIRING_CONFIRM); + + if (conn->hcon->out) { + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), + smp->prnd); + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); + } + + return 0; +} + static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb) { struct l2cap_chan *chan = conn->smp; @@ -1076,35 +1908,110 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb) memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf)); skb_pull(skb, sizeof(smp->pcnf)); - if (conn->hcon->out) { + if (test_bit(SMP_FLAG_SC, &smp->flags)) + return sc_check_confirm(smp); + + if (conn->hcon->out) { + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), + smp->prnd); + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); + return 0; + } + + if (test_bit(SMP_FLAG_TK_VALID, &smp->flags)) + return smp_confirm(smp); + else + set_bit(SMP_FLAG_CFM_PENDING, &smp->flags); + + return 0; +} + +static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; + struct hci_conn *hcon = conn->hcon; + u8 *pkax, *pkbx, *na, *nb; + u32 passkey; + int err; + + BT_DBG("conn %p", conn); + + if (skb->len < sizeof(smp->rrnd)) + return SMP_INVALID_PARAMS; + + memcpy(smp->rrnd, skb->data, sizeof(smp->rrnd)); + skb_pull(skb, sizeof(smp->rrnd)); + + if (!test_bit(SMP_FLAG_SC, &smp->flags)) + return smp_random(smp); + + if (hcon->out) { + pkax = smp->local_pk; + pkbx = smp->remote_pk; + na = smp->prnd; + nb = smp->rrnd; + } else { + pkax = smp->remote_pk; + pkbx = smp->local_pk; + na = smp->rrnd; + nb = smp->prnd; + } + + if (smp->method == REQ_OOB) { + if (!hcon->out) + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, + sizeof(smp->prnd), smp->prnd); + SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); + goto mackey_and_ltk; + } + + /* Passkey entry has special treatment */ + if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY) + return sc_passkey_round(smp, SMP_CMD_PAIRING_RANDOM); + + if (hcon->out) { + u8 cfm[16]; + + err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->local_pk, + smp->rrnd, 0, cfm); + if (err) + return SMP_UNSPECIFIED; + + if (memcmp(smp->pcnf, cfm, 16)) + return SMP_CONFIRM_FAILED; + } else { smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), smp->prnd); - SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); - return 0; + SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); } - if (test_bit(SMP_FLAG_TK_VALID, &smp->flags)) - return smp_confirm(smp); - else - set_bit(SMP_FLAG_CFM_PENDING, &smp->flags); - - return 0; -} +mackey_and_ltk: + /* Generate MacKey and LTK */ + err = sc_mackey_and_ltk(smp, smp->mackey, smp->tk); + if (err) + return SMP_UNSPECIFIED; -static u8 
smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) -{ - struct l2cap_chan *chan = conn->smp; - struct smp_chan *smp = chan->data; + if (smp->method == JUST_WORKS || smp->method == REQ_OOB) { + if (hcon->out) { + sc_dhkey_check(smp); + SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); + } + return 0; + } - BT_DBG("conn %p", conn); + err = smp_g2(smp->tfm_cmac, pkax, pkbx, na, nb, &passkey); + if (err) + return SMP_UNSPECIFIED; - if (skb->len < sizeof(smp->rrnd)) - return SMP_INVALID_PARAMS; + err = mgmt_user_confirm_request(hcon->hdev, &hcon->dst, hcon->type, + hcon->dst_type, passkey, 0); + if (err) + return SMP_UNSPECIFIED; - memcpy(smp->rrnd, skb->data, sizeof(smp->rrnd)); - skb_pull(skb, sizeof(smp->rrnd)); + set_bit(SMP_FLAG_WAIT_USER, &smp->flags); - return smp_random(smp); + return 0; } static bool smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level) @@ -1112,8 +2019,7 @@ static bool smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level) struct smp_ltk *key; struct hci_conn *hcon = conn->hcon; - key = hci_find_ltk_by_addr(hcon->hdev, &hcon->dst, hcon->dst_type, - hcon->role); + key = hci_find_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, hcon->role); if (!key) return false; @@ -1132,20 +2038,21 @@ static bool smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level) return true; } -bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level) +bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level, + enum smp_key_pref key_pref) { if (sec_level == BT_SECURITY_LOW) return true; - /* If we're encrypted with an STK always claim insufficient - * security. This way we allow the connection to be re-encrypted - * with an LTK, even if the LTK provides the same level of - * security. Only exception is if we don't have an LTK (e.g. - * because of key distribution bits). + /* If we're encrypted with an STK but the caller prefers using + * LTK claim insufficient security. This way we allow the + * connection to be re-encrypted with an LTK, even if the LTK + * provides the same level of security. Only exception is if we + * don't have an LTK (e.g. because of key distribution bits). 
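For numeric comparison, the code above feeds both public keys and both nonces into smp_g2() (added earlier in this patch) and hands the result to user space as the value both displays must show. A model of g2's final truncation step, assuming the convention of taking the least significant 32 bits of the AES-CMAC output before reducing to six decimal digits; the CMAC itself is omitted:

#include <stdint.h>

/* Model of g2's last step: reduce the 16-byte AES-CMAC output to its
 * least significant 32 bits, then to six decimal digits. */
static uint32_t g2_compare_value(const uint8_t cmac[16])
{
	uint32_t v;

	/* get_unaligned_be32() over the trailing four bytes */
	v = ((uint32_t)cmac[12] << 24) | ((uint32_t)cmac[13] << 16) |
	    ((uint32_t)cmac[14] << 8) | (uint32_t)cmac[15];

	return v % 1000000;
}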
*/ - if (test_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags) && - hci_find_ltk_by_addr(hcon->hdev, &hcon->dst, hcon->dst_type, - hcon->role)) + if (key_pref == SMP_USE_LTK && + test_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags) && + hci_find_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, hcon->role)) return false; if (hcon->sec_level >= sec_level) @@ -1159,6 +2066,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) struct smp_cmd_security_req *rp = (void *) skb->data; struct smp_cmd_pairing cp; struct hci_conn *hcon = conn->hcon; + struct hci_dev *hdev = hcon->hdev; struct smp_chan *smp; u8 sec_level, auth; @@ -1170,14 +2078,17 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) if (hcon->role != HCI_ROLE_MASTER) return SMP_CMD_NOTSUPP; - auth = rp->auth_req & AUTH_REQ_MASK; + auth = rp->auth_req & AUTH_REQ_MASK(hdev); + + if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && !(auth & SMP_AUTH_SC)) + return SMP_AUTH_REQUIREMENTS; if (hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT) sec_level = BT_SECURITY_MEDIUM; else sec_level = authreq_to_seclevel(auth); - if (smp_sufficient_security(hcon, sec_level)) + if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) return 0; if (sec_level > hcon->pending_sec_level) @@ -1227,7 +2138,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) return 1; - if (smp_sufficient_security(hcon, sec_level)) + if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) return 1; if (sec_level > hcon->pending_sec_level) @@ -1253,6 +2164,9 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) authreq = seclevel_to_authreq(sec_level); + if (test_bit(HCI_SC_ENABLED, &hcon->hdev->dev_flags)) + authreq |= SMP_AUTH_SC; + /* Require MITM if IO Capability allows or the security level * requires it. */ @@ -1329,7 +2243,6 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb) skb_pull(skb, sizeof(*rp)); - hci_dev_lock(hdev); authenticated = (hcon->sec_level == BT_SECURITY_HIGH); ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, SMP_LTK, authenticated, smp->tk, smp->enc_key_size, @@ -1337,7 +2250,6 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb) smp->ltk = ltk; if (!(smp->remote_key_dist & KEY_DIST_MASK)) smp_distribute_keys(smp); - hci_dev_unlock(hdev); return 0; } @@ -1384,8 +2296,6 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn, skb_pull(skb, sizeof(*info)); - hci_dev_lock(hcon->hdev); - /* Strictly speaking the Core Specification (4.1) allows sending * an empty address which would force us to rely on just the IRK * as "identity information". 
However, since such @@ -1413,8 +2323,6 @@ distribute: if (!(smp->remote_key_dist & KEY_DIST_MASK)) smp_distribute_keys(smp); - hci_dev_unlock(hcon->hdev); - return 0; } @@ -1423,7 +2331,6 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb) struct smp_cmd_sign_info *rp = (void *) skb->data; struct l2cap_chan *chan = conn->smp; struct smp_chan *smp = chan->data; - struct hci_dev *hdev = conn->hcon->hdev; struct smp_csrk *csrk; BT_DBG("conn %p", conn); @@ -1436,7 +2343,6 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb) skb_pull(skb, sizeof(*rp)); - hci_dev_lock(hdev); csrk = kzalloc(sizeof(*csrk), GFP_KERNEL); if (csrk) { csrk->master = 0x01; @@ -1444,7 +2350,234 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb) } smp->csrk = csrk; smp_distribute_keys(smp); - hci_dev_unlock(hdev); + + return 0; +} + +static u8 sc_select_method(struct smp_chan *smp) +{ + struct l2cap_conn *conn = smp->conn; + struct hci_conn *hcon = conn->hcon; + struct smp_cmd_pairing *local, *remote; + u8 local_mitm, remote_mitm, local_io, remote_io, method; + + if (test_bit(SMP_FLAG_OOB, &smp->flags)) + return REQ_OOB; + + /* The preq/prsp contain the raw Pairing Request/Response PDUs + * which are needed as inputs to some crypto functions. To get + * the "struct smp_cmd_pairing" from them we need to skip the + * first byte which contains the opcode. + */ + if (hcon->out) { + local = (void *) &smp->preq[1]; + remote = (void *) &smp->prsp[1]; + } else { + local = (void *) &smp->prsp[1]; + remote = (void *) &smp->preq[1]; + } + + local_io = local->io_capability; + remote_io = remote->io_capability; + + local_mitm = (local->auth_req & SMP_AUTH_MITM); + remote_mitm = (remote->auth_req & SMP_AUTH_MITM); + + /* If either side wants MITM, look up the method from the table, + * otherwise use JUST WORKS. + */ + if (local_mitm || remote_mitm) + method = get_auth_method(smp, local_io, remote_io); + else + method = JUST_WORKS; + + /* Don't confirm locally initiated pairing attempts */ + if (method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR, &smp->flags)) + method = JUST_WORKS; + + return method; +} + +static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct smp_cmd_public_key *key = (void *) skb->data; + struct hci_conn *hcon = conn->hcon; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; + struct hci_dev *hdev = hcon->hdev; + struct smp_cmd_pairing_confirm cfm; + int err; + + BT_DBG("conn %p", conn); + + if (skb->len < sizeof(*key)) + return SMP_INVALID_PARAMS; + + memcpy(smp->remote_pk, key, 64); + + /* Non-initiating device sends its public key after receiving + * the key from the initiating device. 
+ */ + if (!hcon->out) { + err = sc_send_public_key(smp); + if (err) + return err; + } + + SMP_DBG("Remote Public Key X: %32phN", smp->remote_pk); + SMP_DBG("Remote Public Key Y: %32phN", &smp->remote_pk[32]); + + if (!ecdh_shared_secret(smp->remote_pk, smp->local_sk, smp->dhkey)) + return SMP_UNSPECIFIED; + + SMP_DBG("DHKey %32phN", smp->dhkey); + + set_bit(SMP_FLAG_REMOTE_PK, &smp->flags); + + smp->method = sc_select_method(smp); + + BT_DBG("%s selected method 0x%02x", hdev->name, smp->method); + + /* JUST_WORKS and JUST_CFM result in an unauthenticated key */ + if (smp->method == JUST_WORKS || smp->method == JUST_CFM) + hcon->pending_sec_level = BT_SECURITY_MEDIUM; + else + hcon->pending_sec_level = BT_SECURITY_FIPS; + + if (!memcmp(debug_pk, smp->remote_pk, 64)) + set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags); + + if (smp->method == DSP_PASSKEY) { + get_random_bytes(&hcon->passkey_notify, + sizeof(hcon->passkey_notify)); + hcon->passkey_notify %= 1000000; + hcon->passkey_entered = 0; + smp->passkey_round = 0; + if (mgmt_user_passkey_notify(hdev, &hcon->dst, hcon->type, + hcon->dst_type, + hcon->passkey_notify, + hcon->passkey_entered)) + return SMP_UNSPECIFIED; + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); + return sc_passkey_round(smp, SMP_CMD_PUBLIC_KEY); + } + + if (smp->method == REQ_OOB) { + err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->remote_pk, + smp->rr, 0, cfm.confirm_val); + if (err) + return SMP_UNSPECIFIED; + + if (memcmp(cfm.confirm_val, smp->pcnf, 16)) + return SMP_CONFIRM_FAILED; + + if (hcon->out) + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, + sizeof(smp->prnd), smp->prnd); + + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); + + return 0; + } + + if (hcon->out) + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); + + if (smp->method == REQ_PASSKEY) { + if (mgmt_user_passkey_request(hdev, &hcon->dst, hcon->type, + hcon->dst_type)) + return SMP_UNSPECIFIED; + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); + set_bit(SMP_FLAG_WAIT_USER, &smp->flags); + return 0; + } + + /* The Initiating device waits for the non-initiating device to + * send the confirm value. 
+ */ + if (conn->hcon->out) + return 0; + + err = smp_f4(smp->tfm_cmac, smp->local_pk, smp->remote_pk, smp->prnd, + 0, cfm.confirm_val); + if (err) + return SMP_UNSPECIFIED; + + smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cfm), &cfm); + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); + + return 0; +} + +static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct smp_cmd_dhkey_check *check = (void *) skb->data; + struct l2cap_chan *chan = conn->smp; + struct hci_conn *hcon = conn->hcon; + struct smp_chan *smp = chan->data; + u8 a[7], b[7], *local_addr, *remote_addr; + u8 io_cap[3], r[16], e[16]; + int err; + + BT_DBG("conn %p", conn); + + if (skb->len < sizeof(*check)) + return SMP_INVALID_PARAMS; + + memcpy(a, &hcon->init_addr, 6); + memcpy(b, &hcon->resp_addr, 6); + a[6] = hcon->init_addr_type; + b[6] = hcon->resp_addr_type; + + if (hcon->out) { + local_addr = a; + remote_addr = b; + memcpy(io_cap, &smp->prsp[1], 3); + } else { + local_addr = b; + remote_addr = a; + memcpy(io_cap, &smp->preq[1], 3); + } + + memset(r, 0, sizeof(r)); + + if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY) + put_unaligned_le32(hcon->passkey_notify, r); + + err = smp_f6(smp->tfm_cmac, smp->mackey, smp->rrnd, smp->prnd, r, + io_cap, remote_addr, local_addr, e); + if (err) + return SMP_UNSPECIFIED; + + if (memcmp(check->e, e, 16)) + return SMP_DHKEY_CHECK_FAILED; + + if (!hcon->out) { + if (test_bit(SMP_FLAG_WAIT_USER, &smp->flags)) { + set_bit(SMP_FLAG_DHKEY_PENDING, &smp->flags); + return 0; + } + + /* Slave sends DHKey check as response to master */ + sc_dhkey_check(smp); + } + + sc_add_ltk(smp); + + if (hcon->out) { + hci_le_start_enc(hcon, 0, 0, smp->tk); + hcon->enc_key_size = smp->enc_key_size; + } + + return 0; +} + +static int smp_cmd_keypress_notify(struct l2cap_conn *conn, + struct sk_buff *skb) +{ + struct smp_cmd_keypress_notify *kp = (void *) skb->data; + + BT_DBG("value 0x%02x", kp->value); return 0; } @@ -1457,11 +2590,6 @@ static int smp_sig_channel(struct l2cap_chan *chan, struct sk_buff *skb) __u8 code, reason; int err = 0; - if (hcon->type != LE_LINK) { - kfree_skb(skb); - return 0; - } - if (skb->len < 1) return -EILSEQ; @@ -1533,6 +2661,18 @@ static int smp_sig_channel(struct l2cap_chan *chan, struct sk_buff *skb) reason = smp_cmd_sign_info(conn, skb); break; + case SMP_CMD_PUBLIC_KEY: + reason = smp_cmd_public_key(conn, skb); + break; + + case SMP_CMD_DHKEY_CHECK: + reason = smp_cmd_dhkey_check(conn, skb); + break; + + case SMP_CMD_KEYPRESS_NOTIFY: + reason = smp_cmd_keypress_notify(conn, skb); + break; + default: BT_DBG("Unknown command code 0x%2.2x", code); reason = SMP_CMD_NOTSUPP; @@ -1568,6 +2708,74 @@ static void smp_teardown_cb(struct l2cap_chan *chan, int err) l2cap_chan_put(chan); } +static void bredr_pairing(struct l2cap_chan *chan) +{ + struct l2cap_conn *conn = chan->conn; + struct hci_conn *hcon = conn->hcon; + struct hci_dev *hdev = hcon->hdev; + struct smp_cmd_pairing req; + struct smp_chan *smp; + + BT_DBG("chan %p", chan); + + /* Only new pairings are interesting */ + if (!test_bit(HCI_CONN_NEW_LINK_KEY, &hcon->flags)) + return; + + /* Don't bother if we're not encrypted */ + if (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags)) + return; + + /* Only master may initiate SMP over BR/EDR */ + if (hcon->role != HCI_ROLE_MASTER) + return; + + /* Secure Connections support must be enabled */ + if (!test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) + return; + + /* BR/EDR must use Secure Connections for SMP */ + if (!test_bit(HCI_CONN_AES_CCM, 
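smp_cmd_dhkey_check() above verifies the peer's E value by recomputing f6 from the peer's perspective: the remote nonce comes first and the address roles are swapped relative to sc_dhkey_check(). A self-contained mirror of that verification; the f6() stub only keeps the sketch compilable, the real one is AES-CMAC keyed by the MacKey:

#include <stdint.h>
#include <string.h>

/* Stub: the real f6 is AES-CMAC keyed by the MacKey. Illustration only. */
static int f6(const uint8_t mackey[16], const uint8_t n1[16],
	      const uint8_t n2[16], const uint8_t r[16],
	      const uint8_t io_cap[3], const uint8_t a1[7],
	      const uint8_t a2[7], uint8_t res[16])
{
	int i;

	for (i = 0; i < 16; i++)
		res[i] = mackey[i] ^ n1[i] ^ n2[i] ^ r[i] ^
			 io_cap[i % 3] ^ a1[i % 7] ^ a2[i % 7];
	return 0;
}

/* Recompute E over the peer's view (their nonce and address first)
 * and compare with the received value, as the handler above does. */
static int verify_dhkey_check(const uint8_t mackey[16],
			      const uint8_t remote_nonce[16],
			      const uint8_t local_nonce[16],
			      const uint8_t r[16], const uint8_t io_cap[3],
			      const uint8_t remote_addr[7],
			      const uint8_t local_addr[7],
			      const uint8_t received_e[16])
{
	uint8_t e[16];

	if (f6(mackey, remote_nonce, local_nonce, r, io_cap,
	       remote_addr, local_addr, e))
		return -1;		/* SMP_UNSPECIFIED */

	if (memcmp(received_e, e, 16))
		return -2;		/* SMP_DHKEY_CHECK_FAILED */

	return 0;
}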
&hcon->flags) && + !test_bit(HCI_FORCE_LESC, &hdev->dbg_flags)) + return; + + /* If our LE support is not enabled don't do anything */ + if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) + return; + + /* Don't bother if remote LE support is not enabled */ + if (!lmp_host_le_capable(hcon)) + return; + + /* Remote must support SMP fixed chan for BR/EDR */ + if (!(conn->remote_fixed_chan & L2CAP_FC_SMP_BREDR)) + return; + + /* Don't bother if SMP is already ongoing */ + if (chan->data) + return; + + smp = smp_chan_create(conn); + if (!smp) { + BT_ERR("%s unable to create SMP context for BR/EDR", + hdev->name); + return; + } + + set_bit(SMP_FLAG_SC, &smp->flags); + + BT_DBG("%s starting SMP over BR/EDR", hdev->name); + + /* Prepare and send the BR/EDR SMP Pairing Request */ + build_bredr_pairing_cmd(smp, &req, NULL); + + smp->preq[0] = SMP_CMD_PAIRING_REQ; + memcpy(&smp->preq[1], &req, sizeof(req)); + + smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(req), &req); + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP); +} + static void smp_resume_cb(struct l2cap_chan *chan) { struct smp_chan *smp = chan->data; @@ -1576,6 +2784,11 @@ static void smp_resume_cb(struct l2cap_chan *chan) BT_DBG("chan %p", chan); + if (hcon->type == ACL_LINK) { + bredr_pairing(chan); + return; + } + if (!smp) return; @@ -1590,11 +2803,15 @@ static void smp_resume_cb(struct l2cap_chan *chan) static void smp_ready_cb(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; + struct hci_conn *hcon = conn->hcon; BT_DBG("chan %p", chan); conn->smp = chan; l2cap_chan_hold(chan); + + if (hcon->type == ACL_LINK && test_bit(HCI_CONN_ENCRYPT, &hcon->flags)) + bredr_pairing(chan); } static int smp_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) @@ -1647,7 +2864,6 @@ static const struct l2cap_ops smp_chan_ops = { .suspend = l2cap_chan_no_suspend, .set_shutdown = l2cap_chan_no_set_shutdown, .get_sndtimeo = l2cap_chan_no_get_sndtimeo, - .memcpy_fromiovec = l2cap_chan_no_memcpy_fromiovec, }; static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan) @@ -1668,6 +2884,13 @@ static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan) chan->omtu = pchan->omtu; chan->mode = pchan->mode; + /* Other L2CAP channels may request SMP routines in order to + * change the security level. This means that the SMP channel + * lock must be considered in its own category to avoid lockdep + * warnings. 
+ */ + atomic_set(&chan->nesting, L2CAP_NESTING_SMP); + BT_DBG("created chan %p", chan); return chan; @@ -1689,56 +2912,58 @@ static const struct l2cap_ops smp_root_chan_ops = { .resume = l2cap_chan_no_resume, .set_shutdown = l2cap_chan_no_set_shutdown, .get_sndtimeo = l2cap_chan_no_get_sndtimeo, - .memcpy_fromiovec = l2cap_chan_no_memcpy_fromiovec, }; -int smp_register(struct hci_dev *hdev) +static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid) { struct l2cap_chan *chan; struct crypto_blkcipher *tfm_aes; - BT_DBG("%s", hdev->name); + if (cid == L2CAP_CID_SMP_BREDR) { + tfm_aes = NULL; + goto create_chan; + } - tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC); + tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, 0); if (IS_ERR(tfm_aes)) { - int err = PTR_ERR(tfm_aes); BT_ERR("Unable to create crypto context"); - return err; + return ERR_PTR(PTR_ERR(tfm_aes)); } +create_chan: chan = l2cap_chan_create(); if (!chan) { crypto_free_blkcipher(tfm_aes); - return -ENOMEM; + return ERR_PTR(-ENOMEM); } chan->data = tfm_aes; - l2cap_add_scid(chan, L2CAP_CID_SMP); + l2cap_add_scid(chan, cid); l2cap_chan_set_defaults(chan); bacpy(&chan->src, &hdev->bdaddr); - chan->src_type = BDADDR_LE_PUBLIC; + if (cid == L2CAP_CID_SMP) + chan->src_type = BDADDR_LE_PUBLIC; + else + chan->src_type = BDADDR_BREDR; chan->state = BT_LISTEN; chan->mode = L2CAP_MODE_BASIC; chan->imtu = L2CAP_DEFAULT_MTU; chan->ops = &smp_root_chan_ops; - hdev->smp_data = chan; + /* Set correct nesting level for a parent/listening channel */ + atomic_set(&chan->nesting, L2CAP_NESTING_PARENT); - return 0; + return chan; } -void smp_unregister(struct hci_dev *hdev) +static void smp_del_chan(struct l2cap_chan *chan) { - struct l2cap_chan *chan = hdev->smp_data; - struct crypto_blkcipher *tfm_aes; - - if (!chan) - return; + struct crypto_blkcipher *tfm_aes; - BT_DBG("%s chan %p", hdev->name, chan); + BT_DBG("chan %p", chan); tfm_aes = chan->data; if (tfm_aes) { @@ -1746,6 +2971,52 @@ void smp_unregister(struct hci_dev *hdev) crypto_free_blkcipher(tfm_aes); } - hdev->smp_data = NULL; l2cap_chan_put(chan); } + +int smp_register(struct hci_dev *hdev) +{ + struct l2cap_chan *chan; + + BT_DBG("%s", hdev->name); + + chan = smp_add_cid(hdev, L2CAP_CID_SMP); + if (IS_ERR(chan)) + return PTR_ERR(chan); + + hdev->smp_data = chan; + + if (!lmp_sc_capable(hdev) && + !test_bit(HCI_FORCE_LESC, &hdev->dbg_flags)) + return 0; + + chan = smp_add_cid(hdev, L2CAP_CID_SMP_BREDR); + if (IS_ERR(chan)) { + int err = PTR_ERR(chan); + chan = hdev->smp_data; + hdev->smp_data = NULL; + smp_del_chan(chan); + return err; + } + + hdev->smp_bredr_data = chan; + + return 0; +} + +void smp_unregister(struct hci_dev *hdev) +{ + struct l2cap_chan *chan; + + if (hdev->smp_bredr_data) { + chan = hdev->smp_bredr_data; + hdev->smp_bredr_data = NULL; + smp_del_chan(chan); + } + + if (hdev->smp_data) { + chan = hdev->smp_data; + hdev->smp_data = NULL; + smp_del_chan(chan); + } +} diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h index 86a683a8b4917fe7ba0567254da8da210909f13c..3296bf42ae80d627c478c5199e167429ca16079b 100644 --- a/net/bluetooth/smp.h +++ b/net/bluetooth/smp.h @@ -50,10 +50,13 @@ struct smp_cmd_pairing { #define SMP_DIST_ENC_KEY 0x01 #define SMP_DIST_ID_KEY 0x02 #define SMP_DIST_SIGN 0x04 +#define SMP_DIST_LINK_KEY 0x08 #define SMP_AUTH_NONE 0x00 #define SMP_AUTH_BONDING 0x01 #define SMP_AUTH_MITM 0x04 +#define SMP_AUTH_SC 0x08 +#define SMP_AUTH_KEYPRESS 0x10 #define SMP_CMD_PAIRING_CONFIRM 0x03 struct smp_cmd_pairing_confirm { @@ -102,7 
+105,23 @@ struct smp_cmd_security_req { __u8 auth_req; } __packed; -#define SMP_CMD_MAX 0x0b +#define SMP_CMD_PUBLIC_KEY 0x0c +struct smp_cmd_public_key { + __u8 x[32]; + __u8 y[32]; +} __packed; + +#define SMP_CMD_DHKEY_CHECK 0x0d +struct smp_cmd_dhkey_check { + __u8 e[16]; +} __packed; + +#define SMP_CMD_KEYPRESS_NOTIFY 0x0e +struct smp_cmd_keypress_notify { + __u8 value; +} __packed; + +#define SMP_CMD_MAX 0x0e #define SMP_PASSKEY_ENTRY_FAILED 0x01 #define SMP_OOB_NOT_AVAIL 0x02 @@ -114,6 +133,10 @@ struct smp_cmd_security_req { #define SMP_UNSPECIFIED 0x08 #define SMP_REPEATED_ATTEMPTS 0x09 #define SMP_INVALID_PARAMS 0x0a +#define SMP_DHKEY_CHECK_FAILED 0x0b +#define SMP_NUMERIC_COMP_FAILED 0x0c +#define SMP_BREDR_PAIRING_IN_PROGRESS 0x0d +#define SMP_CROSS_TRANSP_NOT_ALLOWED 0x0e #define SMP_MIN_ENC_KEY_SIZE 7 #define SMP_MAX_ENC_KEY_SIZE 16 @@ -123,23 +146,48 @@ enum { SMP_STK, SMP_LTK, SMP_LTK_SLAVE, + SMP_LTK_P256, + SMP_LTK_P256_DEBUG, }; +static inline bool smp_ltk_is_sc(struct smp_ltk *key) +{ + switch (key->type) { + case SMP_LTK_P256: + case SMP_LTK_P256_DEBUG: + return true; + } + + return false; +} + static inline u8 smp_ltk_sec_level(struct smp_ltk *key) { - if (key->authenticated) - return BT_SECURITY_HIGH; + if (key->authenticated) { + if (smp_ltk_is_sc(key)) + return BT_SECURITY_FIPS; + else + return BT_SECURITY_HIGH; + } return BT_SECURITY_MEDIUM; } +/* Key preferences for smp_sufficient security */ +enum smp_key_pref { + SMP_ALLOW_STK, + SMP_USE_LTK, +}; + /* SMP Commands */ -bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level); +bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level, + enum smp_key_pref key_pref); int smp_conn_security(struct hci_conn *hcon, __u8 sec_level); int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey); -bool smp_irk_matches(struct hci_dev *hdev, u8 irk[16], bdaddr_t *bdaddr); -int smp_generate_rpa(struct hci_dev *hdev, u8 irk[16], bdaddr_t *rpa); +bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16], + const bdaddr_t *bdaddr); +int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa); int smp_register(struct hci_dev *hdev); void smp_unregister(struct hci_dev *hdev); diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 6f6c95cfe8f20958ce0d14522526a478549264ef..cc36e59db7d75203d89017e8d458b72370df33da 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -90,7 +90,7 @@ static void fdb_rcu_free(struct rcu_head *head) * are then updated with the new information. * Called under RTNL. */ -static void fdb_add_hw(struct net_bridge *br, const unsigned char *addr) +static void fdb_add_hw_addr(struct net_bridge *br, const unsigned char *addr) { int err; struct net_bridge_port *p; @@ -118,7 +118,7 @@ undo: * the ports with needed information. * Called under RTNL. 
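smp_ltk_sec_level() above maps stored key material to a connection security level: only authenticated keys of the new P-256 types reach BT_SECURITY_FIPS, authenticated legacy keys stay at BT_SECURITY_HIGH, and unauthenticated keys at BT_SECURITY_MEDIUM. The same decision table as a standalone function (the BT_SECURITY_* values are assumed from the bluetooth headers):

#include <stdbool.h>

/* Assumed values from include/net/bluetooth/bluetooth.h. */
enum { BT_SECURITY_MEDIUM = 2, BT_SECURITY_HIGH = 3, BT_SECURITY_FIPS = 4 };

enum ltk_type { SMP_STK, SMP_LTK, SMP_LTK_SLAVE,
		SMP_LTK_P256, SMP_LTK_P256_DEBUG };

/* Same mapping as smp_ltk_sec_level()/smp_ltk_is_sc() above. */
static int ltk_sec_level(enum ltk_type type, bool authenticated)
{
	bool sc = (type == SMP_LTK_P256 || type == SMP_LTK_P256_DEBUG);

	if (!authenticated)
		return BT_SECURITY_MEDIUM;

	return sc ? BT_SECURITY_FIPS : BT_SECURITY_HIGH;
}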
*/ -static void fdb_del_hw(struct net_bridge *br, const unsigned char *addr) +static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr) { struct net_bridge_port *p; @@ -133,7 +133,7 @@ static void fdb_del_hw(struct net_bridge *br, const unsigned char *addr) static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f) { if (f->is_static) - fdb_del_hw(br, f->addr.addr); + fdb_del_hw_addr(br, f->addr.addr); hlist_del_rcu(&f->hlist); fdb_notify(br, f, RTM_DELNEIGH); @@ -481,6 +481,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head, fdb->is_local = 0; fdb->is_static = 0; fdb->added_by_user = 0; + fdb->added_by_external_learn = 0; fdb->updated = fdb->used = jiffies; hlist_add_head_rcu(&fdb->hlist, head); } @@ -514,7 +515,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, return -ENOMEM; fdb->is_local = fdb->is_static = 1; - fdb_add_hw(br, addr); + fdb_add_hw_addr(br, addr); fdb_notify(br, fdb, RTM_NEWNEIGH); return 0; } @@ -613,7 +614,7 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br, ndm->ndm_family = AF_BRIDGE; ndm->ndm_pad1 = 0; ndm->ndm_pad2 = 0; - ndm->ndm_flags = 0; + ndm->ndm_flags = fdb->added_by_external_learn ? NTF_EXT_LEARNED : 0; ndm->ndm_type = 0; ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex; ndm->ndm_state = fdb_to_nud(fdb); @@ -754,19 +755,19 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr, fdb->is_local = 1; if (!fdb->is_static) { fdb->is_static = 1; - fdb_add_hw(br, addr); + fdb_add_hw_addr(br, addr); } } else if (state & NUD_NOARP) { fdb->is_local = 0; if (!fdb->is_static) { fdb->is_static = 1; - fdb_add_hw(br, addr); + fdb_add_hw_addr(br, addr); } } else { fdb->is_local = 0; if (fdb->is_static) { fdb->is_static = 0; - fdb_del_hw(br, addr); + fdb_del_hw_addr(br, addr); } } @@ -805,33 +806,17 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p, /* Add new permanent fdb entry with RTM_NEWNEIGH */ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr, u16 nlh_flags) + const unsigned char *addr, u16 vid, u16 nlh_flags) { struct net_bridge_port *p; int err = 0; struct net_port_vlans *pv; - unsigned short vid = VLAN_N_VID; if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) { pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state); return -EINVAL; } - if (tb[NDA_VLAN]) { - if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) { - pr_info("bridge: RTM_NEWNEIGH with invalid vlan\n"); - return -EINVAL; - } - - vid = nla_get_u16(tb[NDA_VLAN]); - - if (!vid || vid >= VLAN_VID_MASK) { - pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n", - vid); - return -EINVAL; - } - } - if (is_zero_ether_addr(addr)) { pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n"); return -EINVAL; @@ -845,7 +830,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], } pv = nbp_get_vlan_info(p); - if (vid != VLAN_N_VID) { + if (vid) { if (!pv || !test_bit(vid, pv->vlan_bitmap)) { pr_info("bridge: RTM_NEWNEIGH with unconfigured " "vlan %d on port %s\n", vid, dev->name); @@ -903,27 +888,12 @@ static int __br_fdb_delete(struct net_bridge_port *p, /* Remove neighbor entry with RTM_DELNEIGH */ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr) + const unsigned char *addr, u16 vid) { struct net_bridge_port *p; int err; struct net_port_vlans *pv; - unsigned short vid = 
VLAN_N_VID; - if (tb[NDA_VLAN]) { - if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) { - pr_info("bridge: RTM_NEWNEIGH with invalid vlan\n"); - return -EINVAL; - } - - vid = nla_get_u16(tb[NDA_VLAN]); - - if (!vid || vid >= VLAN_VID_MASK) { - pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n", - vid); - return -EINVAL; - } - } p = br_port_get_rtnl(dev); if (p == NULL) { pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n", @@ -932,7 +902,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], } pv = nbp_get_vlan_info(p); - if (vid != VLAN_N_VID) { + if (vid) { if (!pv || !test_bit(vid, pv->vlan_bitmap)) { pr_info("bridge: RTM_DELNEIGH with unconfigured " "vlan %d on port %s\n", vid, dev->name); @@ -1014,3 +984,91 @@ void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p) } } } + +int br_fdb_external_learn_add(struct net_device *dev, + const unsigned char *addr, u16 vid) +{ + struct net_bridge_port *p; + struct net_bridge *br; + struct hlist_head *head; + struct net_bridge_fdb_entry *fdb; + int err = 0; + + rtnl_lock(); + + p = br_port_get_rtnl(dev); + if (!p) { + pr_info("bridge: %s not a bridge port\n", dev->name); + err = -EINVAL; + goto err_rtnl_unlock; + } + + br = p->br; + + spin_lock_bh(&br->hash_lock); + + head = &br->hash[br_mac_hash(addr, vid)]; + fdb = fdb_find(head, addr, vid); + if (!fdb) { + fdb = fdb_create(head, p, addr, vid); + if (!fdb) { + err = -ENOMEM; + goto err_unlock; + } + fdb->added_by_external_learn = 1; + fdb_notify(br, fdb, RTM_NEWNEIGH); + } else if (fdb->added_by_external_learn) { + /* Refresh entry */ + fdb->updated = fdb->used = jiffies; + } else if (!fdb->added_by_user) { + /* Take over SW learned entry */ + fdb->added_by_external_learn = 1; + fdb->updated = jiffies; + fdb_notify(br, fdb, RTM_NEWNEIGH); + } + +err_unlock: + spin_unlock_bh(&br->hash_lock); +err_rtnl_unlock: + rtnl_unlock(); + + return err; +} +EXPORT_SYMBOL(br_fdb_external_learn_add); + +int br_fdb_external_learn_del(struct net_device *dev, + const unsigned char *addr, u16 vid) +{ + struct net_bridge_port *p; + struct net_bridge *br; + struct hlist_head *head; + struct net_bridge_fdb_entry *fdb; + int err = 0; + + rtnl_lock(); + + p = br_port_get_rtnl(dev); + if (!p) { + pr_info("bridge: %s not a bridge port\n", dev->name); + err = -EINVAL; + goto err_rtnl_unlock; + } + + br = p->br; + + spin_lock_bh(&br->hash_lock); + + head = &br->hash[br_mac_hash(addr, vid)]; + fdb = fdb_find(head, addr, vid); + if (fdb && fdb->added_by_external_learn) + fdb_delete(br, fdb); + else + err = -ENOENT; + + spin_unlock_bh(&br->hash_lock); +err_rtnl_unlock: + rtnl_unlock(); + + return err; +} +EXPORT_SYMBOL(br_fdb_external_learn_del); diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 44cb786b925a2e857633b47b8ce3deaa22c6a5d8..f96933a823e327fe56f64d1005101309e78a8e49 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -184,6 +184,11 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb, /* Do not flood unicast traffic to ports that turn it off */ if (unicast && !(p->flags & BR_FLOOD)) continue; + + /* Do not flood to ports that enable proxy ARP */ + if (p->flags & BR_PROXYARP) + continue; + prev = maybe_deliver(prev, p, skb, __packet_hook); if (IS_ERR(prev)) goto out; diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 6fd5522df696ce744558a4db82803c34394eed6f..1f1de715197c19ab12f5333e9a518a317754f041 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -16,6 +16,8 @@ #include #include #include 
+#include +#include #include #include #include "br_private.h" @@ -57,6 +59,60 @@ static int br_pass_frame_up(struct sk_buff *skb) netif_receive_skb); } +static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br, + u16 vid) +{ + struct net_device *dev = br->dev; + struct neighbour *n; + struct arphdr *parp; + u8 *arpptr, *sha; + __be32 sip, tip; + + if (dev->flags & IFF_NOARP) + return; + + if (!pskb_may_pull(skb, arp_hdr_len(dev))) { + dev->stats.tx_dropped++; + return; + } + parp = arp_hdr(skb); + + if (parp->ar_pro != htons(ETH_P_IP) || + parp->ar_op != htons(ARPOP_REQUEST) || + parp->ar_hln != dev->addr_len || + parp->ar_pln != 4) + return; + + arpptr = (u8 *)parp + sizeof(struct arphdr); + sha = arpptr; + arpptr += dev->addr_len; /* sha */ + memcpy(&sip, arpptr, sizeof(sip)); + arpptr += sizeof(sip); + arpptr += dev->addr_len; /* tha */ + memcpy(&tip, arpptr, sizeof(tip)); + + if (ipv4_is_loopback(tip) || + ipv4_is_multicast(tip)) + return; + + n = neigh_lookup(&arp_tbl, &tip, dev); + if (n) { + struct net_bridge_fdb_entry *f; + + if (!(n->nud_state & NUD_VALID)) { + neigh_release(n); + return; + } + + f = __br_fdb_get(br, n->ha, vid); + if (f) + arp_send(ARPOP_REPLY, ETH_P_ARP, sip, skb->dev, tip, + sha, n->ha, sha); + + neigh_release(n); + } +} + /* note: already called with rcu_read_lock */ int br_handle_frame_finish(struct sk_buff *skb) { @@ -98,6 +154,10 @@ int br_handle_frame_finish(struct sk_buff *skb) dst = NULL; if (is_broadcast_ether_addr(dest)) { + if (p->flags & BR_PROXYARP && + skb->protocol == htons(ETH_P_ARP)) + br_do_proxy_arp(skb, br, vid); + skb2 = skb; unicast = false; } else if (is_multicast_ether_addr(dest)) { diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 1a4f32c09ad51707c063f6eebe14abe747ed0d26..c190d22b6b3d9e96ce91c182ea561a6c02c6e125 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include "br_private.h" diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index e5ec470b851f1f55fe37ec4ecaf557d75483af26..9f5eb55a4d3a7e4b51cc3fa3fa0783a52e6f98fa 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -60,7 +60,8 @@ static int br_port_fill_attrs(struct sk_buff *skb, nla_put_u8(skb, IFLA_BRPORT_PROTECT, !!(p->flags & BR_ROOT_BLOCK)) || nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)) || nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) || - nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD))) + nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)) || + nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP))) return -EMSGSIZE; return 0; @@ -333,6 +334,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK); br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING); br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD); + br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP); if (tb[IFLA_BRPORT_COST]) { err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 4d783d071305a4d7aa683aaa74b57088db6142a8..aea3d1339b3f3d248a9d5517725dbe3732c03d8e 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -98,9 +98,10 @@ struct net_bridge_fdb_entry unsigned long updated; unsigned long used; mac_addr addr; - unsigned char is_local; - unsigned char is_static; - 
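The offset walk in br_do_proxy_arp() above follows the IPv4-over-Ethernet ARP payload; the kernel addresses the fields by hand because the 32-bit addresses sit unaligned behind the 8-byte struct arphdr. For reference only, the layout being walked is equivalent to this purely illustrative struct (it does not exist in the tree):

struct arp_eth_ipv4_body {		/* immediately follows struct arphdr */
	u8	ar_sha[ETH_ALEN];	/* sender hardware address */
	__be32	ar_sip;			/* sender IP address */
	u8	ar_tha[ETH_ALEN];	/* target hardware address */
	__be32	ar_tip;			/* target IP address */
} __packed;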
unsigned char added_by_user; + unsigned char is_local:1, + is_static:1, + added_by_user:1, + added_by_external_learn:1; __u16 vlan_id; }; @@ -163,15 +164,6 @@ struct net_bridge_port struct rcu_head rcu; unsigned long flags; -#define BR_HAIRPIN_MODE 0x00000001 -#define BR_BPDU_GUARD 0x00000002 -#define BR_ROOT_BLOCK 0x00000004 -#define BR_MULTICAST_FAST_LEAVE 0x00000008 -#define BR_ADMIN_COST 0x00000010 -#define BR_LEARNING 0x00000020 -#define BR_FLOOD 0x00000040 -#define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING) -#define BR_PROMISC 0x00000080 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING struct bridge_mcast_own_query ip4_own_query; @@ -403,9 +395,9 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, const unsigned char *addr, u16 vid, bool added_by_user); int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, const unsigned char *addr); + struct net_device *dev, const unsigned char *addr, u16 vid); int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr, u16 nlh_flags); + const unsigned char *addr, u16 vid, u16 nlh_flags); int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, struct net_device *fdev, int idx); int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p); diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index 2b047bcf42a4eb1c450c0283af9bea2676052836..fb3ebe6155134b4532ad4c85b2fea2260f0d6140 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c @@ -12,6 +12,7 @@ */ #include #include +#include #include "br_private.h" #include "br_private_stp.h" @@ -38,7 +39,13 @@ void br_log_state(const struct net_bridge_port *p) void br_set_state(struct net_bridge_port *p, unsigned int state) { + int err; + p->state = state; + err = netdev_switch_port_stp_update(p->dev, state); + if (err && err != -EOPNOTSUPP) + br_warn(p->br, "error setting offload STP state on port %u(%s)\n", + (unsigned int) p->port_no, p->dev->name); } /* called under bridge lock */ diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index e561cd59b8a6ef0e764b3028d350b13954deac05..2de5d91199e8172f9356b104bbcfa772ff460d45 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c @@ -170,6 +170,7 @@ BRPORT_ATTR_FLAG(bpdu_guard, BR_BPDU_GUARD); BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK); BRPORT_ATTR_FLAG(learning, BR_LEARNING); BRPORT_ATTR_FLAG(unicast_flood, BR_FLOOD); +BRPORT_ATTR_FLAG(proxyarp, BR_PROXYARP); #ifdef CONFIG_BRIDGE_IGMP_SNOOPING static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf) @@ -213,6 +214,7 @@ static const struct brport_attribute *brport_attrs[] = { &brport_attr_multicast_router, &brport_attr_multicast_fast_leave, #endif + &brport_attr_proxyarp, NULL }; diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 150048fb99b08eeeb25aac140721b973731ebf61..97b8ddf573634b3d7fe9a40979ea5db0c3e01377 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -199,8 +199,8 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, if (skb->vlan_proto != proto) { /* Protocol-mismatch, empty out vlan_tci for new tag */ skb_push(skb, ETH_HLEN); - skb = __vlan_put_tag(skb, skb->vlan_proto, - vlan_tx_tag_get(skb)); + skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, + vlan_tx_tag_get(skb)); if (unlikely(!skb)) return false; diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c index 
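br_set_state() above now pushes the STP state down through netdev_switch_port_stp_update(), and silently accepts -EOPNOTSUPP so ports without offload behave as before. On the driver side this lands in the new swdev ndo; a sketch, assuming the ndo_switch_port_stp_update hook this helper dispatches to (the my_* names are illustrative):

static int my_port_stp_update(struct net_device *dev, u8 state)
{
	/* state is one of BR_STATE_DISABLED/LISTENING/LEARNING/
	 * BLOCKING/FORWARDING, as passed by br_set_state()
	 */
	return my_hw_set_stp_state(netdev_priv(dev), state);	/* illustrative */
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_switch_port_stp_update	= my_port_stp_update,
};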
074c557ab505eb75bd0970a3045a78f0ca5c85dd..19473a9371b8a65ed6b1e8727a5c2b321862886b 100644 --- a/net/bridge/netfilter/nf_tables_bridge.c +++ b/net/bridge/netfilter/nf_tables_bridge.c @@ -13,6 +13,82 @@ #include #include #include +#include +#include +#include +#include +#include + +int nft_bridge_iphdr_validate(struct sk_buff *skb) +{ + struct iphdr *iph; + u32 len; + + if (!pskb_may_pull(skb, sizeof(struct iphdr))) + return 0; + + iph = ip_hdr(skb); + if (iph->ihl < 5 || iph->version != 4) + return 0; + + len = ntohs(iph->tot_len); + if (skb->len < len) + return 0; + else if (len < (iph->ihl*4)) + return 0; + + if (!pskb_may_pull(skb, iph->ihl*4)) + return 0; + + return 1; +} +EXPORT_SYMBOL_GPL(nft_bridge_iphdr_validate); + +int nft_bridge_ip6hdr_validate(struct sk_buff *skb) +{ + struct ipv6hdr *hdr; + u32 pkt_len; + + if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) + return 0; + + hdr = ipv6_hdr(skb); + if (hdr->version != 6) + return 0; + + pkt_len = ntohs(hdr->payload_len); + if (pkt_len + sizeof(struct ipv6hdr) > skb->len) + return 0; + + return 1; +} +EXPORT_SYMBOL_GPL(nft_bridge_ip6hdr_validate); + +static inline void nft_bridge_set_pktinfo_ipv4(struct nft_pktinfo *pkt, + const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out) +{ + if (nft_bridge_iphdr_validate(skb)) + nft_set_pktinfo_ipv4(pkt, ops, skb, in, out); + else + nft_set_pktinfo(pkt, ops, skb, in, out); +} + +static inline void nft_bridge_set_pktinfo_ipv6(struct nft_pktinfo *pkt, + const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out) +{ +#if IS_ENABLED(CONFIG_IPV6) + if (nft_bridge_ip6hdr_validate(skb) && + nft_set_pktinfo_ipv6(pkt, ops, skb, in, out) == 0) + return; +#endif + nft_set_pktinfo(pkt, ops, skb, in, out); +} static unsigned int nft_do_chain_bridge(const struct nf_hook_ops *ops, @@ -23,7 +99,17 @@ nft_do_chain_bridge(const struct nf_hook_ops *ops, { struct nft_pktinfo pkt; - nft_set_pktinfo(&pkt, ops, skb, in, out); + switch (eth_hdr(skb)->h_proto) { + case htons(ETH_P_IP): + nft_bridge_set_pktinfo_ipv4(&pkt, ops, skb, in, out); + break; + case htons(ETH_P_IPV6): + nft_bridge_set_pktinfo_ipv6(&pkt, ops, skb, in, out); + break; + default: + nft_set_pktinfo(&pkt, ops, skb, in, out); + break; + } return nft_do_chain(&pkt, ops); } diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index 48da2c54a69e36aa1ae2f1d9e84cdb82e49a84f3..b0330aecbf974e10ec8f64d3e93b012be5d689be 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -35,30 +36,6 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb, skb_pull(nskb, ETH_HLEN); } -static int nft_reject_iphdr_validate(struct sk_buff *oldskb) -{ - struct iphdr *iph; - u32 len; - - if (!pskb_may_pull(oldskb, sizeof(struct iphdr))) - return 0; - - iph = ip_hdr(oldskb); - if (iph->ihl < 5 || iph->version != 4) - return 0; - - len = ntohs(iph->tot_len); - if (oldskb->len < len) - return 0; - else if (len < (iph->ihl*4)) - return 0; - - if (!pskb_may_pull(oldskb, iph->ihl*4)) - return 0; - - return 1; -} - static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook) { struct sk_buff *nskb; @@ -66,7 +43,7 @@ static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook) const struct tcphdr *oth; struct tcphdr _oth; - if 
(!nft_reject_iphdr_validate(oldskb)) + if (!nft_bridge_iphdr_validate(oldskb)) return; oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook); @@ -101,7 +78,7 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook, void *payload; __wsum csum; - if (!nft_reject_iphdr_validate(oldskb)) + if (!nft_bridge_iphdr_validate(oldskb)) return; /* IP header checks: fragment. */ @@ -146,25 +123,6 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook, br_deliver(br_port_get_rcu(oldskb->dev), nskb); } -static int nft_reject_ip6hdr_validate(struct sk_buff *oldskb) -{ - struct ipv6hdr *hdr; - u32 pkt_len; - - if (!pskb_may_pull(oldskb, sizeof(struct ipv6hdr))) - return 0; - - hdr = ipv6_hdr(oldskb); - if (hdr->version != 6) - return 0; - - pkt_len = ntohs(hdr->payload_len); - if (pkt_len + sizeof(struct ipv6hdr) > oldskb->len) - return 0; - - return 1; -} - static void nft_reject_br_send_v6_tcp_reset(struct net *net, struct sk_buff *oldskb, int hook) { @@ -174,7 +132,7 @@ static void nft_reject_br_send_v6_tcp_reset(struct net *net, unsigned int otcplen; struct ipv6hdr *nip6h; - if (!nft_reject_ip6hdr_validate(oldskb)) + if (!nft_bridge_ip6hdr_validate(oldskb)) return; oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook); @@ -207,7 +165,7 @@ static void nft_reject_br_send_v6_unreach(struct net *net, unsigned int len; void *payload; - if (!nft_reject_ip6hdr_validate(oldskb)) + if (!nft_bridge_ip6hdr_validate(oldskb)) return; /* Include "As much of invoking packet as possible without the ICMPv6 diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 43f750e88e199e451d7de049255257b46cd947ab..769b185fefbd5f6bb6d4e68c1bcb0caeafa23d25 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -293,7 +293,7 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, copylen = len; } - ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen); + ret = skb_copy_datagram_msg(skb, 0, m, copylen); if (ret) goto out_free; @@ -418,7 +418,7 @@ unlock: } release_sock(sk); chunk = min_t(unsigned int, skb->len, size); - if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { + if (memcpy_to_msg(msg, skb->data, chunk)) { skb_queue_head(&sk->sk_receive_queue, skb); if (copied == 0) copied = -EFAULT; @@ -535,7 +535,7 @@ static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock, goto err; ret = -EINVAL; - if (unlikely(msg->msg_iov->iov_base == NULL)) + if (unlikely(msg->msg_iter.iov->iov_base == NULL)) goto err; noblock = msg->msg_flags & MSG_DONTWAIT; @@ -566,7 +566,7 @@ static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock, skb_reserve(skb, cf_sk->headroom); - ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); + ret = memcpy_from_msg(skb_put(skb, len), msg, len); if (ret) goto err; @@ -641,7 +641,7 @@ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, */ size = min_t(int, size, skb_tailroom(skb)); - err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); + err = memcpy_from_msg(skb_put(skb, size), msg, size); if (err) { kfree_skb(skb); goto out_err; diff --git a/net/can/af_can.c b/net/can/af_can.c index ce82337521f665c5847819402d8a9c167452fb90..66e08040ced7557ba19e7535815804bbcffca12a 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -64,9 +64,6 @@ #include "af_can.h" -static __initconst const char banner[] = KERN_INFO - "can: controller area network core (" CAN_VERSION_STRING ")\n"; - MODULE_DESCRIPTION("Controller Area Network PF_CAN core"); MODULE_LICENSE("Dual 
BSD/GPL"); MODULE_AUTHOR("Urs Thuermann , " @@ -524,7 +521,7 @@ static void can_rx_delete_receiver(struct rcu_head *rp) /** * can_rx_unregister - unsubscribe CAN frames from a specific interface - * @dev: pointer to netdevice (NULL => unsubcribe from 'all' CAN devices list) + * @dev: pointer to netdevice (NULL => unsubscribe from 'all' CAN devices list) * @can_id: CAN identifier * @mask: CAN mask * @func: callback function on filter match @@ -896,7 +893,7 @@ static __init int can_init(void) offsetof(struct can_frame, data) != offsetof(struct canfd_frame, data)); - printk(banner); + pr_info("can: controller area network core (" CAN_VERSION_STRING ")\n"); memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list)); diff --git a/net/can/bcm.c b/net/can/bcm.c index dcb75c0e66c1b69979a88c65efe43084046f8bc2..ee9ffd9565526eb0336fba37e17f0a31b2dd3c34 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -78,8 +78,6 @@ (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)) #define CAN_BCM_VERSION CAN_VERSION -static __initconst const char banner[] = KERN_INFO - "can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n"; MODULE_DESCRIPTION("PF_CAN broadcast manager protocol"); MODULE_LICENSE("Dual BSD/GPL"); @@ -441,7 +439,7 @@ static void bcm_rx_update_and_send(struct bcm_op *op, /* mark as used and throttled by default */ lastdata->can_dlc |= (RX_RECV|RX_THR); - /* throtteling mode inactive ? */ + /* throttling mode inactive ? */ if (!op->kt_ival2.tv64) { /* send RX_CHANGED to the user immediately */ bcm_rx_changed(op, lastdata); @@ -452,7 +450,7 @@ static void bcm_rx_update_and_send(struct bcm_op *op, if (hrtimer_active(&op->thrtimer)) return; - /* first receiption with enabled throttling mode */ + /* first reception with enabled throttling mode */ if (!op->kt_lastmsg.tv64) goto rx_changed_settime; @@ -480,7 +478,7 @@ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index, const struct can_frame *rxdata) { /* - * no one uses the MSBs of can_dlc for comparation, + * no one uses the MSBs of can_dlc for comparison, * so we use it here to detect the first time of reception */ @@ -510,7 +508,7 @@ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index, } /* - * bcm_rx_starttimer - enable timeout monitoring for CAN frame receiption + * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception */ static void bcm_rx_starttimer(struct bcm_op *op) { @@ -539,7 +537,7 @@ static void bcm_rx_timeout_tsklet(unsigned long data) } /* - * bcm_rx_timeout_handler - when the (cyclic) CAN frame receiption timed out + * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer) { @@ -627,7 +625,7 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer) } /* - * bcm_rx_handler - handle a CAN frame receiption + * bcm_rx_handler - handle a CAN frame reception */ static void bcm_rx_handler(struct sk_buff *skb, void *data) { @@ -858,8 +856,7 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, /* update can_frames content */ for (i = 0; i < msg_head->nframes; i++) { - err = memcpy_fromiovec((u8 *)&op->frames[i], - msg->msg_iov, CFSIZ); + err = memcpy_from_msg((u8 *)&op->frames[i], msg, CFSIZ); if (op->frames[i].can_dlc > 8) err = -EINVAL; @@ -894,8 +891,7 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, op->frames = &op->sframe; for (i = 0; i < msg_head->nframes; i++) { - err = memcpy_fromiovec((u8 *)&op->frames[i], - 
msg->msg_iov, CFSIZ); + err = memcpy_from_msg((u8 *)&op->frames[i], msg, CFSIZ); if (op->frames[i].can_dlc > 8) err = -EINVAL; @@ -1024,9 +1020,8 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, if (msg_head->nframes) { /* update can_frames content */ - err = memcpy_fromiovec((u8 *)op->frames, - msg->msg_iov, - msg_head->nframes * CFSIZ); + err = memcpy_from_msg((u8 *)op->frames, msg, + msg_head->nframes * CFSIZ); if (err < 0) return err; @@ -1072,8 +1067,8 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, } if (msg_head->nframes) { - err = memcpy_fromiovec((u8 *)op->frames, msg->msg_iov, - msg_head->nframes * CFSIZ); + err = memcpy_from_msg((u8 *)op->frames, msg, + msg_head->nframes * CFSIZ); if (err < 0) { if (op->frames != &op->sframe) kfree(op->frames); @@ -1209,7 +1204,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk) can_skb_reserve(skb); - err = memcpy_fromiovec(skb_put(skb, CFSIZ), msg->msg_iov, CFSIZ); + err = memcpy_from_msg(skb_put(skb, CFSIZ), msg, CFSIZ); if (err < 0) { kfree_skb(skb); return err; @@ -1285,7 +1280,7 @@ static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock, /* read message head information */ - ret = memcpy_fromiovec((u8 *)&msg_head, msg->msg_iov, MHSIZ); + ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ); if (ret < 0) return ret; @@ -1558,7 +1553,7 @@ static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock, if (skb->len < size) size = skb->len; - err = memcpy_toiovec(msg->msg_iov, skb->data, size); + err = memcpy_to_msg(msg, skb->data, size); if (err < 0) { skb_free_datagram(sk, skb); return err; @@ -1615,7 +1610,7 @@ static int __init bcm_module_init(void) { int err; - printk(banner); + pr_info("can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n"); err = can_proto_register(&bcm_can_proto); if (err < 0) { diff --git a/net/can/gw.c b/net/can/gw.c index 050a2110d43f6b78f331b599569eaaf2d8803c24..295f62e62eb34bf4050eb0657c48426e583b65b4 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -361,7 +361,7 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) * The Controller Area Network controllers only accept CAN frames with * correct CRCs - which are not visible in the controller registers. * According to skbuff.h documentation the csum_start element for IP - * checksums is undefined/unsued when ip_summed == CHECKSUM_UNNECESSARY. + * checksums is undefined/unused when ip_summed == CHECKSUM_UNNECESSARY. * Only CAN skbs can be processed here which already have this property. 
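The iovec-to-iov_iter conversions in this series are mechanical: direct walks of msg->msg_iov become calls on the msghdr, whose embedded iterator tracks and advances the position. The same before/after pattern repeats across caif, can and the core (buf, msg and len stand in for each caller's variables):

/* old: caller hands the raw iovec around */
err = memcpy_fromiovec(buf, msg->msg_iov, len);
err = memcpy_toiovec(msg->msg_iov, buf, len);

/* new: the msghdr's msg_iter carries the state */
err = memcpy_from_msg(buf, msg, len);
err = memcpy_to_msg(msg, buf, len);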
*/ diff --git a/net/can/raw.c b/net/can/raw.c index 081e81fd017fa53f7a6ed3afd341601b43377531..00c13ef23661bd92133fff7db45588d3bd83fc62 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -56,8 +56,6 @@ #include #define CAN_RAW_VERSION CAN_VERSION -static __initconst const char banner[] = - KERN_INFO "can: raw protocol (rev " CAN_RAW_VERSION ")\n"; MODULE_DESCRIPTION("PF_CAN raw protocol"); MODULE_LICENSE("Dual BSD/GPL"); @@ -703,7 +701,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock, can_skb_reserve(skb); can_skb_prv(skb)->ifindex = dev->ifindex; - err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); + err = memcpy_from_msg(skb_put(skb, size), msg, size); if (err < 0) goto free_skb; @@ -750,7 +748,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock, else size = skb->len; - err = memcpy_toiovec(msg->msg_iov, skb->data, size); + err = memcpy_to_msg(msg, skb->data, size); if (err < 0) { skb_free_datagram(sk, skb); return err; @@ -810,7 +808,7 @@ static __init int raw_module_init(void) { int err; - printk(banner); + pr_info("can: raw protocol (rev " CAN_RAW_VERSION ")\n"); err = can_proto_register(&raw_can_proto); if (err < 0) diff --git a/net/compat.c b/net/compat.c index bc8aeefddf3f23cb259b15c319e05949b9f78901..3236b4167a32109b91b1eb28ae8bdf522c8db3f5 100644 --- a/net/compat.c +++ b/net/compat.c @@ -31,81 +31,54 @@ #include #include -static inline int iov_from_user_compat_to_kern(struct iovec *kiov, - struct compat_iovec __user *uiov32, - int niov) +ssize_t get_compat_msghdr(struct msghdr *kmsg, + struct compat_msghdr __user *umsg, + struct sockaddr __user **save_addr, + struct iovec **iov) { - int tot_len = 0; - - while (niov > 0) { - compat_uptr_t buf; - compat_size_t len; - - if (get_user(len, &uiov32->iov_len) || - get_user(buf, &uiov32->iov_base)) - return -EFAULT; - - if (len > INT_MAX - tot_len) - len = INT_MAX - tot_len; - - tot_len += len; - kiov->iov_base = compat_ptr(buf); - kiov->iov_len = (__kernel_size_t) len; - uiov32++; - kiov++; - niov--; - } - return tot_len; -} - -int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg) -{ - compat_uptr_t tmp1, tmp2, tmp3; + compat_uptr_t uaddr, uiov, tmp3; + compat_size_t nr_segs; + ssize_t err; if (!access_ok(VERIFY_READ, umsg, sizeof(*umsg)) || - __get_user(tmp1, &umsg->msg_name) || + __get_user(uaddr, &umsg->msg_name) || __get_user(kmsg->msg_namelen, &umsg->msg_namelen) || - __get_user(tmp2, &umsg->msg_iov) || - __get_user(kmsg->msg_iovlen, &umsg->msg_iovlen) || + __get_user(uiov, &umsg->msg_iov) || + __get_user(nr_segs, &umsg->msg_iovlen) || __get_user(tmp3, &umsg->msg_control) || __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || __get_user(kmsg->msg_flags, &umsg->msg_flags)) return -EFAULT; if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) kmsg->msg_namelen = sizeof(struct sockaddr_storage); - kmsg->msg_name = compat_ptr(tmp1); - kmsg->msg_iov = compat_ptr(tmp2); kmsg->msg_control = compat_ptr(tmp3); - return 0; -} -/* I've named the args so it is easy to tell whose space the pointers are in. 
*/ -int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, - struct sockaddr_storage *kern_address, int mode) -{ - int tot_len; + if (save_addr) + *save_addr = compat_ptr(uaddr); - if (kern_msg->msg_name && kern_msg->msg_namelen) { - if (mode == VERIFY_READ) { - int err = move_addr_to_kernel(kern_msg->msg_name, - kern_msg->msg_namelen, - kern_address); + if (uaddr && kmsg->msg_namelen) { + if (!save_addr) { + err = move_addr_to_kernel(compat_ptr(uaddr), + kmsg->msg_namelen, + kmsg->msg_name); if (err < 0) return err; } - kern_msg->msg_name = kern_address; } else { - kern_msg->msg_name = NULL; - kern_msg->msg_namelen = 0; + kmsg->msg_name = NULL; + kmsg->msg_namelen = 0; } - tot_len = iov_from_user_compat_to_kern(kern_iov, - (struct compat_iovec __user *)kern_msg->msg_iov, - kern_msg->msg_iovlen); - if (tot_len >= 0) - kern_msg->msg_iov = kern_iov; + if (nr_segs > UIO_MAXIOV) + return -EMSGSIZE; - return tot_len; + err = compat_rw_copy_check_uvector(save_addr ? READ : WRITE, + compat_ptr(uiov), nr_segs, + UIO_FASTIOV, *iov, iov); + if (err >= 0) + iov_iter_init(&kmsg->msg_iter, save_addr ? READ : WRITE, + *iov, nr_segs, err); + return err; } /* Bleech... */ @@ -740,7 +713,7 @@ COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg, uns { if (flags & MSG_CMSG_COMPAT) return -EINVAL; - return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); + return __sys_sendmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT); } COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg, @@ -756,7 +729,7 @@ COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg, uns { if (flags & MSG_CMSG_COMPAT) return -EINVAL; - return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); + return __sys_recvmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT); } COMPAT_SYSCALL_DEFINE4(recv, int, fd, void __user *, buf, compat_size_t, len, unsigned int, flags) diff --git a/net/core/datagram.c b/net/core/datagram.c index fdbc9a81d4c2033565c36815d425d0a7d80372b7..df493d68330c03d1cb5b59e40d31294d7f45b3f8 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include @@ -309,16 +310,14 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags) EXPORT_SYMBOL(skb_kill_datagram); /** - * skb_copy_datagram_iovec - Copy a datagram to an iovec. + * skb_copy_datagram_iter - Copy a datagram to an iovec iterator. * @skb: buffer to copy * @offset: offset in the buffer to start copying from - * @to: io vector to copy to + * @to: iovec iterator to copy to * @len: amount of data to copy from buffer to iovec - * - * Note: the iovec is modified during the copy. 
*/ -int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset, - struct iovec *to, int len) +int skb_copy_datagram_iter(const struct sk_buff *skb, int offset, + struct iov_iter *to, int len) { int start = skb_headlen(skb); int i, copy = start - offset; @@ -330,8 +329,8 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset, if (copy > 0) { if (copy > len) copy = len; - if (memcpy_toiovec(to, skb->data + offset, copy)) - goto fault; + if (copy_to_iter(skb->data + offset, copy, to) != copy) + goto short_copy; if ((len -= copy) == 0) return 0; offset += copy; @@ -346,18 +345,12 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset, end = start + skb_frag_size(frag); if ((copy = end - offset) > 0) { - int err; - u8 *vaddr; - struct page *page = skb_frag_page(frag); - if (copy > len) copy = len; - vaddr = kmap(page); - err = memcpy_toiovec(to, vaddr + frag->page_offset + - offset - start, copy); - kunmap(page); - if (err) - goto fault; + if (copy_page_to_iter(skb_frag_page(frag), + frag->page_offset + offset - + start, copy, to) != copy) + goto short_copy; if (!(len -= copy)) return 0; offset += copy; @@ -374,9 +367,8 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset, if ((copy = end - offset) > 0) { if (copy > len) copy = len; - if (skb_copy_datagram_iovec(frag_iter, - offset - start, - to, copy)) + if (skb_copy_datagram_iter(frag_iter, offset - start, + to, copy)) goto fault; if ((len -= copy) == 0) return 0; @@ -387,113 +379,33 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset, if (!len) return 0; + /* This is not really a user copy fault, but rather someone + * gave us a bogus length on the skb. We should probably + * print a warning here as it may indicate a kernel bug. + */ + fault: return -EFAULT; -} -EXPORT_SYMBOL(skb_copy_datagram_iovec); -/** - * skb_copy_datagram_const_iovec - Copy a datagram to an iovec. - * @skb: buffer to copy - * @offset: offset in the buffer to start copying from - * @to: io vector to copy to - * @to_offset: offset in the io vector to start copying to - * @len: amount of data to copy from buffer to iovec - * - * Returns 0 or -EFAULT. - * Note: the iovec is not modified during the copy. - */ -int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset, - const struct iovec *to, int to_offset, - int len) -{ - int start = skb_headlen(skb); - int i, copy = start - offset; - struct sk_buff *frag_iter; +short_copy: + if (iov_iter_count(to)) + goto fault; - /* Copy header. */ - if (copy > 0) { - if (copy > len) - copy = len; - if (memcpy_toiovecend(to, skb->data + offset, to_offset, copy)) - goto fault; - if ((len -= copy) == 0) - return 0; - offset += copy; - to_offset += copy; - } - - /* Copy paged appendix. Hmm... why does this look so complicated? 
*/ - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - int end; - const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - - WARN_ON(start > offset + len); - - end = start + skb_frag_size(frag); - if ((copy = end - offset) > 0) { - int err; - u8 *vaddr; - struct page *page = skb_frag_page(frag); - - if (copy > len) - copy = len; - vaddr = kmap(page); - err = memcpy_toiovecend(to, vaddr + frag->page_offset + - offset - start, to_offset, copy); - kunmap(page); - if (err) - goto fault; - if (!(len -= copy)) - return 0; - offset += copy; - to_offset += copy; - } - start = end; - } - - skb_walk_frags(skb, frag_iter) { - int end; - - WARN_ON(start > offset + len); - - end = start + frag_iter->len; - if ((copy = end - offset) > 0) { - if (copy > len) - copy = len; - if (skb_copy_datagram_const_iovec(frag_iter, - offset - start, - to, to_offset, - copy)) - goto fault; - if ((len -= copy) == 0) - return 0; - offset += copy; - to_offset += copy; - } - start = end; - } - if (!len) - return 0; - -fault: - return -EFAULT; + return 0; } -EXPORT_SYMBOL(skb_copy_datagram_const_iovec); +EXPORT_SYMBOL(skb_copy_datagram_iter); /** - * skb_copy_datagram_from_iovec - Copy a datagram from an iovec. + * skb_copy_datagram_from_iter - Copy a datagram from an iov_iter. * @skb: buffer to copy * @offset: offset in the buffer to start copying to - * @from: io vector to copy to - * @from_offset: offset in the io vector to start copying from + * @from: the copy source * @len: amount of data to copy to buffer from iovec * * Returns 0 or -EFAULT. - * Note: the iovec is not modified during the copy. */ -int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, - const struct iovec *from, int from_offset, +int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, + struct iov_iter *from, int len) { int start = skb_headlen(skb); @@ -504,13 +416,11 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, if (copy > 0) { if (copy > len) copy = len; - if (memcpy_fromiovecend(skb->data + offset, from, from_offset, - copy)) + if (copy_from_iter(skb->data + offset, copy, from) != copy) goto fault; if ((len -= copy) == 0) return 0; offset += copy; - from_offset += copy; } /* Copy paged appendix. Hmm... why does this look so complicated? 
*/ @@ -522,24 +432,19 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, end = start + skb_frag_size(frag); if ((copy = end - offset) > 0) { - int err; - u8 *vaddr; - struct page *page = skb_frag_page(frag); + size_t copied; if (copy > len) copy = len; - vaddr = kmap(page); - err = memcpy_fromiovecend(vaddr + frag->page_offset + - offset - start, - from, from_offset, copy); - kunmap(page); - if (err) + copied = copy_page_from_iter(skb_frag_page(frag), + frag->page_offset + offset - start, + copy, from); + if (copied != copy) goto fault; if (!(len -= copy)) return 0; offset += copy; - from_offset += copy; } start = end; } @@ -553,16 +458,13 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, if ((copy = end - offset) > 0) { if (copy > len) copy = len; - if (skb_copy_datagram_from_iovec(frag_iter, - offset - start, - from, - from_offset, - copy)) + if (skb_copy_datagram_from_iter(frag_iter, + offset - start, + from, copy)) goto fault; if ((len -= copy) == 0) return 0; offset += copy; - from_offset += copy; } start = end; } @@ -572,101 +474,82 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, fault: return -EFAULT; } -EXPORT_SYMBOL(skb_copy_datagram_from_iovec); +EXPORT_SYMBOL(skb_copy_datagram_from_iter); /** - * zerocopy_sg_from_iovec - Build a zerocopy datagram from an iovec + * zerocopy_sg_from_iter - Build a zerocopy datagram from an iov_iter * @skb: buffer to copy - * @from: io vector to copy from - * @offset: offset in the io vector to start copying from - * @count: amount of vectors to copy to buffer from + * @from: the source to copy from * * The function will first copy up to headlen, and then pin the userspace * pages and build frags through them. * * Returns 0, -EFAULT or -EMSGSIZE. - * Note: the iovec is not modified during the copy */ -int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, - int offset, size_t count) +int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from) { - int len = iov_length(from, count) - offset; + int len = iov_iter_count(from); int copy = min_t(int, skb_headlen(skb), len); - int size; - int i = 0; + int frag = 0; /* copy up to skb headlen */ - if (skb_copy_datagram_from_iovec(skb, 0, from, offset, copy)) + if (skb_copy_datagram_from_iter(skb, 0, from, copy)) return -EFAULT; - if (len == copy) - return 0; - - offset += copy; - while (count--) { - struct page *page[MAX_SKB_FRAGS]; - int num_pages; - unsigned long base; + while (iov_iter_count(from)) { + struct page *pages[MAX_SKB_FRAGS]; + size_t start; + ssize_t copied; unsigned long truesize; + int n = 0; - /* Skip over from offset and copied */ - if (offset >= from->iov_len) { - offset -= from->iov_len; - ++from; - continue; - } - len = from->iov_len - offset; - base = (unsigned long)from->iov_base + offset; - size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT; - if (i + size > MAX_SKB_FRAGS) + if (frag == MAX_SKB_FRAGS) return -EMSGSIZE; - num_pages = get_user_pages_fast(base, size, 0, &page[i]); - if (num_pages != size) { - release_pages(&page[i], num_pages, 0); + + copied = iov_iter_get_pages(from, pages, ~0U, + MAX_SKB_FRAGS - frag, &start); + if (copied < 0) return -EFAULT; - } - truesize = size * PAGE_SIZE; - skb->data_len += len; - skb->len += len; + + iov_iter_advance(from, copied); + + truesize = PAGE_ALIGN(copied + start); + skb->data_len += copied; + skb->len += copied; skb->truesize += truesize; atomic_add(truesize, &skb->sk->sk_wmem_alloc); - while (len) { - int off = base & ~PAGE_MASK; - int 
size = min_t(int, len, PAGE_SIZE - off); - skb_fill_page_desc(skb, i, page[i], off, size); - base += size; - len -= size; - i++; + while (copied) { + int size = min_t(int, copied, PAGE_SIZE - start); + skb_fill_page_desc(skb, frag++, pages[n], start, size); + start = 0; + copied -= size; + n++; } - offset = 0; - ++from; } return 0; } -EXPORT_SYMBOL(zerocopy_sg_from_iovec); +EXPORT_SYMBOL(zerocopy_sg_from_iter); static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, - u8 __user *to, int len, + struct iov_iter *to, int len, __wsum *csump) { int start = skb_headlen(skb); int i, copy = start - offset; struct sk_buff *frag_iter; int pos = 0; + int n; /* Copy header. */ if (copy > 0) { - int err = 0; if (copy > len) copy = len; - *csump = csum_and_copy_to_user(skb->data + offset, to, copy, - *csump, &err); - if (err) + n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to); + if (n != copy) goto fault; if ((len -= copy) == 0) return 0; offset += copy; - to += copy; pos = copy; } @@ -678,26 +561,22 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, end = start + skb_frag_size(frag); if ((copy = end - offset) > 0) { - __wsum csum2; - int err = 0; - u8 *vaddr; + __wsum csum2 = 0; struct page *page = skb_frag_page(frag); + u8 *vaddr = kmap(page); if (copy > len) copy = len; - vaddr = kmap(page); - csum2 = csum_and_copy_to_user(vaddr + - frag->page_offset + - offset - start, - to, copy, 0, &err); + n = csum_and_copy_to_iter(vaddr + frag->page_offset + + offset - start, copy, + &csum2, to); kunmap(page); - if (err) + if (n != copy) goto fault; *csump = csum_block_add(*csump, csum2, pos); if (!(len -= copy)) return 0; offset += copy; - to += copy; pos += copy; } start = end; @@ -722,7 +601,6 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, if ((len -= copy) == 0) return 0; offset += copy; - to += copy; pos += copy; } start = end; @@ -775,20 +653,19 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb) EXPORT_SYMBOL(__skb_checksum_complete); /** - * skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec. + * skb_copy_and_csum_datagram_msg - Copy and checksum skb to user iovec. * @skb: skbuff * @hlen: hardware length - * @iov: io vector + * @msg: destination * * Caller _must_ check that skb will fit to this iovec. * * Returns: 0 - success. * -EINVAL - checksum failure. - * -EFAULT - fault during copy. Beware, in this case iovec - * can be modified! + * -EFAULT - fault during copy. */ -int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, - int hlen, struct iovec *iov) +int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, + int hlen, struct msghdr *msg) { __wsum csum; int chunk = skb->len - hlen; @@ -796,28 +673,20 @@ int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, if (!chunk) return 0; - /* Skip filled elements. 
- * Pretty silly, look at memcpy_toiovec, though 8) - */ - while (!iov->iov_len) - iov++; - - if (iov->iov_len < chunk) { + if (iov_iter_count(&msg->msg_iter) < chunk) { if (__skb_checksum_complete(skb)) goto csum_error; - if (skb_copy_datagram_iovec(skb, hlen, iov, chunk)) + if (skb_copy_datagram_msg(skb, hlen, msg, chunk)) goto fault; } else { csum = csum_partial(skb->data, hlen, skb->csum); - if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base, + if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter, chunk, &csum)) goto fault; if (csum_fold(csum)) goto csum_error; if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) netdev_rx_csum_fault(skb->dev); - iov->iov_len -= chunk; - iov->iov_base += chunk; } return 0; csum_error: @@ -825,7 +694,7 @@ csum_error: fault: return -EFAULT; } -EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec); +EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg); /** * datagram_poll - generic datagram poll diff --git a/net/core/dev.c b/net/core/dev.c index 3acff097456023e17b1e69188a0553dd5afce8a8..f411c28d0a66805661db0a409b140d02e8ca4041 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -118,6 +118,7 @@ #include #include #include +#include #include #include #include @@ -133,6 +134,7 @@ #include #include #include +#include #include "net-sysfs.h" @@ -1435,22 +1437,17 @@ EXPORT_SYMBOL(dev_close); */ void dev_disable_lro(struct net_device *dev) { - /* - * If we're trying to disable lro on a vlan device - * use the underlying physical device instead - */ - if (is_vlan_dev(dev)) - dev = vlan_dev_real_dev(dev); - - /* the same for macvlan devices */ - if (netif_is_macvlan(dev)) - dev = macvlan_dev_real_dev(dev); + struct net_device *lower_dev; + struct list_head *iter; dev->wanted_features &= ~NETIF_F_LRO; netdev_update_features(dev); if (unlikely(dev->features & NETIF_F_LRO)) netdev_WARN(dev, "failed to disable LRO!\n"); + + netdev_for_each_lower_dev(dev, lower_dev, iter) + dev_disable_lro(lower_dev); } EXPORT_SYMBOL(dev_disable_lro); @@ -2530,7 +2527,7 @@ static netdev_features_t net_mpls_features(struct sk_buff *skb, netdev_features_t features, __be16 type) { - if (type == htons(ETH_P_MPLS_UC) || type == htons(ETH_P_MPLS_MC)) + if (eth_p_mpls(type)) features &= skb->dev->mpls_features; return features; @@ -2647,12 +2644,8 @@ static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, netdev_features_t features) { if (vlan_tx_tag_present(skb) && - !vlan_hw_offload_capable(features, skb->vlan_proto)) { - skb = __vlan_put_tag(skb, skb->vlan_proto, - vlan_tx_tag_get(skb)); - if (skb) - skb->vlan_tci = 0; - } + !vlan_hw_offload_capable(features, skb->vlan_proto)) + skb = __vlan_hwaccel_push_inside(skb); return skb; } @@ -3304,7 +3297,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, rps_lock(sd); qlen = skb_queue_len(&sd->input_pkt_queue); if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { - if (skb_queue_len(&sd->input_pkt_queue)) { + if (qlen) { enqueue: __skb_queue_tail(&sd->input_pkt_queue, skb); input_queue_tail_incr_save(sd, qtail); @@ -4179,7 +4172,7 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi) struct sk_buff *skb = napi->skb; if (!skb) { - skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD); + skb = napi_alloc_skb(napi, GRO_MAX_HEAD); napi->skb = skb; } return skb; @@ -4316,20 +4309,28 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd) local_irq_enable(); } +static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) +{ +#ifdef CONFIG_RPS + return sd->rps_ipi_list != NULL; +#else + return false; 
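napi_complete_done() below lets GRO arm the new per-device gro_flush_timeout hrtimer instead of flushing unconditionally, and __napi_schedule_irqoff() skips the irq save/restore when hard irqs are already masked. A sketch of how a driver would use the pair (my_priv, my_clean_rx() and my_irq_enable()/my_irq_disable() are illustrative):

struct my_priv {
	struct napi_struct napi;
	/* ... */
};

static irqreturn_t my_isr(int irq, void *data)
{
	struct my_priv *priv = data;

	my_irq_disable(priv);			/* illustrative */
	/* hard irqs are masked in an irq handler, so the
	 * save/restore done by __napi_schedule() is wasted work
	 */
	__napi_schedule_irqoff(&priv->napi);
	return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = my_clean_rx(priv, budget);	/* illustrative */

	if (work_done < budget) {
		/* report work_done so a nonzero gro_flush_timeout can
		 * defer the GRO flush via the napi_watchdog hrtimer
		 */
		napi_complete_done(napi, work_done);
		my_irq_enable(priv);		/* illustrative */
	}
	return work_done;
}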
+#endif +} + static int process_backlog(struct napi_struct *napi, int quota) { int work = 0; struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); -#ifdef CONFIG_RPS /* Check if we have pending ipi, its better to send them now, * not waiting net_rx_action() end. */ - if (sd->rps_ipi_list) { + if (sd_has_rps_ipi_waiting(sd)) { local_irq_disable(); net_rps_action_and_irq_enable(sd); } -#endif + napi->weight = weight_p; local_irq_disable(); while (1) { @@ -4356,7 +4357,6 @@ static int process_backlog(struct napi_struct *napi, int quota) * We can use a plain write instead of clear_bit(), * and we dont need an smp_mb() memory barrier. */ - list_del(&napi->poll_list); napi->state = 0; rps_unlock(sd); @@ -4376,7 +4376,8 @@ static int process_backlog(struct napi_struct *napi, int quota) * __napi_schedule - schedule for receive * @n: entry to schedule * - * The entry's receive function will be scheduled to run + * The entry's receive function will be scheduled to run. + * Consider using __napi_schedule_irqoff() if hard irqs are masked. */ void __napi_schedule(struct napi_struct *n) { @@ -4388,18 +4389,29 @@ void __napi_schedule(struct napi_struct *n) } EXPORT_SYMBOL(__napi_schedule); +/** + * __napi_schedule_irqoff - schedule for receive + * @n: entry to schedule + * + * Variant of __napi_schedule() assuming hard irqs are masked + */ +void __napi_schedule_irqoff(struct napi_struct *n) +{ + ____napi_schedule(this_cpu_ptr(&softnet_data), n); +} +EXPORT_SYMBOL(__napi_schedule_irqoff); + void __napi_complete(struct napi_struct *n) { BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); - BUG_ON(n->gro_list); - list_del(&n->poll_list); + list_del_init(&n->poll_list); smp_mb__before_atomic(); clear_bit(NAPI_STATE_SCHED, &n->state); } EXPORT_SYMBOL(__napi_complete); -void napi_complete(struct napi_struct *n) +void napi_complete_done(struct napi_struct *n, int work_done) { unsigned long flags; @@ -4410,12 +4422,28 @@ void napi_complete(struct napi_struct *n) if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state))) return; - napi_gro_flush(n, false); - local_irq_save(flags); - __napi_complete(n); - local_irq_restore(flags); + if (n->gro_list) { + unsigned long timeout = 0; + + if (work_done) + timeout = n->dev->gro_flush_timeout; + + if (timeout) + hrtimer_start(&n->timer, ns_to_ktime(timeout), + HRTIMER_MODE_REL_PINNED); + else + napi_gro_flush(n, false); + } + if (likely(list_empty(&n->poll_list))) { + WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state)); + } else { + /* If n->poll_list is not empty, we need to mask irqs */ + local_irq_save(flags); + __napi_complete(n); + local_irq_restore(flags); + } } -EXPORT_SYMBOL(napi_complete); +EXPORT_SYMBOL(napi_complete_done); /* must be called under rcu_read_lock(), as we dont take a reference */ struct napi_struct *napi_by_id(unsigned int napi_id) @@ -4469,10 +4497,23 @@ void napi_hash_del(struct napi_struct *napi) } EXPORT_SYMBOL_GPL(napi_hash_del); +static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) +{ + struct napi_struct *napi; + + napi = container_of(timer, struct napi_struct, timer); + if (napi->gro_list) + napi_schedule(napi); + + return HRTIMER_NORESTART; +} + void netif_napi_add(struct net_device *dev, struct napi_struct *napi, int (*poll)(struct napi_struct *, int), int weight) { INIT_LIST_HEAD(&napi->poll_list); + hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); + napi->timer.function = napi_watchdog; napi->gro_count = 0; napi->gro_list = NULL; napi->skb = NULL; @@ -4491,6 +4532,20 @@ void 
netif_napi_add(struct net_device *dev, struct napi_struct *napi, } EXPORT_SYMBOL(netif_napi_add); +void napi_disable(struct napi_struct *n) +{ + might_sleep(); + set_bit(NAPI_STATE_DISABLE, &n->state); + + while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) + msleep(1); + + hrtimer_cancel(&n->timer); + + clear_bit(NAPI_STATE_DISABLE, &n->state); +} +EXPORT_SYMBOL(napi_disable); + void netif_napi_del(struct napi_struct *napi) { list_del_init(&napi->dev_list); @@ -4507,29 +4562,28 @@ static void net_rx_action(struct softirq_action *h) struct softnet_data *sd = this_cpu_ptr(&softnet_data); unsigned long time_limit = jiffies + 2; int budget = netdev_budget; + LIST_HEAD(list); + LIST_HEAD(repoll); void *have; local_irq_disable(); + list_splice_init(&sd->poll_list, &list); + local_irq_enable(); - while (!list_empty(&sd->poll_list)) { + while (!list_empty(&list)) { struct napi_struct *n; int work, weight; - /* If softirq window is exhuasted then punt. + /* If softirq window is exhausted then punt. * Allow this to run for 2 jiffies since which will allow * an average latency of 1.5/HZ. */ if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit))) goto softnet_break; - local_irq_enable(); - /* Even though interrupts have been re-enabled, this - * access is safe because interrupts can only add new - * entries to the tail of this list, and only ->poll() - * calls can remove this head entry from the list. - */ - n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list); + n = list_first_entry(&list, struct napi_struct, poll_list); + list_del_init(&n->poll_list); have = netpoll_poll_lock(n); @@ -4551,8 +4605,6 @@ static void net_rx_action(struct softirq_action *h) budget -= work; - local_irq_disable(); - /* Drivers must not modify the NAPI state if they * consume the entire weight. In such cases this code * still "owns" the NAPI instance and therefore can @@ -4560,32 +4612,40 @@ static void net_rx_action(struct softirq_action *h) */ if (unlikely(work == weight)) { if (unlikely(napi_disable_pending(n))) { - local_irq_enable(); napi_complete(n); - local_irq_disable(); } else { if (n->gro_list) { /* flush too old packets * If HZ < 1000, flush all packets. */ - local_irq_enable(); napi_gro_flush(n, HZ >= 1000); - local_irq_disable(); } - list_move_tail(&n->poll_list, &sd->poll_list); + list_add_tail(&n->poll_list, &repoll); } } netpoll_poll_unlock(have); } + + if (!sd_has_rps_ipi_waiting(sd) && + list_empty(&list) && + list_empty(&repoll)) + return; out: + local_irq_disable(); + + list_splice_tail_init(&sd->poll_list, &list); + list_splice_tail(&repoll, &list); + list_splice(&list, &sd->poll_list); + if (!list_empty(&sd->poll_list)) + __raise_softirq_irqoff(NET_RX_SOFTIRQ); + net_rps_action_and_irq_enable(sd); return; softnet_break: sd->time_squeeze++; - __raise_softirq_irqoff(NET_RX_SOFTIRQ); goto out; } @@ -5786,7 +5846,7 @@ EXPORT_SYMBOL(dev_change_carrier); * Get device physical port ID */ int dev_get_phys_port_id(struct net_device *dev, - struct netdev_phys_port_id *ppid) + struct netdev_phys_item_id *ppid) { const struct net_device_ops *ops = dev->netdev_ops; @@ -5865,6 +5925,8 @@ static void rollback_registered_many(struct list_head *head) synchronize_net(); list_for_each_entry(dev, head, unreg_list) { + struct sk_buff *skb = NULL; + /* Shutdown queueing discipline. 
*/ dev_shutdown(dev); @@ -5874,6 +5936,11 @@ static void rollback_registered_many(struct list_head *head) */ call_netdevice_notifiers(NETDEV_UNREGISTER, dev); + if (!dev->rtnl_link_ops || + dev->rtnl_link_state == RTNL_LINK_INITIALIZED) + skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, + GFP_KERNEL); + /* * Flush the unicast and multicast chains */ @@ -5883,9 +5950,8 @@ static void rollback_registered_many(struct list_head *head) if (dev->netdev_ops->ndo_uninit) dev->netdev_ops->ndo_uninit(dev); - if (!dev->rtnl_link_ops || - dev->rtnl_link_state == RTNL_LINK_INITIALIZED) - rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL); + if (skb) + rtmsg_ifinfo_send(skb, dev, GFP_KERNEL); /* Notifier chain MUST detach us all upper devices. */ WARN_ON(netdev_has_any_upper_dev(dev)); diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index b6b230600b974ab908679fb62d225ed084ea5276..c0548d268e1a2ffa381082324cd5497c1fd49ffb 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -278,8 +278,8 @@ int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, EXPORT_SYMBOL(__hw_addr_sync_dev); /** - * __hw_addr_unsync_dev - Remove synchonized addresses from device - * @list: address list to remove syncronized addresses from + * __hw_addr_unsync_dev - Remove synchronized addresses from device + * @list: address list to remove synchronized addresses from * @dev: device to sync * @unsync: function to call if address should be removed * diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index 72e899a3efdaadd82877550d4991b70424cff5a2..b94b1d29350603e19c3db8e6fd740d3f89440771 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -142,10 +142,12 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cm case SIOCGIFHWADDR: if (!dev->addr_len) - memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data); + memset(ifr->ifr_hwaddr.sa_data, 0, + sizeof(ifr->ifr_hwaddr.sa_data)); else memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr, - min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); + min(sizeof(ifr->ifr_hwaddr.sa_data), + (size_t)dev->addr_len)); ifr->ifr_hwaddr.sa_family = dev->type; return 0; @@ -265,7 +267,8 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) if (ifr->ifr_hwaddr.sa_family != dev->type) return -EINVAL; memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, - min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); + min(sizeof(ifr->ifr_hwaddr.sa_data), + (size_t)dev->addr_len)); call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); return 0; diff --git a/net/core/dst.c b/net/core/dst.c index a028409ee438c27d738deb1b065269a441a0f5f3..e956ce6d13782f2da0a229cabafef663665159eb 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -327,30 +327,6 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old) } EXPORT_SYMBOL(__dst_destroy_metrics_generic); -/** - * __skb_dst_set_noref - sets skb dst, without a reference - * @skb: buffer - * @dst: dst entry - * @force: if force is set, use noref version even for DST_NOCACHE entries - * - * Sets skb dst, assuming a reference was not taken on dst - * skb_dst_drop() should not dst_release() this dst - */ -void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, bool force) -{ - WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); - /* If dst not in cache, we must take a reference, because - * dst_release() will destroy dst as soon as its refcount becomes zero - */ - if (unlikely((dst->flags & DST_NOCACHE) && !force)) { 
- dst_hold(dst); - skb_dst_set(skb, dst); - } else { - skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; - } -} -EXPORT_SYMBOL(__skb_dst_set_noref); - /* Dirty hack. We did it in 2.2 (in __dst_free), * we have _very_ good reasons not to repeat * this mistake in 2.3, but we have no choice diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 06dfb293e5aafa10ab2754bfb80d29ad29f2db95..550892cd6b3ff4ec1325bc1094bb9f217409c7e2 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -25,6 +25,7 @@ #include #include #include +#include /* * Some useful ethtool_ops methods that're device independent. @@ -84,7 +85,6 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] [NETIF_F_GSO_IPIP_BIT] = "tx-ipip-segmentation", [NETIF_F_GSO_SIT_BIT] = "tx-sit-segmentation", [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation", - [NETIF_F_GSO_MPLS_BIT] = "tx-mpls-segmentation", [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", [NETIF_F_SCTP_CSUM_BIT] = "tx-checksum-sctp", @@ -100,6 +100,12 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] [NETIF_F_BUSY_POLL_BIT] = "busy-poll", }; +static const char +rss_hash_func_strings[ETH_RSS_HASH_FUNCS_COUNT][ETH_GSTRING_LEN] = { + [ETH_RSS_HASH_TOP_BIT] = "toeplitz", + [ETH_RSS_HASH_XOR_BIT] = "xor", +}; + static int ethtool_get_features(struct net_device *dev, void __user *useraddr) { struct ethtool_gfeatures cmd = { @@ -185,6 +191,9 @@ static int __ethtool_get_sset_count(struct net_device *dev, int sset) if (sset == ETH_SS_FEATURES) return ARRAY_SIZE(netdev_features_strings); + if (sset == ETH_SS_RSS_HASH_FUNCS) + return ARRAY_SIZE(rss_hash_func_strings); + if (ops->get_sset_count && ops->get_strings) return ops->get_sset_count(dev, sset); else @@ -199,6 +208,9 @@ static void __ethtool_get_strings(struct net_device *dev, if (stringset == ETH_SS_FEATURES) memcpy(data, netdev_features_strings, sizeof(netdev_features_strings)); + else if (stringset == ETH_SS_RSS_HASH_FUNCS) + memcpy(data, rss_hash_func_strings, + sizeof(rss_hash_func_strings)); else /* ops->get_strings is valid because checked earlier */ ops->get_strings(dev, stringset, data); @@ -574,6 +586,16 @@ static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr, return 0; } +u8 netdev_rss_key[NETDEV_RSS_KEY_LEN]; + +void netdev_rss_key_fill(void *buffer, size_t len) +{ + BUG_ON(len > sizeof(netdev_rss_key)); + net_get_random_once(netdev_rss_key, sizeof(netdev_rss_key)); + memcpy(buffer, netdev_rss_key, len); +} +EXPORT_SYMBOL(netdev_rss_key_fill); + static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev, void __user *useraddr) { @@ -608,7 +630,7 @@ static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev, if (!indir) return -ENOMEM; - ret = dev->ethtool_ops->get_rxfh(dev, indir, NULL); + ret = dev->ethtool_ops->get_rxfh(dev, indir, NULL, NULL); if (ret) goto out; @@ -669,7 +691,7 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev, goto out; } - ret = ops->set_rxfh(dev, indir, NULL); + ret = ops->set_rxfh(dev, indir, NULL, ETH_RSS_HASH_NO_CHANGE); out: kfree(indir); @@ -687,12 +709,11 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev, u32 total_size; u32 indir_bytes; u32 *indir = NULL; + u8 dev_hfunc = 0; u8 *hkey = NULL; u8 *rss_config; - if (!(dev->ethtool_ops->get_rxfh_indir_size || - dev->ethtool_ops->get_rxfh_key_size) || - !dev->ethtool_ops->get_rxfh) + if (!ops->get_rxfh) return -EOPNOTSUPP; if (ops->get_rxfh_indir_size) @@ 
-700,16 +721,14 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev, if (ops->get_rxfh_key_size) dev_key_size = ops->get_rxfh_key_size(dev); - if ((dev_key_size + dev_indir_size) == 0) - return -EOPNOTSUPP; - if (copy_from_user(&rxfh, useraddr, sizeof(rxfh))) return -EFAULT; user_indir_size = rxfh.indir_size; user_key_size = rxfh.key_size; /* Check that reserved fields are 0 for now */ - if (rxfh.rss_context || rxfh.rsvd[0] || rxfh.rsvd[1]) + if (rxfh.rss_context || rxfh.rsvd8[0] || rxfh.rsvd8[1] || + rxfh.rsvd8[2] || rxfh.rsvd32) return -EINVAL; rxfh.indir_size = dev_indir_size; @@ -717,13 +736,6 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev, if (copy_to_user(useraddr, &rxfh, sizeof(rxfh))) return -EFAULT; - /* If the user buffer size is 0, this is just a query for the - * device table size and key size. Otherwise, if the User size is - * not equal to device table size or key size it's an error. - */ - if (!user_indir_size && !user_key_size) - return 0; - if ((user_indir_size && (user_indir_size != dev_indir_size)) || (user_key_size && (user_key_size != dev_key_size))) return -EINVAL; @@ -740,14 +752,19 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev, if (user_key_size) hkey = rss_config + indir_bytes; - ret = dev->ethtool_ops->get_rxfh(dev, indir, hkey); - if (!ret) { - if (copy_to_user(useraddr + - offsetof(struct ethtool_rxfh, rss_config[0]), - rss_config, total_size)) - ret = -EFAULT; - } + ret = dev->ethtool_ops->get_rxfh(dev, indir, hkey, &dev_hfunc); + if (ret) + goto out; + if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, hfunc), + &dev_hfunc, sizeof(rxfh.hfunc))) { + ret = -EFAULT; + } else if (copy_to_user(useraddr + + offsetof(struct ethtool_rxfh, rss_config[0]), + rss_config, total_size)) { + ret = -EFAULT; + } +out: kfree(rss_config); return ret; @@ -766,33 +783,31 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, u8 *rss_config; u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]); - if (!(ops->get_rxfh_indir_size || ops->get_rxfh_key_size) || - !ops->get_rxnfc || !ops->set_rxfh) + if (!ops->get_rxnfc || !ops->set_rxfh) return -EOPNOTSUPP; if (ops->get_rxfh_indir_size) dev_indir_size = ops->get_rxfh_indir_size(dev); if (ops->get_rxfh_key_size) dev_key_size = dev->ethtool_ops->get_rxfh_key_size(dev); - if ((dev_key_size + dev_indir_size) == 0) - return -EOPNOTSUPP; if (copy_from_user(&rxfh, useraddr, sizeof(rxfh))) return -EFAULT; /* Check that reserved fields are 0 for now */ - if (rxfh.rss_context || rxfh.rsvd[0] || rxfh.rsvd[1]) + if (rxfh.rss_context || rxfh.rsvd8[0] || rxfh.rsvd8[1] || + rxfh.rsvd8[2] || rxfh.rsvd32) return -EINVAL; - /* If either indir or hash key is valid, proceed further. - * It is not valid to request that both be unchanged. + /* If either indir, hash key or function is valid, proceed further. + * Must request at least one change: indir size, hash key or function. 
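netdev_rss_key_fill() above hands every caller the same lazily initialized boot-time random key, so the ports of a multi-function device hash identically instead of each driver rolling its own prandom key. Driver use is one call at init time; a sketch (the 40-byte key size and my_hw_write_rss_key() are illustrative, and the length must not exceed NETDEV_RSS_KEY_LEN or the helper BUG_ONs):

u8 rss_key[40];		/* common Toeplitz key size */

netdev_rss_key_fill(rss_key, sizeof(rss_key));
my_hw_write_rss_key(priv, rss_key, sizeof(rss_key));	/* illustrative */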
*/ if ((rxfh.indir_size && rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE && rxfh.indir_size != dev_indir_size) || (rxfh.key_size && (rxfh.key_size != dev_key_size)) || (rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE && - rxfh.key_size == 0)) + rxfh.key_size == 0 && rxfh.hfunc == ETH_RSS_HASH_NO_CHANGE)) return -EINVAL; if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) @@ -835,7 +850,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, } } - ret = ops->set_rxfh(dev, indir, hkey); + ret = ops->set_rxfh(dev, indir, hkey, rxfh.hfunc); out: kfree(rss_config); diff --git a/net/core/filter.c b/net/core/filter.c index 647b12265e181b3ab650670f5572caa8a0f802a6..ec9baea10c16c78200ec1809a5491f3aa01e83de 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -44,6 +44,7 @@ #include #include #include +#include /** * sk_filter - run a packet through a socket filter @@ -813,8 +814,12 @@ static void bpf_release_orig_filter(struct bpf_prog *fp) static void __bpf_prog_release(struct bpf_prog *prog) { - bpf_release_orig_filter(prog); - bpf_prog_free(prog); + if (prog->aux->prog_type == BPF_PROG_TYPE_SOCKET_FILTER) { + bpf_prog_put(prog); + } else { + bpf_release_orig_filter(prog); + bpf_prog_free(prog); + } } static void __sk_filter_release(struct sk_filter *fp) @@ -1088,6 +1093,94 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) } EXPORT_SYMBOL_GPL(sk_attach_filter); +#ifdef CONFIG_BPF_SYSCALL +int sk_attach_bpf(u32 ufd, struct sock *sk) +{ + struct sk_filter *fp, *old_fp; + struct bpf_prog *prog; + + if (sock_flag(sk, SOCK_FILTER_LOCKED)) + return -EPERM; + + prog = bpf_prog_get(ufd); + if (IS_ERR(prog)) + return PTR_ERR(prog); + + if (prog->aux->prog_type != BPF_PROG_TYPE_SOCKET_FILTER) { + /* valid fd, but invalid program type */ + bpf_prog_put(prog); + return -EINVAL; + } + + fp = kmalloc(sizeof(*fp), GFP_KERNEL); + if (!fp) { + bpf_prog_put(prog); + return -ENOMEM; + } + fp->prog = prog; + + atomic_set(&fp->refcnt, 0); + + if (!sk_filter_charge(sk, fp)) { + __sk_filter_release(fp); + return -ENOMEM; + } + + old_fp = rcu_dereference_protected(sk->sk_filter, + sock_owned_by_user(sk)); + rcu_assign_pointer(sk->sk_filter, fp); + + if (old_fp) + sk_filter_uncharge(sk, old_fp); + + return 0; +} + +/* allow socket filters to call + * bpf_map_lookup_elem(), bpf_map_update_elem(), bpf_map_delete_elem() + */ +static const struct bpf_func_proto *sock_filter_func_proto(enum bpf_func_id func_id) +{ + switch (func_id) { + case BPF_FUNC_map_lookup_elem: + return &bpf_map_lookup_elem_proto; + case BPF_FUNC_map_update_elem: + return &bpf_map_update_elem_proto; + case BPF_FUNC_map_delete_elem: + return &bpf_map_delete_elem_proto; + default: + return NULL; + } +} + +static bool sock_filter_is_valid_access(int off, int size, enum bpf_access_type type) +{ + /* skb fields cannot be accessed yet */ + return false; +} + +static struct bpf_verifier_ops sock_filter_ops = { + .get_func_proto = sock_filter_func_proto, + .is_valid_access = sock_filter_is_valid_access, +}; + +static struct bpf_prog_type_list tl = { + .ops = &sock_filter_ops, + .type = BPF_PROG_TYPE_SOCKET_FILTER, +}; + +static int __init register_sock_filter_ops(void) +{ + bpf_register_prog_type(&tl); + return 0; +} +late_initcall(register_sock_filter_ops); +#else +int sk_attach_bpf(u32 ufd, struct sock *sk) +{ + return -EOPNOTSUPP; +} +#endif int sk_detach_filter(struct sock *sk) { int ret = -ENOENT; diff --git a/net/core/iovec.c b/net/core/iovec.c index 
e1ec45ab1e63c6909073691907c80e57bac89643..dcbe98b3726aeb8f3e11791ebe5ace3e14b67216 100644 --- a/net/core/iovec.c +++ b/net/core/iovec.c @@ -27,53 +27,6 @@ #include #include -/* - * Verify iovec. The caller must ensure that the iovec is big enough - * to hold the message iovec. - * - * Save time not doing access_ok. copy_*_user will make this work - * in any case. - */ - -int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode) -{ - int size, ct, err; - - if (m->msg_name && m->msg_namelen) { - if (mode == VERIFY_READ) { - void __user *namep; - namep = (void __user __force *) m->msg_name; - err = move_addr_to_kernel(namep, m->msg_namelen, - address); - if (err < 0) - return err; - } - m->msg_name = address; - } else { - m->msg_name = NULL; - m->msg_namelen = 0; - } - - size = m->msg_iovlen * sizeof(struct iovec); - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size)) - return -EFAULT; - - m->msg_iov = iov; - err = 0; - - for (ct = 0; ct < m->msg_iovlen; ct++) { - size_t len = iov[ct].iov_len; - - if (len > INT_MAX - err) { - len = INT_MAX - err; - iov[ct].iov_len = len; - } - err += len; - } - - return err; -} - /* * And now for the all-in-one: copy and checksum from a user iovec * directly to a datagram diff --git a/net/core/link_watch.c b/net/core/link_watch.c index bd0767e6b2b31747a08d02a1f45f7dd733be9a4f..49a9e3e06c085dbcb545e766c96186fba2dac45a 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include enum lw_bits { diff --git a/net/core/neighbour.c b/net/core/neighbour.c index ef31fef25e5a872eb6565c1d57953d0c6558d2c7..8e38f17288d3c5a475471b0e56e339d9b6d5bf9e 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -56,7 +56,6 @@ static void __neigh_notify(struct neighbour *n, int type, int flags); static void neigh_update_notify(struct neighbour *neigh); static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev); -static struct neigh_table *neigh_tables; #ifdef CONFIG_PROC_FS static const struct file_operations neigh_stat_seq_fops; #endif @@ -87,13 +86,8 @@ static const struct file_operations neigh_stat_seq_fops; the most complicated procedure, which we allow is dev->hard_header. It is supposed, that dev->hard_header is simplistic and does not make callbacks to neighbour tables. - - The last lock is neigh_tbl_lock. It is pure SMP lock, protecting - list of neighbour tables. 
This list is used only in process context, */ -static DEFINE_RWLOCK(neigh_tbl_lock); - static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb) { kfree_skb(skb); @@ -773,7 +767,7 @@ static void neigh_periodic_work(struct work_struct *work) if (time_after(jiffies, tbl->last_rand + 300 * HZ)) { struct neigh_parms *p; tbl->last_rand = jiffies; - for (p = &tbl->parms; p; p = p->next) + list_for_each_entry(p, &tbl->parms_list, list) p->reachable_time = neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); } @@ -1446,7 +1440,7 @@ static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl, { struct neigh_parms *p; - for (p = &tbl->parms; p; p = p->next) { + list_for_each_entry(p, &tbl->parms_list, list) { if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) || (!p->dev && !ifindex && net_eq(net, &init_net))) return p; @@ -1481,8 +1475,7 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev, } write_lock_bh(&tbl->lock); - p->next = tbl->parms.next; - tbl->parms.next = p; + list_add(&p->list, &tbl->parms.list); write_unlock_bh(&tbl->lock); neigh_parms_data_state_cleanall(p); @@ -1501,24 +1494,15 @@ static void neigh_rcu_free_parms(struct rcu_head *head) void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms) { - struct neigh_parms **p; - if (!parms || parms == &tbl->parms) return; write_lock_bh(&tbl->lock); - for (p = &tbl->parms.next; *p; p = &(*p)->next) { - if (*p == parms) { - *p = parms->next; - parms->dead = 1; - write_unlock_bh(&tbl->lock); - if (parms->dev) - dev_put(parms->dev); - call_rcu(&parms->rcu_head, neigh_rcu_free_parms); - return; - } - } + list_del(&parms->list); + parms->dead = 1; write_unlock_bh(&tbl->lock); - neigh_dbg(1, "%s: not found\n", __func__); + if (parms->dev) + dev_put(parms->dev); + call_rcu(&parms->rcu_head, neigh_rcu_free_parms); } EXPORT_SYMBOL(neigh_parms_release); @@ -1530,11 +1514,15 @@ static void neigh_parms_destroy(struct neigh_parms *parms) static struct lock_class_key neigh_table_proxy_queue_class; -static void neigh_table_init_no_netlink(struct neigh_table *tbl) +static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly; + +void neigh_table_init(int index, struct neigh_table *tbl) { unsigned long now = jiffies; unsigned long phsize; + INIT_LIST_HEAD(&tbl->parms_list); + list_add(&tbl->parms.list, &tbl->parms_list); write_pnet(&tbl->parms.net, &init_net); atomic_set(&tbl->parms.refcnt, 1); tbl->parms.reachable_time = @@ -1574,34 +1562,14 @@ static void neigh_table_init_no_netlink(struct neigh_table *tbl) tbl->last_flush = now; tbl->last_rand = now + tbl->parms.reachable_time * 20; -} - -void neigh_table_init(struct neigh_table *tbl) -{ - struct neigh_table *tmp; - - neigh_table_init_no_netlink(tbl); - write_lock(&neigh_tbl_lock); - for (tmp = neigh_tables; tmp; tmp = tmp->next) { - if (tmp->family == tbl->family) - break; - } - tbl->next = neigh_tables; - neigh_tables = tbl; - write_unlock(&neigh_tbl_lock); - if (unlikely(tmp)) { - pr_err("Registering multiple tables for family %d\n", - tbl->family); - dump_stack(); - } + neigh_tables[index] = tbl; } EXPORT_SYMBOL(neigh_table_init); -int neigh_table_clear(struct neigh_table *tbl) +int neigh_table_clear(int index, struct neigh_table *tbl) { - struct neigh_table **tp; - + neigh_tables[index] = NULL; /* It is not clean... 
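/* Registration sketch under the new fixed-array scheme: protocols pass
 * an explicit NEIGH_*_TABLE slot, so the neigh_tbl_lock-protected list
 * walk goes away entirely. Assuming the IPv4 side is converted the same
 * way as the decnet code later in this patch, ARP-style setup/teardown
 * becomes (function names here are illustrative):
 */
static void __init myproto_neigh_init(void)
{
	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
}

static void myproto_neigh_cleanup(void)
{
	neigh_table_clear(NEIGH_ARP_TABLE, &arp_tbl);
}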
Fix it to unload IPv6 module safely */ cancel_delayed_work_sync(&tbl->gc_work); del_timer_sync(&tbl->proxy_timer); @@ -1609,14 +1577,6 @@ int neigh_table_clear(struct neigh_table *tbl) neigh_ifdown(tbl, NULL); if (atomic_read(&tbl->entries)) pr_crit("neighbour leakage\n"); - write_lock(&neigh_tbl_lock); - for (tp = &neigh_tables; *tp; tp = &(*tp)->next) { - if (*tp == tbl) { - *tp = tbl->next; - break; - } - } - write_unlock(&neigh_tbl_lock); call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu, neigh_hash_free_rcu); @@ -1634,12 +1594,32 @@ int neigh_table_clear(struct neigh_table *tbl) } EXPORT_SYMBOL(neigh_table_clear); +static struct neigh_table *neigh_find_table(int family) +{ + struct neigh_table *tbl = NULL; + + switch (family) { + case AF_INET: + tbl = neigh_tables[NEIGH_ARP_TABLE]; + break; + case AF_INET6: + tbl = neigh_tables[NEIGH_ND_TABLE]; + break; + case AF_DECnet: + tbl = neigh_tables[NEIGH_DN_TABLE]; + break; + } + + return tbl; +} + static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct ndmsg *ndm; struct nlattr *dst_attr; struct neigh_table *tbl; + struct neighbour *neigh; struct net_device *dev = NULL; int err = -EINVAL; @@ -1660,39 +1640,31 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh) } } - read_lock(&neigh_tbl_lock); - for (tbl = neigh_tables; tbl; tbl = tbl->next) { - struct neighbour *neigh; + tbl = neigh_find_table(ndm->ndm_family); + if (tbl == NULL) + return -EAFNOSUPPORT; - if (tbl->family != ndm->ndm_family) - continue; - read_unlock(&neigh_tbl_lock); - - if (nla_len(dst_attr) < tbl->key_len) - goto out; - - if (ndm->ndm_flags & NTF_PROXY) { - err = pneigh_delete(tbl, net, nla_data(dst_attr), dev); - goto out; - } + if (nla_len(dst_attr) < tbl->key_len) + goto out; - if (dev == NULL) - goto out; + if (ndm->ndm_flags & NTF_PROXY) { + err = pneigh_delete(tbl, net, nla_data(dst_attr), dev); + goto out; + } - neigh = neigh_lookup(tbl, nla_data(dst_attr), dev); - if (neigh == NULL) { - err = -ENOENT; - goto out; - } + if (dev == NULL) + goto out; - err = neigh_update(neigh, NULL, NUD_FAILED, - NEIGH_UPDATE_F_OVERRIDE | - NEIGH_UPDATE_F_ADMIN); - neigh_release(neigh); + neigh = neigh_lookup(tbl, nla_data(dst_attr), dev); + if (neigh == NULL) { + err = -ENOENT; goto out; } - read_unlock(&neigh_tbl_lock); - err = -EAFNOSUPPORT; + + err = neigh_update(neigh, NULL, NUD_FAILED, + NEIGH_UPDATE_F_OVERRIDE | + NEIGH_UPDATE_F_ADMIN); + neigh_release(neigh); out: return err; @@ -1700,11 +1672,14 @@ out: static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh) { + int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE; struct net *net = sock_net(skb->sk); struct ndmsg *ndm; struct nlattr *tb[NDA_MAX+1]; struct neigh_table *tbl; struct net_device *dev = NULL; + struct neighbour *neigh; + void *dst, *lladdr; int err; ASSERT_RTNL(); @@ -1728,70 +1703,60 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh) goto out; } - read_lock(&neigh_tbl_lock); - for (tbl = neigh_tables; tbl; tbl = tbl->next) { - int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE; - struct neighbour *neigh; - void *dst, *lladdr; + tbl = neigh_find_table(ndm->ndm_family); + if (tbl == NULL) + return -EAFNOSUPPORT; - if (tbl->family != ndm->ndm_family) - continue; - read_unlock(&neigh_tbl_lock); + if (nla_len(tb[NDA_DST]) < tbl->key_len) + goto out; + dst = nla_data(tb[NDA_DST]); + lladdr = tb[NDA_LLADDR] ? 
nla_data(tb[NDA_LLADDR]) : NULL; - if (nla_len(tb[NDA_DST]) < tbl->key_len) - goto out; - dst = nla_data(tb[NDA_DST]); - lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL; + if (ndm->ndm_flags & NTF_PROXY) { + struct pneigh_entry *pn; - if (ndm->ndm_flags & NTF_PROXY) { - struct pneigh_entry *pn; + err = -ENOBUFS; + pn = pneigh_lookup(tbl, net, dst, dev, 1); + if (pn) { + pn->flags = ndm->ndm_flags; + err = 0; + } + goto out; + } - err = -ENOBUFS; - pn = pneigh_lookup(tbl, net, dst, dev, 1); - if (pn) { - pn->flags = ndm->ndm_flags; - err = 0; - } + if (dev == NULL) + goto out; + + neigh = neigh_lookup(tbl, dst, dev); + if (neigh == NULL) { + if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { + err = -ENOENT; goto out; } - if (dev == NULL) + neigh = __neigh_lookup_errno(tbl, dst, dev); + if (IS_ERR(neigh)) { + err = PTR_ERR(neigh); + goto out; + } + } else { + if (nlh->nlmsg_flags & NLM_F_EXCL) { + err = -EEXIST; + neigh_release(neigh); goto out; - - neigh = neigh_lookup(tbl, dst, dev); - if (neigh == NULL) { - if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { - err = -ENOENT; - goto out; - } - - neigh = __neigh_lookup_errno(tbl, dst, dev); - if (IS_ERR(neigh)) { - err = PTR_ERR(neigh); - goto out; - } - } else { - if (nlh->nlmsg_flags & NLM_F_EXCL) { - err = -EEXIST; - neigh_release(neigh); - goto out; - } - - if (!(nlh->nlmsg_flags & NLM_F_REPLACE)) - flags &= ~NEIGH_UPDATE_F_OVERRIDE; } - if (ndm->ndm_flags & NTF_USE) { - neigh_event_send(neigh, NULL); - err = 0; - } else - err = neigh_update(neigh, lladdr, ndm->ndm_state, flags); - neigh_release(neigh); - goto out; + if (!(nlh->nlmsg_flags & NLM_F_REPLACE)) + flags &= ~NEIGH_UPDATE_F_OVERRIDE; } - read_unlock(&neigh_tbl_lock); - err = -EAFNOSUPPORT; + if (ndm->ndm_flags & NTF_USE) { + neigh_event_send(neigh, NULL); + err = 0; + } else + err = neigh_update(neigh, lladdr, ndm->ndm_state, flags); + neigh_release(neigh); + out: return err; } @@ -1990,7 +1955,8 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh) struct neigh_table *tbl; struct ndtmsg *ndtmsg; struct nlattr *tb[NDTA_MAX+1]; - int err; + bool found = false; + int err, tidx; err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX, nl_neightbl_policy); @@ -2003,19 +1969,21 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh) } ndtmsg = nlmsg_data(nlh); - read_lock(&neigh_tbl_lock); - for (tbl = neigh_tables; tbl; tbl = tbl->next) { + + for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) { + tbl = neigh_tables[tidx]; + if (!tbl) + continue; if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family) continue; - - if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) + if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) { + found = true; break; + } } - if (tbl == NULL) { - err = -ENOENT; - goto errout_locked; - } + if (!found) + return -ENOENT; /* * We acquire tbl->lock to be nice to the periodic timers and @@ -2126,8 +2094,6 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh) errout_tbl_lock: write_unlock_bh(&tbl->lock); -errout_locked: - read_unlock(&neigh_tbl_lock); errout: return err; } @@ -2142,10 +2108,13 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family; - read_lock(&neigh_tbl_lock); - for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) { + for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) { struct neigh_parms *p; + tbl = neigh_tables[tidx]; + if (!tbl) + continue; + if (tidx < tbl_skip || (family && tbl->family != family)) continue; @@ -2154,7 
+2123,9 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) NLM_F_MULTI) <= 0) break; - for (nidx = 0, p = tbl->parms.next; p; p = p->next) { + nidx = 0; + p = list_next_entry(&tbl->parms, list); + list_for_each_entry_from(p, &tbl->parms_list, list) { if (!net_eq(neigh_parms_net(p), net)) continue; @@ -2174,7 +2145,6 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) neigh_skip = 0; } out: - read_unlock(&neigh_tbl_lock); cb->args[0] = tidx; cb->args[1] = nidx; @@ -2357,7 +2327,6 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) int proxy = 0; int err; - read_lock(&neigh_tbl_lock); family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family; /* check for full ndmsg structure presence, family member is @@ -2369,8 +2338,11 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) s_t = cb->args[0]; - for (tbl = neigh_tables, t = 0; tbl; - tbl = tbl->next, t++) { + for (t = 0; t < NEIGH_NR_TABLES; t++) { + tbl = neigh_tables[t]; + + if (!tbl) + continue; if (t < s_t || (family && tbl->family != family)) continue; if (t > s_t) @@ -2383,7 +2355,6 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) if (err < 0) break; } - read_unlock(&neigh_tbl_lock); cb->args[0] = t; return skb->len; diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 9dd06699b09c9434e60aa40a948adf69d5505d2d..999341244434309b6275fe6230a9f1bf7c57d2b2 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -325,6 +326,23 @@ static ssize_t tx_queue_len_store(struct device *dev, } NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong); +static int change_gro_flush_timeout(struct net_device *dev, unsigned long val) +{ + dev->gro_flush_timeout = val; + return 0; +} + +static ssize_t gro_flush_timeout_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + return netdev_store(dev, attr, buf, len, change_gro_flush_timeout); +} +NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong); + static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { @@ -387,7 +405,7 @@ static ssize_t phys_port_id_show(struct device *dev, return restart_syscall(); if (dev_isalive(netdev)) { - struct netdev_phys_port_id ppid; + struct netdev_phys_item_id ppid; ret = dev_get_phys_port_id(netdev, &ppid); if (!ret) @@ -399,6 +417,28 @@ static ssize_t phys_port_id_show(struct device *dev, } static DEVICE_ATTR_RO(phys_port_id); +static ssize_t phys_switch_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_dev(dev); + ssize_t ret = -EINVAL; + + if (!rtnl_trylock()) + return restart_syscall(); + + if (dev_isalive(netdev)) { + struct netdev_phys_item_id ppid; + + ret = netdev_switch_parent_id_get(netdev, &ppid); + if (!ret) + ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id); + } + rtnl_unlock(); + + return ret; +} +static DEVICE_ATTR_RO(phys_switch_id); + static struct attribute *net_class_attrs[] = { &dev_attr_netdev_group.attr, &dev_attr_type.attr, @@ -422,7 +462,9 @@ static struct attribute *net_class_attrs[] = { &dev_attr_mtu.attr, &dev_attr_flags.attr, &dev_attr_tx_queue_len.attr, + &dev_attr_gro_flush_timeout.attr, &dev_attr_phys_port_id.attr, + &dev_attr_phys_switch_id.attr, NULL, }; ATTRIBUTE_GROUPS(net_class); diff --git 
a/net/core/netpoll.c b/net/core/netpoll.c index e6645b4f330af1d16607ca0d6d9c9c95fd163dc6..e0ad5d16c9c56947163d81201af07e26d1d3017c 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -79,8 +79,7 @@ static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev, if (vlan_tx_tag_present(skb) && !vlan_hw_offload_capable(features, skb->vlan_proto)) { - skb = __vlan_put_tag(skb, skb->vlan_proto, - vlan_tx_tag_get(skb)); + skb = __vlan_hwaccel_push_inside(skb); if (unlikely(!skb)) { /* This is actually a packet drop, but we * don't want the code that calls this @@ -88,7 +87,6 @@ static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev, */ goto out; } - skb->vlan_tci = 0; } status = netdev_start_xmit(skb, dev, txq, false); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 443256bdcddc8e01c2ba0f0b01c0b0bf78a6608b..da934fc3faa824458b6c8a139e929d72876d57d2 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3728,8 +3728,7 @@ static int pktgen_remove_device(struct pktgen_thread *t, /* Remove proc before if_list entry, because add_device uses * list to determine if interface already exist, avoid race * with proc_create_data() */ - if (pkt_dev->entry) - proc_remove(pkt_dev->entry); + proc_remove(pkt_dev->entry); /* And update the thread if_list */ _rem_dev_from_if_list(t, pkt_dev); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 88e8de3b59b0f4e1b251928a6088019b6513ce68..d06107d36ec892a2168e7638e43cf004d8f7f92a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -43,6 +44,7 @@ #include #include +#include #include #include #include @@ -868,7 +870,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */ + rtnl_link_get_size(dev) /* IFLA_LINKINFO */ + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */ - + nla_total_size(MAX_PHYS_PORT_ID_LEN); /* IFLA_PHYS_PORT_ID */ + + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */ + + nla_total_size(MAX_PHYS_ITEM_ID_LEN); /* IFLA_PHYS_SWITCH_ID */ } static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev) @@ -952,7 +955,7 @@ static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev, static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev) { int err; - struct netdev_phys_port_id ppid; + struct netdev_phys_item_id ppid; err = dev_get_phys_port_id(dev, &ppid); if (err) { @@ -967,6 +970,24 @@ static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev) return 0; } +static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev) +{ + int err; + struct netdev_phys_item_id psid; + + err = netdev_switch_parent_id_get(dev, &psid); + if (err) { + if (err == -EOPNOTSUPP) + return 0; + return err; + } + + if (nla_put(skb, IFLA_PHYS_SWITCH_ID, psid.id_len, psid.id)) + return -EMSGSIZE; + + return 0; +} + static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, int type, u32 pid, u32 seq, u32 change, unsigned int flags, u32 ext_filter_mask) @@ -1039,6 +1060,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, if (rtnl_phys_port_id_fill(skb, dev)) goto nla_put_failure; + if (rtnl_phys_switch_id_fill(skb, dev)) + goto nla_put_failure; + attr = nla_reserve(skb, IFLA_STATS, sizeof(struct rtnl_link_stats)); if (attr == NULL) @@ -1196,8 +1220,9 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = { 
[IFLA_PROMISCUITY] = { .type = NLA_U32 }, [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 }, [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 }, - [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_PORT_ID_LEN }, + [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN }, [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */ + [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN }, }; static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { @@ -2221,8 +2246,8 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } -void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, - gfp_t flags) +struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, + unsigned int change, gfp_t flags) { struct net *net = dev_net(dev); struct sk_buff *skb; @@ -2240,11 +2265,28 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, kfree_skb(skb); goto errout; } - rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags); - return; + return skb; errout: if (err < 0) rtnl_set_sk_err(net, RTNLGRP_LINK, err); + return NULL; +} + +void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags) +{ + struct net *net = dev_net(dev); + + rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags); +} + +void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, + gfp_t flags) +{ + struct sk_buff *skb; + + skb = rtmsg_ifinfo_build_skb(type, dev, change, flags); + if (skb) + rtmsg_ifinfo_send(skb, dev, flags); } EXPORT_SYMBOL(rtmsg_ifinfo); @@ -2313,7 +2355,7 @@ errout: int ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr, + const unsigned char *addr, u16 vid, u16 flags) { int err = -EINVAL; @@ -2339,6 +2381,28 @@ int ndo_dflt_fdb_add(struct ndmsg *ndm, } EXPORT_SYMBOL(ndo_dflt_fdb_add); +static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid) +{ + u16 vid = 0; + + if (vlan_attr) { + if (nla_len(vlan_attr) != sizeof(u16)) { + pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid vlan\n"); + return -EINVAL; + } + + vid = nla_get_u16(vlan_attr); + + if (!vid || vid >= VLAN_VID_MASK) { + pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid vlan id %d\n", + vid); + return -EINVAL; + } + } + *p_vid = vid; + return 0; +} + static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); @@ -2346,6 +2410,7 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh) struct nlattr *tb[NDA_MAX+1]; struct net_device *dev; u8 *addr; + u16 vid; int err; err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL); @@ -2371,6 +2436,10 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh) addr = nla_data(tb[NDA_LLADDR]); + err = fdb_vid_parse(tb[NDA_VLAN], &vid); + if (err) + return err; + err = -EOPNOTSUPP; /* Support fdb on master device the net/bridge default case */ @@ -2379,7 +2448,8 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh) struct net_device *br_dev = netdev_master_upper_dev_get(dev); const struct net_device_ops *ops = br_dev->netdev_ops; - err = ops->ndo_fdb_add(ndm, tb, dev, addr, nlh->nlmsg_flags); + err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid, + nlh->nlmsg_flags); if (err) goto out; else @@ -2390,9 +2460,10 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh) if ((ndm->ndm_flags & NTF_SELF)) { if (dev->netdev_ops->ndo_fdb_add) err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr, + vid, nlh->nlmsg_flags); else - 
err = ndo_dflt_fdb_add(ndm, tb, dev, addr, + err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, nlh->nlmsg_flags); if (!err) { @@ -2410,7 +2481,7 @@ out: int ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr) + const unsigned char *addr, u16 vid) { int err = -EINVAL; @@ -2439,6 +2510,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh) struct net_device *dev; int err = -EINVAL; __u8 *addr; + u16 vid; if (!netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; @@ -2466,6 +2538,10 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh) addr = nla_data(tb[NDA_LLADDR]); + err = fdb_vid_parse(tb[NDA_VLAN], &vid); + if (err) + return err; + err = -EOPNOTSUPP; /* Support fdb on master device the net/bridge default case */ @@ -2475,7 +2551,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh) const struct net_device_ops *ops = br_dev->netdev_ops; if (ops->ndo_fdb_del) - err = ops->ndo_fdb_del(ndm, tb, dev, addr); + err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid); if (err) goto out; @@ -2486,9 +2562,10 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh) /* Embedded bridge, macvlan, and any other device support */ if (ndm->ndm_flags & NTF_SELF) { if (dev->netdev_ops->ndo_fdb_del) - err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr); + err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr, + vid); else - err = ndo_dflt_fdb_del(ndm, tb, dev, addr); + err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid); if (!err) { rtnl_fdb_notify(dev, addr, RTM_DELNEIGH); @@ -2628,12 +2705,22 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } +static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask, + unsigned int attrnum, unsigned int flag) +{ + if (mask & flag) + return nla_put_u8(skb, attrnum, !!(flags & flag)); + return 0; +} + int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, u16 mode) + struct net_device *dev, u16 mode, + u32 flags, u32 mask) { struct nlmsghdr *nlh; struct ifinfomsg *ifm; struct nlattr *br_afspec; + struct nlattr *protinfo; u8 operstate = netif_running(dev) ? 
dev->operstate : IF_OPER_DOWN; struct net_device *br_dev = netdev_master_upper_dev_get(dev); @@ -2665,13 +2752,46 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, if (!br_afspec) goto nla_put_failure; - if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF) || - nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) { + if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) { nla_nest_cancel(skb, br_afspec); goto nla_put_failure; } + + if (mode != BRIDGE_MODE_UNDEF) { + if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) { + nla_nest_cancel(skb, br_afspec); + goto nla_put_failure; + } + } nla_nest_end(skb, br_afspec); + protinfo = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED); + if (!protinfo) + goto nla_put_failure; + + if (brport_nla_put_flag(skb, flags, mask, + IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) || + brport_nla_put_flag(skb, flags, mask, + IFLA_BRPORT_GUARD, BR_BPDU_GUARD) || + brport_nla_put_flag(skb, flags, mask, + IFLA_BRPORT_FAST_LEAVE, + BR_MULTICAST_FAST_LEAVE) || + brport_nla_put_flag(skb, flags, mask, + IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) || + brport_nla_put_flag(skb, flags, mask, + IFLA_BRPORT_LEARNING, BR_LEARNING) || + brport_nla_put_flag(skb, flags, mask, + IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) || + brport_nla_put_flag(skb, flags, mask, + IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) || + brport_nla_put_flag(skb, flags, mask, + IFLA_BRPORT_PROXYARP, BR_PROXYARP)) { + nla_nest_cancel(skb, protinfo); + goto nla_put_failure; + } + + nla_nest_end(skb, protinfo); + return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_cancel(skb, nlh); diff --git a/net/core/scm.c b/net/core/scm.c index b442e7e25e601b19a8237d1a90efbaa1d572fc19..3b6899b7d810d569057b051162b51fde7c51cba8 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -129,8 +129,7 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p) struct cmsghdr *cmsg; int err; - for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) - { + for_each_cmsghdr(cmsg, msg) { err = -EINVAL; /* Verify that cmsg_len is at least sizeof(struct cmsghdr) */ diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 32e31c2996315770149b71b1e17837311d672a1e..ae13ef6b3ea7f40b6843fa2d5b33fc06de0cb0b5 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -265,7 +265,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, skb->fclone = SKB_FCLONE_ORIG; atomic_set(&fclones->fclone_ref, 1); - fclones->skb2.fclone = SKB_FCLONE_FREE; + fclones->skb2.fclone = SKB_FCLONE_CLONE; fclones->skb2.pfmemalloc = pfmemalloc; } out: @@ -336,59 +336,85 @@ struct netdev_alloc_cache { unsigned int pagecnt_bias; }; static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache); +static DEFINE_PER_CPU(struct netdev_alloc_cache, napi_alloc_cache); -static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) +static struct page *__page_frag_refill(struct netdev_alloc_cache *nc, + gfp_t gfp_mask) { - struct netdev_alloc_cache *nc; - void *data = NULL; - int order; - unsigned long flags; + const unsigned int order = NETDEV_FRAG_PAGE_MAX_ORDER; + struct page *page = NULL; + gfp_t gfp = gfp_mask; + + if (order) { + gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY; + page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); + nc->frag.size = PAGE_SIZE << (page ? 
order : 0); + } - local_irq_save(flags); - nc = this_cpu_ptr(&netdev_alloc_cache); - if (unlikely(!nc->frag.page)) { + if (unlikely(!page)) + page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); + + nc->frag.page = page; + + return page; +} + +static void *__alloc_page_frag(struct netdev_alloc_cache __percpu *cache, + unsigned int fragsz, gfp_t gfp_mask) +{ + struct netdev_alloc_cache *nc = this_cpu_ptr(cache); + struct page *page = nc->frag.page; + unsigned int size; + int offset; + + if (unlikely(!page)) { refill: - for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) { - gfp_t gfp = gfp_mask; + page = __page_frag_refill(nc, gfp_mask); + if (!page) + return NULL; + + /* if size can vary use frag.size else just use PAGE_SIZE */ + size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE; - if (order) - gfp |= __GFP_COMP | __GFP_NOWARN; - nc->frag.page = alloc_pages(gfp, order); - if (likely(nc->frag.page)) - break; - if (--order < 0) - goto end; - } - nc->frag.size = PAGE_SIZE << order; /* Even if we own the page, we do not use atomic_set(). * This would break get_page_unless_zero() users. */ - atomic_add(NETDEV_PAGECNT_MAX_BIAS - 1, - &nc->frag.page->_count); - nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS; - nc->frag.offset = 0; + atomic_add(size - 1, &page->_count); + + /* reset page count bias and offset to start of new frag */ + nc->pagecnt_bias = size; + nc->frag.offset = size; } - if (nc->frag.offset + fragsz > nc->frag.size) { - if (atomic_read(&nc->frag.page->_count) != nc->pagecnt_bias) { - if (!atomic_sub_and_test(nc->pagecnt_bias, - &nc->frag.page->_count)) - goto refill; - /* OK, page count is 0, we can safely set it */ - atomic_set(&nc->frag.page->_count, - NETDEV_PAGECNT_MAX_BIAS); - } else { - atomic_add(NETDEV_PAGECNT_MAX_BIAS - nc->pagecnt_bias, - &nc->frag.page->_count); - } - nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS; - nc->frag.offset = 0; + offset = nc->frag.offset - fragsz; + if (unlikely(offset < 0)) { + if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count)) + goto refill; + + /* if size can vary use frag.size else just use PAGE_SIZE */ + size = NETDEV_FRAG_PAGE_MAX_ORDER ? 
nc->frag.size : PAGE_SIZE; + + /* OK, page count is 0, we can safely set it */ + atomic_set(&page->_count, size); + + /* reset page count bias and offset to start of new frag */ + nc->pagecnt_bias = size; + offset = size - fragsz; } - data = page_address(nc->frag.page) + nc->frag.offset; - nc->frag.offset += fragsz; nc->pagecnt_bias--; -end: + nc->frag.offset = offset; + + return page_address(page) + offset; +} + +static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) +{ + unsigned long flags; + void *data; + + local_irq_save(flags); + data = __alloc_page_frag(&netdev_alloc_cache, fragsz, gfp_mask); local_irq_restore(flags); return data; } @@ -406,11 +432,25 @@ void *netdev_alloc_frag(unsigned int fragsz) } EXPORT_SYMBOL(netdev_alloc_frag); +static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) +{ + return __alloc_page_frag(&napi_alloc_cache, fragsz, gfp_mask); +} + +void *napi_alloc_frag(unsigned int fragsz) +{ + return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD); +} +EXPORT_SYMBOL(napi_alloc_frag); + /** - * __netdev_alloc_skb - allocate an skbuff for rx on a specific device - * @dev: network device to receive on + * __alloc_rx_skb - allocate an skbuff for rx * @length: length to allocate * @gfp_mask: get_free_pages mask, passed to alloc_skb + * @flags: If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for + * allocations in case we have to fallback to __alloc_skb() + * If SKB_ALLOC_NAPI is set, page fragment will be allocated + * from napi_cache instead of netdev_cache. * * Allocate a new &sk_buff and assign it a usage count of one. The * buffer has unspecified headroom built in. Users should allocate @@ -419,11 +459,11 @@ EXPORT_SYMBOL(netdev_alloc_frag); * * %NULL is returned if there is no free memory. */ -struct sk_buff *__netdev_alloc_skb(struct net_device *dev, - unsigned int length, gfp_t gfp_mask) +static struct sk_buff *__alloc_rx_skb(unsigned int length, gfp_t gfp_mask, + int flags) { struct sk_buff *skb = NULL; - unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) + + unsigned int fragsz = SKB_DATA_ALIGN(length) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) { @@ -432,7 +472,9 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, if (sk_memalloc_socks()) gfp_mask |= __GFP_MEMALLOC; - data = __netdev_alloc_frag(fragsz, gfp_mask); + data = (flags & SKB_ALLOC_NAPI) ? + __napi_alloc_frag(fragsz, gfp_mask) : + __netdev_alloc_frag(fragsz, gfp_mask); if (likely(data)) { skb = build_skb(data, fragsz); @@ -440,17 +482,72 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, put_page(virt_to_head_page(data)); } } else { - skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, + skb = __alloc_skb(length, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); } + return skb; +} + +/** + * __netdev_alloc_skb - allocate an skbuff for rx on a specific device + * @dev: network device to receive on + * @length: length to allocate + * @gfp_mask: get_free_pages mask, passed to alloc_skb + * + * Allocate a new &sk_buff and assign it a usage count of one. The + * buffer has NET_SKB_PAD headroom built in. Users should allocate + * the headroom they think they need without accounting for the + * built in space. The built in space is used for optimisations. + * + * %NULL is returned if there is no free memory. 
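/* Usage sketch for the NAPI variant introduced below (the driver
 * function and the 256-byte length are hypothetical): allocating from
 * the per-cpu napi_alloc_cache skips the local_irq_save()/
 * local_irq_restore() pair that the netdev path above still needs.
 */
static struct sk_buff *mydrv_rx_alloc(struct napi_struct *napi)
{
	/* NET_SKB_PAD + NET_IP_ALIGN headroom is added and reserved
	 * internally by __napi_alloc_skb().
	 */
	return __napi_alloc_skb(napi, 256, GFP_ATOMIC);
}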
+ */ +struct sk_buff *__netdev_alloc_skb(struct net_device *dev, + unsigned int length, gfp_t gfp_mask) +{ + struct sk_buff *skb; + + length += NET_SKB_PAD; + skb = __alloc_rx_skb(length, gfp_mask, 0); + if (likely(skb)) { skb_reserve(skb, NET_SKB_PAD); skb->dev = dev; } + return skb; } EXPORT_SYMBOL(__netdev_alloc_skb); +/** + * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance + * @napi: napi instance this buffer was allocated for + * @length: length to allocate + * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages + * + * Allocate a new sk_buff for use in NAPI receive. This buffer will + * attempt to allocate the head from a special reserved region used + * only for NAPI Rx allocation. By doing this we can save several + * CPU cycles by avoiding having to disable and re-enable IRQs. + * + * %NULL is returned if there is no free memory. + */ +struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, + unsigned int length, gfp_t gfp_mask) +{ + struct sk_buff *skb; + + length += NET_SKB_PAD + NET_IP_ALIGN; + skb = __alloc_rx_skb(length, gfp_mask, SKB_ALLOC_NAPI); + + if (likely(skb)) { + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); + skb->dev = napi->dev; + } + + return skb; +} +EXPORT_SYMBOL(__napi_alloc_skb); + void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, int size, unsigned int truesize) { @@ -541,26 +638,27 @@ static void kfree_skbmem(struct sk_buff *skb) switch (skb->fclone) { case SKB_FCLONE_UNAVAILABLE: kmem_cache_free(skbuff_head_cache, skb); - break; + return; case SKB_FCLONE_ORIG: fclones = container_of(skb, struct sk_buff_fclones, skb1); - if (atomic_dec_and_test(&fclones->fclone_ref)) - kmem_cache_free(skbuff_fclone_cache, fclones); - break; - case SKB_FCLONE_CLONE: - fclones = container_of(skb, struct sk_buff_fclones, skb2); - - /* The clone portion is available for - * fast-cloning again. + /* We usually free the clone (TX completion) before original skb + * This test would have no chance to be true for the clone, + * while here, branch prediction will be good. 
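/* Resulting fclone life cycle, summarised from the hunks above and the
 * skb_clone() change that follows: fclone_ref == 1 now means "companion
 * clone unused", so the separate SKB_FCLONE_FREE state is gone;
 * skb_clone() hands out skb2 only while fclone_ref == 1 and raises the
 * count to 2, and kfree_skbmem() frees the whole sk_buff_fclones pair
 * once the count drops back to zero.
 */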
*/ - skb->fclone = SKB_FCLONE_FREE; + if (atomic_read(&fclones->fclone_ref) == 1) + goto fastpath; + break; - if (atomic_dec_and_test(&fclones->fclone_ref)) - kmem_cache_free(skbuff_fclone_cache, fclones); + default: /* SKB_FCLONE_CLONE */ + fclones = container_of(skb, struct sk_buff_fclones, skb2); break; } + if (!atomic_dec_and_test(&fclones->fclone_ref)) + return; +fastpath: + kmem_cache_free(skbuff_fclone_cache, fclones); } static void skb_release_head_state(struct sk_buff *skb) @@ -872,15 +970,15 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) struct sk_buff_fclones *fclones = container_of(skb, struct sk_buff_fclones, skb1); - struct sk_buff *n = &fclones->skb2; + struct sk_buff *n; if (skb_orphan_frags(skb, gfp_mask)) return NULL; if (skb->fclone == SKB_FCLONE_ORIG && - n->fclone == SKB_FCLONE_FREE) { - n->fclone = SKB_FCLONE_CLONE; - atomic_inc(&fclones->fclone_ref); + atomic_read(&fclones->fclone_ref) == 1) { + n = &fclones->skb2; + atomic_set(&fclones->fclone_ref, 2); } else { if (skb_pfmemalloc(skb)) gfp_mask |= __GFP_MEMALLOC; @@ -3002,7 +3100,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, if (nskb->len == len + doffset) goto perform_csum_check; - if (!sg) { + if (!sg && !nskb->remcsum_offload) { nskb->ip_summed = CHECKSUM_NONE; nskb->csum = skb_copy_and_csum_bits(head_skb, offset, skb_put(nskb, len), @@ -3074,7 +3172,7 @@ skip_fraglist: nskb->truesize += nskb->data_len; perform_csum_check: - if (!csum) { + if (!csum && !nskb->remcsum_offload) { nskb->csum = skb_checksum(nskb, doffset, nskb->len - doffset, 0); nskb->ip_summed = CHECKSUM_NONE; @@ -3088,6 +3186,16 @@ perform_csum_check: * (see validate_xmit_skb_list() for example) */ segs->prev = tail; + + /* Following permits correct backpressure for protocols + * using skb_set_owner_w(). + * Idea is to transfer ownership from head_skb to last segment. + */ + if (head_skb->destructor == sock_wfree) { + swap(tail->truesize, head_skb->truesize); + swap(tail->destructor, head_skb->destructor); + swap(tail->sk, head_skb->sk); + } return segs; err: @@ -4130,6 +4238,113 @@ err_free: } EXPORT_SYMBOL(skb_vlan_untag); +int skb_ensure_writable(struct sk_buff *skb, int write_len) +{ + if (!pskb_may_pull(skb, write_len)) + return -ENOMEM; + + if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) + return 0; + + return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); +} +EXPORT_SYMBOL(skb_ensure_writable); + +/* remove VLAN header from packet and update csum accordingly. 
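/* Minimal caller sketch for the helpers below (the retag() wrapper is
 * hypothetical; skb_vlan_pop() and skb_vlan_push() are the symbols this
 * patch exports): strip the outer tag, then push a replacement.
 */
static int retag(struct sk_buff *skb, u16 new_vid)
{
	int err = skb_vlan_pop(skb);

	if (err)
		return err;
	return skb_vlan_push(skb, htons(ETH_P_8021Q), new_vid);
}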
*/ +static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) +{ + struct vlan_hdr *vhdr; + unsigned int offset = skb->data - skb_mac_header(skb); + int err; + + __skb_push(skb, offset); + err = skb_ensure_writable(skb, VLAN_ETH_HLEN); + if (unlikely(err)) + goto pull; + + skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); + + vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); + *vlan_tci = ntohs(vhdr->h_vlan_TCI); + + memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); + __skb_pull(skb, VLAN_HLEN); + + vlan_set_encap_proto(skb, vhdr); + skb->mac_header += VLAN_HLEN; + + if (skb_network_offset(skb) < ETH_HLEN) + skb_set_network_header(skb, ETH_HLEN); + + skb_reset_mac_len(skb); +pull: + __skb_pull(skb, offset); + + return err; +} + +int skb_vlan_pop(struct sk_buff *skb) +{ + u16 vlan_tci; + __be16 vlan_proto; + int err; + + if (likely(vlan_tx_tag_present(skb))) { + skb->vlan_tci = 0; + } else { + if (unlikely((skb->protocol != htons(ETH_P_8021Q) && + skb->protocol != htons(ETH_P_8021AD)) || + skb->len < VLAN_ETH_HLEN)) + return 0; + + err = __skb_vlan_pop(skb, &vlan_tci); + if (err) + return err; + } + /* move next vlan tag to hw accel tag */ + if (likely((skb->protocol != htons(ETH_P_8021Q) && + skb->protocol != htons(ETH_P_8021AD)) || + skb->len < VLAN_ETH_HLEN)) + return 0; + + vlan_proto = skb->protocol; + err = __skb_vlan_pop(skb, &vlan_tci); + if (unlikely(err)) + return err; + + __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); + return 0; +} +EXPORT_SYMBOL(skb_vlan_pop); + +int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) +{ + if (vlan_tx_tag_present(skb)) { + unsigned int offset = skb->data - skb_mac_header(skb); + int err; + + /* __vlan_insert_tag expects skb->data pointing to mac header. + * So change skb->data before calling it and change back to + * original position later. + */ + __skb_push(skb, offset); + err = __vlan_insert_tag(skb, skb->vlan_proto, + vlan_tx_tag_get(skb)); + if (err) + return err; + skb->protocol = skb->vlan_proto; + skb->mac_len += VLAN_HLEN; + __skb_pull(skb, offset); + + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->csum = csum_add(skb->csum, csum_partial(skb->data + + (2 * ETH_ALEN), VLAN_HLEN, 0)); + } + __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); + return 0; +} +EXPORT_SYMBOL(skb_vlan_push); + /** * alloc_skb_with_frags - allocate skb with page frags * diff --git a/net/core/sock.c b/net/core/sock.c index 15e0c67b1069654af22ad9afe5c993cbcaafaec5..9a56b2000c3f374fb95aedada3327447816a9512 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -888,6 +888,19 @@ set_rcvbuf: } break; + case SO_ATTACH_BPF: + ret = -EINVAL; + if (optlen == sizeof(u32)) { + u32 ufd; + + ret = -EFAULT; + if (copy_from_user(&ufd, optval, sizeof(ufd))) + break; + + ret = sk_attach_bpf(ufd, sk); + } + break; + case SO_DETACH_FILTER: ret = sk_detach_filter(sk); break; @@ -1213,6 +1226,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname, v.val = sk->sk_max_pacing_rate; break; + case SO_INCOMING_CPU: + v.val = sk->sk_incoming_cpu; + break; + default: return -ENOPROTOOPT; } @@ -1517,6 +1534,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) newsk->sk_err = 0; newsk->sk_priority = 0; + newsk->sk_incoming_cpu = raw_smp_processor_id(); /* * Before updating sk_refcnt, we must commit prior changes to memory * (Documentation/RCU/rculist_nulls.txt for details) @@ -2457,7 +2475,7 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, msg->msg_flags |= MSG_TRUNC; copied = len; } - 
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto out_free_skb; diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index cf9cd13509a7d531f9e4a455d2909f5dfb731923..31baba2a71ce15e49450f69dae81e7d3be1ff3f2 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -26,6 +26,8 @@ static int zero = 0; static int one = 1; static int ushort_max = USHRT_MAX; +static int net_msg_warn; /* Unused, but still a sysctl */ + #ifdef CONFIG_RPS static int rps_sock_flow_sysctl(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) @@ -215,6 +217,18 @@ static int set_default_qdisc(struct ctl_table *table, int write, } #endif +static int proc_do_rss_key(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct ctl_table fake_table; + char buf[NETDEV_RSS_KEY_LEN * 3]; + + snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key); + fake_table.data = buf; + fake_table.maxlen = sizeof(buf); + return proc_dostring(&fake_table, write, buffer, lenp, ppos); +} + static struct ctl_table net_core_table[] = { #ifdef CONFIG_NET { @@ -263,6 +277,13 @@ static struct ctl_table net_core_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, + { + .procname = "netdev_rss_key", + .data = &netdev_rss_key, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = proc_do_rss_key, + }, #ifdef CONFIG_BPF_JIT { .procname = "bpf_jit_enable", diff --git a/net/core/utils.c b/net/core/utils.c index efc76dd9dcd160aaf31a37454fc980fbb7e9d5b4..7b803884c162834ddc5327805e6b0e3a636a82f0 100644 --- a/net/core/utils.c +++ b/net/core/utils.c @@ -33,9 +33,6 @@ #include #include -int net_msg_warn __read_mostly = 1; -EXPORT_SYMBOL(net_msg_warn); - DEFINE_RATELIMIT_STATE(net_ratelimit_state, 5 * HZ, 10); /* * All net warning printk()s should be guarded by this function. diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c index ba07824af4c07950d8ee25c6abfccfb1fe919f5c..bd9e718c2a209b5d4b7e4859fc68f77c79541366 100644 --- a/net/dccp/ackvec.c +++ b/net/dccp/ackvec.c @@ -218,7 +218,7 @@ static void dccp_ackvec_add_new(struct dccp_ackvec *av, u32 num_packets, * different underlying data structure. */ for (num_packets = num_cells = 1; lost_packets; ++num_cells) { - u8 len = min(lost_packets, (u32)DCCPAV_MAX_RUNLEN); + u8 len = min_t(u32, lost_packets, DCCPAV_MAX_RUNLEN); av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, 1); av->av_buf[av->av_buf_head] = DCCPAV_NOT_RECEIVED | len; diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index c67816647cce9d8b9fac7283179235ee1b642073..e4c144fa706fd72d5e266ecfc142fac83b405ae8 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -22,8 +22,8 @@ /* * DCCP - specific warning and debugging macros. */ -#define DCCP_WARN(fmt, a...) LIMIT_NETDEBUG(KERN_WARNING "%s: " fmt, \ - __func__, ##a) +#define DCCP_WARN(fmt, ...) \ + net_warn_ratelimited("%s: " fmt, __func__, ##__VA_ARGS__) #define DCCP_CRIT(fmt, a...) printk(KERN_CRIT fmt " at %s:%d/%s()\n", ##a, \ __FILE__, __LINE__, __func__) #define DCCP_BUG(a...) 
do { DCCP_CRIT("BUG: " a); dump_stack(); } while(0) diff --git a/net/dccp/feat.c b/net/dccp/feat.c index 9733ddbc96cb4e123e7991440a0a495af44ddd2e..1704948e6a12bc87827c71ccb4e81de8b8c2c9f7 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c @@ -478,7 +478,7 @@ static struct dccp_feat_entry * * @fn_list: feature-negotiation list to update * @feat: one of %dccp_feature_numbers * @local: whether local (1) or remote (0) @feat_num is meant - * @needs_mandatory: whether to use Mandatory feature negotiation options + * @mandatory: whether to use Mandatory feature negotiation options * @fval: pointer to NN/SP value to be inserted (will be copied) */ static int dccp_feat_push_change(struct list_head *fn_list, u8 feat, u8 local, @@ -1050,7 +1050,7 @@ static u8 dccp_feat_prefer(u8 preferred_value, u8 *array, u8 array_len) /** * dccp_feat_reconcile - Reconcile SP preference lists - * @fval: SP list to reconcile into + * @fv: SP list to reconcile into * @arr: received SP preference list * @len: length of @arr in bytes * @is_server: whether this side is the server (and @fv is the server's list) diff --git a/net/dccp/input.c b/net/dccp/input.c index 3c8ec7d4a34ec3523a041be3b4c1fd8f73541010..3bd14e8853969d8e69f1b67b6eaecd4cf16864e1 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -537,7 +537,7 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk, case DCCP_PKT_DATAACK: case DCCP_PKT_ACK: /* - * FIXME: we should be reseting the PARTOPEN (DELACK) timer + * FIXME: we should be resetting the PARTOPEN (DELACK) timer * here but only if we haven't used the DELACK timer for * something else, like sending a delayed ack for a TIMESTAMP * echo, etc, for now were not clearing it, sending an extra diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 6ca645c4b48e8b56a88f7d549b073fb6dc82eeb7..e45b968613a449206767455431eeadf808d2253a 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -140,7 +140,6 @@ failure: inet->inet_dport = 0; goto out; } - EXPORT_SYMBOL_GPL(dccp_v4_connect); /* @@ -376,7 +375,6 @@ void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb) inet->inet_saddr, inet->inet_daddr); } - EXPORT_SYMBOL_GPL(dccp_v4_send_check); static inline u64 dccp_v4_init_sequence(const struct sk_buff *skb) @@ -444,7 +442,6 @@ put_and_exit: dccp_done(newsk); goto exit; } - EXPORT_SYMBOL_GPL(dccp_v4_request_recv_sock); static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) @@ -670,7 +667,6 @@ drop: DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS); return -1; } - EXPORT_SYMBOL_GPL(dccp_v4_conn_request); int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) @@ -729,7 +725,6 @@ discard: kfree_skb(skb); return 0; } - EXPORT_SYMBOL_GPL(dccp_v4_do_rcv); /** @@ -802,7 +797,6 @@ int dccp_invalid_packet(struct sk_buff *skb) return 0; } - EXPORT_SYMBOL_GPL(dccp_invalid_packet); /* this is called when real data arrives */ diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 5ab6627cf3704a8479d35170546d1d683185dbbd..e171b780b499dabc7d7312bb3aadbbe4b005f2bd 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -703,7 +703,7 @@ EXPORT_SYMBOL_GPL(compat_dccp_getsockopt); static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb) { - struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg); + struct cmsghdr *cmsg; /* * Assign an (opaque) qpolicy priority value to skb->priority. 
@@ -717,8 +717,7 @@ static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb) */ skb->priority = 0; - for (; cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) { - + for_each_cmsghdr(cmsg, msg) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; @@ -781,7 +780,7 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, goto out_release; skb_reserve(skb, sk->sk_prot->max_header); - rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); + rc = memcpy_from_msg(skb_put(skb, len), msg, len); if (rc != 0) goto out_discard; @@ -896,7 +895,7 @@ verify_sock_status: else if (len < skb->len) msg->msg_flags |= MSG_TRUNC; - if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) { + if (skb_copy_datagram_msg(skb, 0, msg, len)) { /* Exception. Bailout! */ len = -EFAULT; break; diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 25733d53814763c85e49a612df3bbcdb202992d6..810228646de38f9fe26eb2c75a84fbc000840f7b 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -1760,7 +1760,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock, if ((chunk + copied) > size) chunk = size - copied; - if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { + if (memcpy_to_msg(msg, skb->data, chunk)) { rv = -EFAULT; break; } @@ -2032,7 +2032,7 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock, skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER); - if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { + if (memcpy_from_msg(skb_put(skb, len), msg, len)) { err = -EFAULT; goto out; } diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c index c8121ceddb9e65a87830a710ced847de02dda358..7ca7c3143da332d567bd336197beff18ce210083 100644 --- a/net/decnet/dn_neigh.c +++ b/net/decnet/dn_neigh.c @@ -591,7 +591,7 @@ static const struct file_operations dn_neigh_seq_fops = { void __init dn_neigh_init(void) { - neigh_table_init(&dn_neigh_table); + neigh_table_init(NEIGH_DN_TABLE, &dn_neigh_table); proc_create("decnet_neigh", S_IRUGO, init_net.proc_net, &dn_neigh_seq_fops); } @@ -599,5 +599,5 @@ void __init dn_neigh_init(void) void __exit dn_neigh_cleanup(void) { remove_proc_entry("decnet_neigh", init_net.proc_net); - neigh_table_clear(&dn_neigh_table); + neigh_table_clear(NEIGH_DN_TABLE, &dn_neigh_table); } diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index a585fd6352ebaf8e8bdf0e9772653857def08811..5f8ac404535bb6143d3fc2e90cacf5b0f0200129 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -11,6 +11,17 @@ config NET_DSA if NET_DSA +config NET_DSA_HWMON + bool "Distributed Switch Architecture HWMON support" + default y + depends on HWMON && !(NET_DSA=y && HWMON=m) + ---help--- + Say Y if you want to expose thermal sensor data on switches supported + by the Distributed Switch Architecture. + + Some of those switches contain thermal sensors. This data is available + via the hwmon sysfs interface and exposes the onboard sensors. + # tagging formats config NET_DSA_TAG_BRCM bool diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 6317b41c99b0a58bb82ede4a1205d8c3e7f94283..322c778487e7dd5f1276b84334838fb266241807 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -9,6 +9,9 @@ * (at your option) any later version. 
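/* Driver-side sketch for the hwmon hooks used below (the "mydsa" driver
 * and its sensor read are hypothetical): only get_temp is needed for
 * the hwmon device to be registered; get_temp_limit/set_temp_limit and
 * get_temp_alarm are optional and only gate attribute visibility.
 * Temperatures are reported in degrees C; the sysfs handlers below
 * scale them to the millidegrees hwmon expects.
 */
static int mydsa_get_temp(struct dsa_switch *ds, int *temp)
{
	*temp = mydsa_read_temp_reg(ds);	/* hypothetical register read */
	return 0;
}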
*/ +#include +#include +#include #include #include #include @@ -17,6 +20,7 @@ #include #include #include +#include #include "dsa_priv.h" char dsa_driver_version[] = "0.1"; @@ -71,6 +75,104 @@ dsa_switch_probe(struct device *host_dev, int sw_addr, char **_name) return ret; } +/* hwmon support ************************************************************/ + +#ifdef CONFIG_NET_DSA_HWMON + +static ssize_t temp1_input_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dsa_switch *ds = dev_get_drvdata(dev); + int temp, ret; + + ret = ds->drv->get_temp(ds, &temp); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", temp * 1000); +} +static DEVICE_ATTR_RO(temp1_input); + +static ssize_t temp1_max_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dsa_switch *ds = dev_get_drvdata(dev); + int temp, ret; + + ret = ds->drv->get_temp_limit(ds, &temp); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", temp * 1000); +} + +static ssize_t temp1_max_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct dsa_switch *ds = dev_get_drvdata(dev); + int temp, ret; + + ret = kstrtoint(buf, 0, &temp); + if (ret < 0) + return ret; + + ret = ds->drv->set_temp_limit(ds, DIV_ROUND_CLOSEST(temp, 1000)); + if (ret < 0) + return ret; + + return count; +} +static DEVICE_ATTR(temp1_max, S_IRUGO, temp1_max_show, temp1_max_store); + +static ssize_t temp1_max_alarm_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dsa_switch *ds = dev_get_drvdata(dev); + bool alarm; + int ret; + + ret = ds->drv->get_temp_alarm(ds, &alarm); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", alarm); +} +static DEVICE_ATTR_RO(temp1_max_alarm); + +static struct attribute *dsa_hwmon_attrs[] = { + &dev_attr_temp1_input.attr, /* 0 */ + &dev_attr_temp1_max.attr, /* 1 */ + &dev_attr_temp1_max_alarm.attr, /* 2 */ + NULL +}; + +static umode_t dsa_hwmon_attrs_visible(struct kobject *kobj, + struct attribute *attr, int index) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct dsa_switch *ds = dev_get_drvdata(dev); + struct dsa_switch_driver *drv = ds->drv; + umode_t mode = attr->mode; + + if (index == 1) { + if (!drv->get_temp_limit) + mode = 0; + else if (drv->set_temp_limit) + mode |= S_IWUSR; + } else if (index == 2 && !drv->get_temp_alarm) { + mode = 0; + } + return mode; +} + +static const struct attribute_group dsa_hwmon_group = { + .attrs = dsa_hwmon_attrs, + .is_visible = dsa_hwmon_attrs_visible, +}; +__ATTRIBUTE_GROUPS(dsa_hwmon); + +#endif /* CONFIG_NET_DSA_HWMON */ /* basic switch operations **************************************************/ static struct dsa_switch * @@ -90,12 +192,12 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, */ drv = dsa_switch_probe(host_dev, pd->sw_addr, &name); if (drv == NULL) { - printk(KERN_ERR "%s[%d]: could not detect attached switch\n", - dst->master_netdev->name, index); + netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n", + index); return ERR_PTR(-EINVAL); } - printk(KERN_INFO "%s[%d]: detected a %s switch\n", - dst->master_netdev->name, index, name); + netdev_info(dst->master_netdev, "[%d]: detected a %s switch\n", + index, name); /* @@ -123,7 +225,8 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, if (!strcmp(name, "cpu")) { if (dst->cpu_switch != -1) { - printk(KERN_ERR "multiple cpu ports?!\n"); + netdev_err(dst->master_netdev, + "multiple cpu ports?!\n"); ret = 
-EINVAL; goto out; } @@ -218,16 +321,39 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, slave_dev = dsa_slave_create(ds, parent, i, pd->port_names[i]); if (slave_dev == NULL) { - printk(KERN_ERR "%s[%d]: can't create dsa " - "slave device for port %d(%s)\n", - dst->master_netdev->name, - index, i, pd->port_names[i]); + netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s)\n", + index, i, pd->port_names[i]); continue; } ds->ports[i] = slave_dev; } +#ifdef CONFIG_NET_DSA_HWMON + /* If the switch provides a temperature sensor, + * register with hardware monitoring subsystem. + * Treat registration error as non-fatal and ignore it. + */ + if (drv->get_temp) { + const char *netname = netdev_name(dst->master_netdev); + char hname[IFNAMSIZ + 1]; + int i, j; + + /* Create valid hwmon 'name' attribute */ + for (i = j = 0; i < IFNAMSIZ && netname[i]; i++) { + if (isalnum(netname[i])) + hname[j++] = netname[i]; + } + hname[j] = '\0'; + scnprintf(ds->hwmon_name, sizeof(ds->hwmon_name), "%s_dsa%d", + hname, index); + ds->hwmon_dev = hwmon_device_register_with_groups(NULL, + ds->hwmon_name, ds, dsa_hwmon_groups); + if (IS_ERR(ds->hwmon_dev)) + ds->hwmon_dev = NULL; + } +#endif /* CONFIG_NET_DSA_HWMON */ + return ds; out_free: @@ -239,6 +365,10 @@ out: static void dsa_switch_destroy(struct dsa_switch *ds) { +#ifdef CONFIG_NET_DSA_HWMON + if (ds->hwmon_dev) + hwmon_device_unregister(ds->hwmon_dev); +#endif } #ifdef CONFIG_PM_SLEEP @@ -396,7 +526,8 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd, /* First time routing table allocation */ if (!cd->rtable) { - cd->rtable = kmalloc(pd->nr_chips * sizeof(s8), GFP_KERNEL); + cd->rtable = kmalloc_array(pd->nr_chips, sizeof(s8), + GFP_KERNEL); if (!cd->rtable) return -ENOMEM; @@ -447,6 +578,7 @@ static int dsa_of_probe(struct platform_device *pdev) const char *port_name; int chip_index, port_index; const unsigned int *sw_addr, *port_reg; + u32 eeprom_len; int ret; mdio = of_parse_phandle(np, "dsa,mii-bus", 0); @@ -475,8 +607,8 @@ static int dsa_of_probe(struct platform_device *pdev) if (pd->nr_chips > DSA_MAX_SWITCHES) pd->nr_chips = DSA_MAX_SWITCHES; - pd->chip = kzalloc(pd->nr_chips * sizeof(struct dsa_chip_data), - GFP_KERNEL); + pd->chip = kcalloc(pd->nr_chips, sizeof(struct dsa_chip_data), + GFP_KERNEL); if (!pd->chip) { ret = -ENOMEM; goto out_free; @@ -498,6 +630,9 @@ static int dsa_of_probe(struct platform_device *pdev) if (cd->sw_addr > PHY_MAX_ADDR) continue; + if (!of_property_read_u32(np, "eeprom-length", &eeprom_len)) + cd->eeprom_len = eeprom_len; + for_each_available_child_of_node(child, port) { port_reg = of_get_property(port, "reg", NULL); if (!port_reg) @@ -566,15 +701,13 @@ static inline void dsa_of_remove(struct platform_device *pdev) static int dsa_probe(struct platform_device *pdev) { - static int dsa_version_printed; struct dsa_platform_data *pd = pdev->dev.platform_data; struct net_device *dev; struct dsa_switch_tree *dst; int i, ret; - if (!dsa_version_printed++) - printk(KERN_NOTICE "Distributed Switch Architecture " - "driver version %s\n", dsa_driver_version); + pr_notice_once("Distributed Switch Architecture driver version %s\n", + dsa_driver_version); if (pdev->dev.of_node) { ret = dsa_of_probe(pdev); @@ -618,9 +751,8 @@ static int dsa_probe(struct platform_device *pdev) ds = dsa_switch_setup(dst, i, &pdev->dev, pd->chip[i].host_dev); if (IS_ERR(ds)) { - printk(KERN_ERR "%s[%d]: couldn't create dsa switch " - "instance (error %ld)\n", dev->name, i, - PTR_ERR(ds)); + 
netdev_err(dev, "[%d]: couldn't create dsa switch instance (error %ld)\n", + i, PTR_ERR(ds)); continue; } diff --git a/net/dsa/slave.c b/net/dsa/slave.c index ab03e00ffe8f0e2c4095e4f8c33d79c3e6d069da..528380a3e296001b9c727e9f35e07f403d7f050b 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -249,6 +249,27 @@ static void dsa_slave_get_drvinfo(struct net_device *dev, strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info)); } +static int dsa_slave_get_regs_len(struct net_device *dev) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + + if (ds->drv->get_regs_len) + return ds->drv->get_regs_len(ds, p->port); + + return -EOPNOTSUPP; +} + +static void +dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + + if (ds->drv->get_regs) + ds->drv->get_regs(ds, p->port, regs, _p); +} + static int dsa_slave_nway_reset(struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); @@ -271,6 +292,44 @@ static u32 dsa_slave_get_link(struct net_device *dev) return -EOPNOTSUPP; } +static int dsa_slave_get_eeprom_len(struct net_device *dev) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + + if (ds->pd->eeprom_len) + return ds->pd->eeprom_len; + + if (ds->drv->get_eeprom_len) + return ds->drv->get_eeprom_len(ds); + + return 0; +} + +static int dsa_slave_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + + if (ds->drv->get_eeprom) + return ds->drv->get_eeprom(ds, eeprom, data); + + return -EOPNOTSUPP; +} + +static int dsa_slave_set_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + + if (ds->drv->set_eeprom) + return ds->drv->set_eeprom(ds, eeprom, data); + + return -EOPNOTSUPP; +} + static void dsa_slave_get_strings(struct net_device *dev, uint32_t stringset, uint8_t *data) { @@ -385,8 +444,13 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = { .get_settings = dsa_slave_get_settings, .set_settings = dsa_slave_set_settings, .get_drvinfo = dsa_slave_get_drvinfo, + .get_regs_len = dsa_slave_get_regs_len, + .get_regs = dsa_slave_get_regs, .nway_reset = dsa_slave_nway_reset, .get_link = dsa_slave_get_link, + .get_eeprom_len = dsa_slave_get_eeprom_len, + .get_eeprom = dsa_slave_get_eeprom, + .set_eeprom = dsa_slave_set_eeprom, .get_strings = dsa_slave_get_strings, .get_ethtool_stats = dsa_slave_get_ethtool_stats, .get_sset_count = dsa_slave_get_sset_count, @@ -468,7 +532,7 @@ static void dsa_slave_phy_setup(struct dsa_slave_priv *p, */ ret = of_phy_register_fixed_link(port_dn); if (ret) { - pr_err("failed to register fixed PHY\n"); + netdev_err(slave_dev, "failed to register fixed PHY\n"); return; } phy_is_fixed = true; @@ -494,8 +558,8 @@ static void dsa_slave_phy_setup(struct dsa_slave_priv *p, phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, p->phy_interface); } else { - pr_info("attached PHY at address %d [%s]\n", - p->phy->addr, p->phy->drv->name); + netdev_info(slave_dev, "attached PHY at address %d [%s]\n", + p->phy->addr, p->phy->drv->name); } } @@ -593,8 +657,8 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent, ret = register_netdev(slave_dev); if (ret) { - printk(KERN_ERR "%s: error %d registering interface %s\n", - master->name, ret, 
slave_dev->name); + netdev_err(master, "error %d registering interface %s\n", + ret, slave_dev->name); free_netdev(slave_dev); return NULL; } diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c index ce90c8bdc65864576164f5b431e1d1ea15c573f4..2dab27063273d2d48d12cc13b9d73fefe92c9362 100644 --- a/net/dsa/tag_dsa.c +++ b/net/dsa/tag_dsa.c @@ -63,8 +63,6 @@ static netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev) dsa_header[3] = 0x00; } - skb->protocol = htons(ETH_P_DSA); - skb->dev = p->parent->dst->master_netdev; dev_queue_xmit(skb); diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c index 94fcce7786796a8d534dd188ce8a6562e0543c9c..9aeda596f7ec4ec2c30df9167e87c7719535de91 100644 --- a/net/dsa/tag_edsa.c +++ b/net/dsa/tag_edsa.c @@ -76,8 +76,6 @@ static netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev) edsa_header[7] = 0x00; } - skb->protocol = htons(ETH_P_EDSA); - skb->dev = p->parent->dst->master_netdev; dev_queue_xmit(skb); diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index 115fdca34077d420290941e6a264df42255f4136..e268f9db8893deab7c2febd26ad0aae1849b157d 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c @@ -57,8 +57,6 @@ static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev) trailer[2] = 0x10; trailer[3] = 0x00; - nskb->protocol = htons(ETH_P_TRAILER); - nskb->dev = p->parent->dst->master_netdev; dev_queue_xmit(nskb); diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c index 44136297b673a2b2645f481fb723222fa97b951a..27eaa65e88e16ad51a70c33711f6f91d0e8fd61c 100644 --- a/net/ieee802154/6lowpan_rtnl.c +++ b/net/ieee802154/6lowpan_rtnl.c @@ -49,8 +49,8 @@ #include #include #include +#include #include -#include #include #include #include @@ -58,12 +58,13 @@ #include "reassembly.h" static LIST_HEAD(lowpan_devices); +static int lowpan_open_count; /* private device info */ struct lowpan_dev_info { struct net_device *real_dev; /* real WPAN device ptr */ struct mutex dev_list_mtx; /* mutex for list ops */ - __be16 fragment_tag; + u16 fragment_tag; }; struct lowpan_dev_record { @@ -140,24 +141,33 @@ static int lowpan_give_skb_to_devices(struct sk_buff *skb, struct sk_buff *skb_cp; int stat = NET_RX_SUCCESS; + skb->protocol = htons(ETH_P_IPV6); + skb->pkt_type = PACKET_HOST; + rcu_read_lock(); list_for_each_entry_rcu(entry, &lowpan_devices, list) if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) { skb_cp = skb_copy(skb, GFP_ATOMIC); if (!skb_cp) { - stat = -ENOMEM; - break; + kfree_skb(skb); + rcu_read_unlock(); + return NET_RX_DROP; } skb_cp->dev = entry->ldev; stat = netif_rx(skb_cp); + if (stat == NET_RX_DROP) + break; } rcu_read_unlock(); + consume_skb(skb); + return stat; } -static int process_data(struct sk_buff *skb, const struct ieee802154_hdr *hdr) +static int +iphc_decompress(struct sk_buff *skb, const struct ieee802154_hdr *hdr) { u8 iphc0, iphc1; struct ieee802154_addr_sa sa, da; @@ -166,13 +176,13 @@ static int process_data(struct sk_buff *skb, const struct ieee802154_hdr *hdr) raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len); /* at least two bytes will be used for the encoding */ if (skb->len < 2) - goto drop; + return -EINVAL; if (lowpan_fetch_skb_u8(skb, &iphc0)) - goto drop; + return -EINVAL; if (lowpan_fetch_skb_u8(skb, &iphc1)) - goto drop; + return -EINVAL; ieee802154_addr_to_sa(&sa, &hdr->source); ieee802154_addr_to_sa(&da, &hdr->dest); @@ -187,27 +197,9 @@ static int process_data(struct sk_buff *skb, const struct ieee802154_hdr *hdr) else dap = 
&da.hwaddr; - return lowpan_process_data(skb, skb->dev, sap, sa.addr_type, - IEEE802154_ADDR_LEN, dap, da.addr_type, - IEEE802154_ADDR_LEN, iphc0, iphc1, - lowpan_give_skb_to_devices); - -drop: - kfree_skb(skb); - return -EINVAL; -} - -static int lowpan_set_address(struct net_device *dev, void *p) -{ - struct sockaddr *sa = p; - - if (netif_running(dev)) - return -EBUSY; - - /* TODO: validate addr */ - memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); - - return 0; + return lowpan_header_decompress(skb, skb->dev, sap, sa.addr_type, + IEEE802154_ADDR_LEN, dap, da.addr_type, + IEEE802154_ADDR_LEN, iphc0, iphc1); } static struct sk_buff* @@ -233,7 +225,7 @@ lowpan_alloc_frag(struct sk_buff *skb, int size, &master_hdr->source, size); if (rc < 0) { kfree_skb(frag); - return ERR_PTR(-rc); + return ERR_PTR(rc); } } else { frag = ERR_PTR(-ENOMEM); @@ -275,7 +267,8 @@ lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev, dgram_size = lowpan_uncompress_size(skb, &dgram_offset) - skb->mac_len; - frag_tag = lowpan_dev_info(dev)->fragment_tag++; + frag_tag = htons(lowpan_dev_info(dev)->fragment_tag); + lowpan_dev_info(dev)->fragment_tag++; frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07); frag_hdr[1] = dgram_size & 0xff; @@ -294,7 +287,7 @@ lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev, frag_len + skb_network_header_len(skb)); if (rc) { pr_debug("%s unable to send FRAG1 packet (tag: %d)", - __func__, frag_tag); + __func__, ntohs(frag_tag)); goto err; } @@ -315,7 +308,7 @@ lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev, frag_len); if (rc) { pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n", - __func__, frag_tag, skb_offset); + __func__, ntohs(frag_tag), skb_offset); goto err; } } while (skb_unprocessed > frag_cap); @@ -410,13 +403,6 @@ static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev) } } -static struct wpan_phy *lowpan_get_phy(const struct net_device *dev) -{ - struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; - - return ieee802154_mlme_ops(real_dev)->get_phy(real_dev); -} - static __le16 lowpan_get_pan_id(const struct net_device *dev) { struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; @@ -453,7 +439,6 @@ static void lowpan_set_lockdep_class_one(struct net_device *dev, &lowpan_netdev_xmit_lock_key); } - static int lowpan_dev_init(struct net_device *dev) { netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL); @@ -464,12 +449,10 @@ static int lowpan_dev_init(struct net_device *dev) static const struct net_device_ops lowpan_netdev_ops = { .ndo_init = lowpan_dev_init, .ndo_start_xmit = lowpan_xmit, - .ndo_set_mac_address = lowpan_set_address, }; static struct ieee802154_mlme_ops lowpan_mlme = { .get_pan_id = lowpan_get_pan_id, - .get_phy = lowpan_get_phy, .get_short_addr = lowpan_get_short_addr, .get_dsn = lowpan_get_dsn, }; @@ -515,6 +498,9 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev, if (!netif_running(dev)) goto drop_skb; + if (skb->pkt_type == PACKET_OTHERHOST) + goto drop_skb; + if (dev->type != ARPHRD_IEEE802154) goto drop_skb; @@ -523,55 +509,67 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev, /* check that it's our buffer */ if (skb->data[0] == LOWPAN_DISPATCH_IPV6) { - skb->protocol = htons(ETH_P_IPV6); - skb->pkt_type = PACKET_HOST; - /* Pull off the 1-byte of 6lowpan header. 
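
The fragment_tag change above keeps the counter in host byte order and converts it once per datagram with htons(). A standalone sketch of the resulting 4-byte FRAG1 header layout per RFC 4944 (5 dispatch bits, 11-bit datagram size, 16-bit tag in network order), mirroring the frag_hdr assignments above:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define LOWPAN_DISPATCH_FRAG1	0xc0

static void build_frag1(uint8_t hdr[4], uint16_t dgram_size, uint16_t tag)
{
	uint16_t be_tag = htons(tag);	/* tag counter lives in host order */

	hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
	hdr[1] = dgram_size & 0xff;
	memcpy(&hdr[2], &be_tag, 2);	/* on the wire: big endian */
}
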
*/ skb_pull(skb, 1); - - ret = lowpan_give_skb_to_devices(skb, NULL); - if (ret == NET_RX_DROP) - goto drop; + return lowpan_give_skb_to_devices(skb, NULL); } else { switch (skb->data[0] & 0xe0) { case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */ - ret = process_data(skb, &hdr); - if (ret == NET_RX_DROP) - goto drop; - break; + ret = iphc_decompress(skb, &hdr); + if (ret < 0) + goto drop_skb; + + return lowpan_give_skb_to_devices(skb, NULL); case LOWPAN_DISPATCH_FRAG1: /* first fragment header */ ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1); if (ret == 1) { - ret = process_data(skb, &hdr); - if (ret == NET_RX_DROP) - goto drop; + ret = iphc_decompress(skb, &hdr); + if (ret < 0) + goto drop_skb; + + return lowpan_give_skb_to_devices(skb, NULL); + } else if (ret == -1) { + return NET_RX_DROP; + } else { + return NET_RX_SUCCESS; } - break; case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */ ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAGN); if (ret == 1) { - ret = process_data(skb, &hdr); - if (ret == NET_RX_DROP) - goto drop; + ret = iphc_decompress(skb, &hdr); + if (ret < 0) + goto drop_skb; + + return lowpan_give_skb_to_devices(skb, NULL); + } else if (ret == -1) { + return NET_RX_DROP; + } else { + return NET_RX_SUCCESS; } - break; default: break; } } - return NET_RX_SUCCESS; drop_skb: kfree_skb(skb); drop: return NET_RX_DROP; } +static struct packet_type lowpan_packet_type = { + .type = htons(ETH_P_IEEE802154), + .func = lowpan_rcv, +}; + static int lowpan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct net_device *real_dev; struct lowpan_dev_record *entry; + int ret; + + ASSERT_RTNL(); pr_debug("adding new link\n"); @@ -598,7 +596,7 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev, entry->ldev = dev; - /* Set the lowpan harware address to the wpan hardware address. */ + /* Set the lowpan hardware address to the wpan hardware address. 
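
The reworked receive path above now returns directly from each dispatch case instead of falling through to a shared exit. The classification it performs on the first octet can be sketched in isolation; the dispatch values are the ones the kernel's 6LoWPAN headers used at the time:

#include <stdint.h>

#define LOWPAN_DISPATCH_IPV6	0x41	/* uncompressed IPv6 follows */
#define LOWPAN_DISPATCH_IPHC	0x60	/* IPHC-compressed datagram  */
#define LOWPAN_DISPATCH_FRAG1	0xc0	/* first fragment            */
#define LOWPAN_DISPATCH_FRAGN	0xe0	/* subsequent fragment       */

/* Mirror of the switch on (skb->data[0] & 0xe0) in lowpan_rcv(). */
static const char *classify(uint8_t dispatch)
{
	if (dispatch == LOWPAN_DISPATCH_IPV6)
		return "uncompressed IPv6";

	switch (dispatch & 0xe0) {
	case LOWPAN_DISPATCH_IPHC:
		return "IPHC compressed datagram";
	case LOWPAN_DISPATCH_FRAG1:
		return "first fragment";
	case LOWPAN_DISPATCH_FRAGN:
		return "next fragment";
	default:
		return "not for us";
	}
}
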
*/ memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN); mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); @@ -606,9 +604,14 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev, list_add_tail(&entry->list, &lowpan_devices); mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx); - register_netdevice(dev); + ret = register_netdevice(dev); + if (ret >= 0) { + if (!lowpan_open_count) + dev_add_pack(&lowpan_packet_type); + lowpan_open_count++; + } - return 0; + return ret; } static void lowpan_dellink(struct net_device *dev, struct list_head *head) @@ -619,6 +622,10 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head) ASSERT_RTNL(); + lowpan_open_count--; + if (!lowpan_open_count) + dev_remove_pack(&lowpan_packet_type); + mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) { if (entry->ldev == dev) { @@ -681,11 +688,6 @@ static struct notifier_block lowpan_dev_notifier = { .notifier_call = lowpan_device_event, }; -static struct packet_type lowpan_packet_type = { - .type = htons(ETH_P_IEEE802154), - .func = lowpan_rcv, -}; - static int __init lowpan_init_module(void) { int err = 0; @@ -698,8 +700,6 @@ static int __init lowpan_init_module(void) if (err < 0) goto out_frag; - dev_add_pack(&lowpan_packet_type); - err = register_netdevice_notifier(&lowpan_dev_notifier); if (err < 0) goto out_pack; @@ -707,7 +707,6 @@ static int __init lowpan_init_module(void) return 0; out_pack: - dev_remove_pack(&lowpan_packet_type); lowpan_netlink_fini(); out_frag: lowpan_net_frag_exit(); @@ -719,8 +718,6 @@ static void __exit lowpan_cleanup_module(void) { lowpan_netlink_fini(); - dev_remove_pack(&lowpan_packet_type); - lowpan_net_frag_exit(); unregister_netdevice_notifier(&lowpan_dev_notifier); diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile index 3914b1ed42743c50aaa4fe0a40927133cf25dae1..9f6970f2a28b9e9d6840ccbef56efdf3d19b2531 100644 --- a/net/ieee802154/Makefile +++ b/net/ieee802154/Makefile @@ -2,8 +2,8 @@ obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o obj-$(CONFIG_IEEE802154_6LOWPAN) += ieee802154_6lowpan.o ieee802154_6lowpan-y := 6lowpan_rtnl.o reassembly.o -ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o \ - header_ops.o +ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o core.o \ + header_ops.o sysfs.o nl802154.o af_802154-y := af_ieee802154.o raw.o dgram.o ccflags-y += -D__CHECK_ENDIAN__ diff --git a/net/ieee802154/af802154.h b/net/ieee802154/af802154.h index 8330a09bfc95e6877351029cff36b542af11c66f..343b63e6f95378ff1ee31cf07706128bb212861a 100644 --- a/net/ieee802154/af802154.h +++ b/net/ieee802154/af802154.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * Written by: * Sergey Lapin * Dmitry Eremin-Solenikov diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c index 29e0de63001b68930f11eab46e6d444a8ee69b96..d0a1282cdf43560750da759a2dfe12f98ced8815 100644 --- a/net/ieee802154/af_ieee802154.c +++ b/net/ieee802154/af_ieee802154.c @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
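
With the packet handler now added in lowpan_newlink() rather than at module load, lowpan_open_count above implements the usual register-on-first-user, unregister-on-last-user pattern. A generic sketch, assuming callers are serialized as they are here under RTNL; the handler functions are placeholders for dev_add_pack()/dev_remove_pack():

static void register_handler(void)   { /* e.g. dev_add_pack() */ }
static void unregister_handler(void) { /* e.g. dev_remove_pack() */ }

static int use_count;

static void handler_get(void)
{
	if (!use_count++)
		register_handler();	/* first user installs the hook */
}

static void handler_put(void)
{
	if (!--use_count)
		unregister_handler();	/* last user tears it down */
}
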
* - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * Written by: * Sergey Lapin * Maxim Gorbachyov @@ -103,6 +99,7 @@ static int ieee802154_sock_release(struct socket *sock) } return 0; } + static int ieee802154_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { @@ -235,7 +232,6 @@ static const struct proto_ops ieee802154_dgram_ops = { #endif }; - /* Create a socket. Initialise the socket, blank the addresses * set the state. */ @@ -324,7 +320,6 @@ drop: return NET_RX_DROP; } - static struct packet_type ieee802154_packet_type = { .type = htons(ETH_P_IEEE802154), .func = ieee802154_rcv, @@ -358,6 +353,7 @@ err_dgram: out: return rc; } + static void __exit af_ieee802154_remove(void) { dev_remove_pack(&ieee802154_packet_type); diff --git a/net/ieee802154/core.c b/net/ieee802154/core.c new file mode 100644 index 0000000000000000000000000000000000000000..18bc7e7385074c4b62e5e7c87c1d2bba3981e7a4 --- /dev/null +++ b/net/ieee802154/core.c @@ -0,0 +1,321 @@ +/* + * Copyright (C) 2007, 2008, 2009 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include + +#include +#include + +#include "ieee802154.h" +#include "nl802154.h" +#include "sysfs.h" +#include "core.h" + +/* RCU-protected (and RTNL for writers) */ +LIST_HEAD(cfg802154_rdev_list); +int cfg802154_rdev_list_generation; + +static int wpan_phy_match(struct device *dev, const void *data) +{ + return !strcmp(dev_name(dev), (const char *)data); +} + +struct wpan_phy *wpan_phy_find(const char *str) +{ + struct device *dev; + + if (WARN_ON(!str)) + return NULL; + + dev = class_find_device(&wpan_phy_class, NULL, str, wpan_phy_match); + if (!dev) + return NULL; + + return container_of(dev, struct wpan_phy, dev); +} +EXPORT_SYMBOL(wpan_phy_find); + +struct wpan_phy_iter_data { + int (*fn)(struct wpan_phy *phy, void *data); + void *data; +}; + +static int wpan_phy_iter(struct device *dev, void *_data) +{ + struct wpan_phy_iter_data *wpid = _data; + struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev); + + return wpid->fn(phy, wpid->data); +} + +int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data), + void *data) +{ + struct wpan_phy_iter_data wpid = { + .fn = fn, + .data = data, + }; + + return class_for_each_device(&wpan_phy_class, NULL, + &wpid, wpan_phy_iter); +} +EXPORT_SYMBOL(wpan_phy_for_each); + +struct cfg802154_registered_device * +cfg802154_rdev_by_wpan_phy_idx(int wpan_phy_idx) +{ + struct cfg802154_registered_device *result = NULL, *rdev; + + ASSERT_RTNL(); + + list_for_each_entry(rdev, &cfg802154_rdev_list, list) { + if (rdev->wpan_phy_idx == wpan_phy_idx) { + result = rdev; + break; + } + } + + return result; +} + +struct wpan_phy * +wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size) +{ + static atomic_t wpan_phy_counter = ATOMIC_INIT(0); + struct cfg802154_registered_device *rdev; + size_t alloc_size; + + alloc_size = sizeof(*rdev) + priv_size; + rdev = 
kzalloc(alloc_size, GFP_KERNEL); + if (!rdev) + return NULL; + + rdev->ops = ops; + + rdev->wpan_phy_idx = atomic_inc_return(&wpan_phy_counter); + + if (unlikely(rdev->wpan_phy_idx < 0)) { + /* ugh, wrapped! */ + atomic_dec(&wpan_phy_counter); + kfree(rdev); + return NULL; + } + + /* atomic_inc_return makes it start at 1, make it start at 0 */ + rdev->wpan_phy_idx--; + + mutex_init(&rdev->wpan_phy.pib_lock); + + INIT_LIST_HEAD(&rdev->wpan_dev_list); + device_initialize(&rdev->wpan_phy.dev); + dev_set_name(&rdev->wpan_phy.dev, "wpan-phy%d", rdev->wpan_phy_idx); + + rdev->wpan_phy.dev.class = &wpan_phy_class; + rdev->wpan_phy.dev.platform_data = rdev; + + init_waitqueue_head(&rdev->dev_wait); + + return &rdev->wpan_phy; +} +EXPORT_SYMBOL(wpan_phy_new); + +int wpan_phy_register(struct wpan_phy *phy) +{ + struct cfg802154_registered_device *rdev = wpan_phy_to_rdev(phy); + int ret; + + rtnl_lock(); + ret = device_add(&phy->dev); + if (ret) { + rtnl_unlock(); + return ret; + } + + list_add_rcu(&rdev->list, &cfg802154_rdev_list); + cfg802154_rdev_list_generation++; + + /* TODO phy registered lock */ + rtnl_unlock(); + + /* TODO nl802154 phy notify */ + + return 0; +} +EXPORT_SYMBOL(wpan_phy_register); + +void wpan_phy_unregister(struct wpan_phy *phy) +{ + struct cfg802154_registered_device *rdev = wpan_phy_to_rdev(phy); + + wait_event(rdev->dev_wait, ({ + int __count; + rtnl_lock(); + __count = rdev->opencount; + rtnl_unlock(); + __count == 0; })); + + rtnl_lock(); + /* TODO nl802154 phy notify */ + /* TODO phy registered lock */ + + WARN_ON(!list_empty(&rdev->wpan_dev_list)); + + /* First remove the hardware from everywhere, this makes + * it impossible to find from userspace. + */ + list_del_rcu(&rdev->list); + synchronize_rcu(); + + cfg802154_rdev_list_generation++; + + device_del(&phy->dev); + + rtnl_unlock(); +} +EXPORT_SYMBOL(wpan_phy_unregister); + +void wpan_phy_free(struct wpan_phy *phy) +{ + put_device(&phy->dev); +} +EXPORT_SYMBOL(wpan_phy_free); + +void cfg802154_dev_free(struct cfg802154_registered_device *rdev) +{ + kfree(rdev); +} + +static void +cfg802154_update_iface_num(struct cfg802154_registered_device *rdev, + int iftype, int num) +{ + ASSERT_RTNL(); + + rdev->num_running_ifaces += num; +} + +static int cfg802154_netdev_notifier_call(struct notifier_block *nb, + unsigned long state, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; + struct cfg802154_registered_device *rdev; + + if (!wpan_dev) + return NOTIFY_DONE; + + rdev = wpan_phy_to_rdev(wpan_dev->wpan_phy); + + /* TODO WARN_ON unspec type */ + + switch (state) { + /* TODO NETDEV_DEVTYPE */ + case NETDEV_REGISTER: + wpan_dev->identifier = ++rdev->wpan_dev_id; + list_add_rcu(&wpan_dev->list, &rdev->wpan_dev_list); + rdev->devlist_generation++; + + wpan_dev->netdev = dev; + break; + case NETDEV_DOWN: + cfg802154_update_iface_num(rdev, wpan_dev->iftype, -1); + + rdev->opencount--; + wake_up(&rdev->dev_wait); + break; + case NETDEV_UP: + cfg802154_update_iface_num(rdev, wpan_dev->iftype, 1); + + rdev->opencount++; + break; + case NETDEV_UNREGISTER: + /* It is possible to get NETDEV_UNREGISTER + * multiple times. To detect that, check + * that the interface is still on the list + * of registered interfaces, and only then + * remove and clean it up. 
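
wpan_phy_new() above hands out phy indexes from a global counter, treats a wrap into negative values as allocation failure, and shifts the post-increment result back so indexes start at 0. The same logic expressed with C11 atomics:

#include <stdatomic.h>

static atomic_int phy_counter;	/* starts at 0, like ATOMIC_INIT(0) */

static int alloc_phy_idx(void)
{
	/* fetch_add returns the old value, so +1 gives the
	 * atomic_inc_return() result.
	 */
	int idx = atomic_fetch_add(&phy_counter, 1) + 1;

	if (idx < 0) {			/* ugh, wrapped */
		atomic_fetch_sub(&phy_counter, 1);
		return -1;
	}
	return idx - 1;			/* make it start at 0 */
}
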
+ */ + if (!list_empty(&wpan_dev->list)) { + list_del_rcu(&wpan_dev->list); + rdev->devlist_generation++; + } + /* synchronize (so that we won't find this netdev + * from other code any more) and then clear the list + * head so that the above code can safely check for + * !list_empty() to avoid double-cleanup. + */ + synchronize_rcu(); + INIT_LIST_HEAD(&wpan_dev->list); + break; + default: + return NOTIFY_DONE; + } + + return NOTIFY_OK; +} + +static struct notifier_block cfg802154_netdev_notifier = { + .notifier_call = cfg802154_netdev_notifier_call, +}; + +static int __init wpan_phy_class_init(void) +{ + int rc; + + rc = wpan_phy_sysfs_init(); + if (rc) + goto err; + + rc = register_netdevice_notifier(&cfg802154_netdev_notifier); + if (rc) + goto err_nl; + + rc = ieee802154_nl_init(); + if (rc) + goto err_notifier; + + rc = nl802154_init(); + if (rc) + goto err_ieee802154_nl; + + return 0; + +err_ieee802154_nl: + ieee802154_nl_exit(); + +err_notifier: + unregister_netdevice_notifier(&cfg802154_netdev_notifier); +err_nl: + wpan_phy_sysfs_exit(); +err: + return rc; +} +subsys_initcall(wpan_phy_class_init); + +static void __exit wpan_phy_class_exit(void) +{ + nl802154_exit(); + ieee802154_nl_exit(); + unregister_netdevice_notifier(&cfg802154_netdev_notifier); + wpan_phy_sysfs_exit(); +} +module_exit(wpan_phy_class_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("IEEE 802.15.4 configuration interface"); +MODULE_AUTHOR("Dmitry Eremin-Solenikov"); + diff --git a/net/ieee802154/core.h b/net/ieee802154/core.h new file mode 100644 index 0000000000000000000000000000000000000000..f3e95580caee0e96ada8bee0bb2a2e72aa21eda3 --- /dev/null +++ b/net/ieee802154/core.h @@ -0,0 +1,46 @@ +#ifndef __IEEE802154_CORE_H +#define __IEEE802154_CORE_H + +#include + +struct cfg802154_registered_device { + const struct cfg802154_ops *ops; + struct list_head list; + + /* wpan_phy index, internal only */ + int wpan_phy_idx; + + /* also protected by devlist_mtx */ + int opencount; + wait_queue_head_t dev_wait; + + /* protected by RTNL only */ + int num_running_ifaces; + + /* associated wpan interfaces, protected by rtnl or RCU */ + struct list_head wpan_dev_list; + int devlist_generation, wpan_dev_id; + + /* must be last because of the way we do wpan_phy_priv(), + * and it should at least be aligned to NETDEV_ALIGN + */ + struct wpan_phy wpan_phy __aligned(NETDEV_ALIGN); +}; + +static inline struct cfg802154_registered_device * +wpan_phy_to_rdev(struct wpan_phy *wpan_phy) +{ + BUG_ON(!wpan_phy); + return container_of(wpan_phy, struct cfg802154_registered_device, + wpan_phy); +} + +extern struct list_head cfg802154_rdev_list; +extern int cfg802154_rdev_list_generation; + +/* free object */ +void cfg802154_dev_free(struct cfg802154_registered_device *rdev); +struct cfg802154_registered_device * +cfg802154_rdev_by_wpan_phy_idx(int wpan_phy_idx); + +#endif /* __IEEE802154_CORE_H */ diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c index ef2ad8aaef1361f2e061519dd773cd0f03dd4b30..d1930b70c4aacf707e4463a0ca786a66cf4e7501 100644 --- a/net/ieee802154/dgram.c +++ b/net/ieee802154/dgram.c @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
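
core.h above recovers the registered device from an embedded wpan_phy with container_of(), which works only because the wpan_phy is embedded (not pointed to) and placed last for NETDEV_ALIGN. A standalone illustration with simplified struct names (these are not the kernel's definitions):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct wpan_phy { int idx; };

struct registered_device {
	int generation;
	struct wpan_phy phy;	/* embedded member, kept last */
};

int main(void)
{
	struct registered_device rdev = { .generation = 1 };
	struct wpan_phy *phy = &rdev.phy;

	/* Recover the wrapper from the embedded member, as
	 * wpan_phy_to_rdev() does.
	 */
	struct registered_device *back =
		container_of(phy, struct registered_device, phy);

	printf("%d\n", back->generation);	/* prints 1 */
	return 0;
}
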
- * * Written by: * Sergey Lapin * Dmitry Eremin-Solenikov @@ -27,9 +23,9 @@ #include #include #include +#include #include #include -#include #include #include @@ -158,7 +154,6 @@ static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg) spin_unlock_bh(&sk->sk_receive_queue.lock); return put_user(amount, (int __user *)arg); } - } return -ENOIOCTLCMD; @@ -280,7 +275,7 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk, if (err < 0) goto out_skb; - err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); + err = memcpy_from_msg(skb_put(skb, size), msg, size); if (err < 0) goto out_skb; @@ -324,7 +319,7 @@ static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk, } /* FIXME: skip headers if necessary ?! */ - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto done; diff --git a/net/ieee802154/header_ops.c b/net/ieee802154/header_ops.c index c09294e39ca60326d5b40c8431bf202ce5559225..a051b69931779438b33f61456203c451167fa591 100644 --- a/net/ieee802154/header_ops.c +++ b/net/ieee802154/header_ops.c @@ -14,8 +14,9 @@ * Phoebe Buckheister */ +#include + #include -#include #include static int diff --git a/net/ieee802154/ieee802154.h b/net/ieee802154/ieee802154.h index 5d352f86979e40b191e08d4bb1d63100560fd1b6..a5d7515b7f6256424eb26ce4f07250571ce7f6f7 100644 --- a/net/ieee802154/ieee802154.h +++ b/net/ieee802154/ieee802154.h @@ -10,16 +10,12 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * */ #ifndef IEEE_802154_LOCAL_H #define IEEE_802154_LOCAL_H int __init ieee802154_nl_init(void); -void __exit ieee802154_nl_exit(void); +void ieee802154_nl_exit(void); #define IEEE802154_OP(_cmd, _func) \ { \ diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c index 9222966f5e6d7438183825ffe306a3c4ab502b10..fa1464762d0dafdc481b68a7cbf46cfd45d8b30a 100644 --- a/net/ieee802154/netlink.c +++ b/net/ieee802154/netlink.c @@ -1,5 +1,5 @@ /* - * Netlink inteface for IEEE 802.15.4 stack + * Netlink interface for IEEE 802.15.4 stack * * Copyright 2007, 2008 Siemens AG * @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- * * Written by: * Sergey Lapin * Dmitry Eremin-Solenikov @@ -77,7 +73,7 @@ out: } struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info, - int flags, u8 req) + int flags, u8 req) { void *hdr; struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); @@ -151,7 +147,6 @@ static const struct genl_multicast_group ieee802154_mcgrps[] = { [IEEE802154_BEACON_MCGRP] = { .name = IEEE802154_MCAST_BEACON_NAME, }, }; - int __init ieee802154_nl_init(void) { return genl_register_family_with_ops_groups(&nl802154_family, @@ -159,7 +154,7 @@ int __init ieee802154_nl_init(void) ieee802154_mcgrps); } -void __exit ieee802154_nl_exit(void) +void ieee802154_nl_exit(void) { genl_unregister_family(&nl802154_family); } diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c index c6bfe22bfa5ebedff765e6dcd4bf7bebd26d0b49..cd919493c976000e6b5cd206014110500e68886e 100644 --- a/net/ieee802154/nl-mac.c +++ b/net/ieee802154/nl-mac.c @@ -1,5 +1,5 @@ /* - * Netlink inteface for IEEE 802.15.4 stack + * Netlink interface for IEEE 802.15.4 stack * * Copyright 2007, 2008 Siemens AG * @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * Written by: * Sergey Lapin * Dmitry Eremin-Solenikov @@ -26,16 +22,15 @@ #include #include #include +#include #include #include #include #include #include #include -#include -#include #include -#include +#include #include "ieee802154.h" @@ -59,186 +54,7 @@ static __le16 nla_get_shortaddr(const struct nlattr *nla) return cpu_to_le16(nla_get_u16(nla)); } -int ieee802154_nl_assoc_indic(struct net_device *dev, - struct ieee802154_addr *addr, - u8 cap) -{ - struct sk_buff *msg; - - pr_debug("%s\n", __func__); - - if (addr->mode != IEEE802154_ADDR_LONG) { - pr_err("%s: received non-long source address!\n", __func__); - return -EINVAL; - } - - msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_INDIC); - if (!msg) - return -ENOBUFS; - - if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || - nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || - nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr) || - nla_put_hwaddr(msg, IEEE802154_ATTR_SRC_HW_ADDR, - addr->extended_addr) || - nla_put_u8(msg, IEEE802154_ATTR_CAPABILITY, cap)) - goto nla_put_failure; - - return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP); - -nla_put_failure: - nlmsg_free(msg); - return -ENOBUFS; -} -EXPORT_SYMBOL(ieee802154_nl_assoc_indic); - -int ieee802154_nl_assoc_confirm(struct net_device *dev, __le16 short_addr, - u8 status) -{ - struct sk_buff *msg; - - pr_debug("%s\n", __func__); - - msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_CONF); - if (!msg) - return -ENOBUFS; - - if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || - nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || - nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr) || - nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) || - nla_put_u8(msg, IEEE802154_ATTR_STATUS, status)) - goto nla_put_failure; - return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP); - -nla_put_failure: - nlmsg_free(msg); - return -ENOBUFS; -} -EXPORT_SYMBOL(ieee802154_nl_assoc_confirm); - -int ieee802154_nl_disassoc_indic(struct net_device *dev, - struct ieee802154_addr 
*addr, - u8 reason) -{ - struct sk_buff *msg; - - pr_debug("%s\n", __func__); - - msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_INDIC); - if (!msg) - return -ENOBUFS; - - if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || - nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || - nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr)) - goto nla_put_failure; - if (addr->mode == IEEE802154_ADDR_LONG) { - if (nla_put_hwaddr(msg, IEEE802154_ATTR_SRC_HW_ADDR, - addr->extended_addr)) - goto nla_put_failure; - } else { - if (nla_put_shortaddr(msg, IEEE802154_ATTR_SRC_SHORT_ADDR, - addr->short_addr)) - goto nla_put_failure; - } - if (nla_put_u8(msg, IEEE802154_ATTR_REASON, reason)) - goto nla_put_failure; - return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP); - -nla_put_failure: - nlmsg_free(msg); - return -ENOBUFS; -} -EXPORT_SYMBOL(ieee802154_nl_disassoc_indic); - -int ieee802154_nl_disassoc_confirm(struct net_device *dev, u8 status) -{ - struct sk_buff *msg; - - pr_debug("%s\n", __func__); - - msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_CONF); - if (!msg) - return -ENOBUFS; - - if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || - nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || - nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr) || - nla_put_u8(msg, IEEE802154_ATTR_STATUS, status)) - goto nla_put_failure; - return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP); - -nla_put_failure: - nlmsg_free(msg); - return -ENOBUFS; -} -EXPORT_SYMBOL(ieee802154_nl_disassoc_confirm); - -int ieee802154_nl_beacon_indic(struct net_device *dev, __le16 panid, - __le16 coord_addr) -{ - struct sk_buff *msg; - - pr_debug("%s\n", __func__); - - msg = ieee802154_nl_create(0, IEEE802154_BEACON_NOTIFY_INDIC); - if (!msg) - return -ENOBUFS; - - if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || - nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || - nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr) || - nla_put_shortaddr(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, - coord_addr) || - nla_put_shortaddr(msg, IEEE802154_ATTR_COORD_PAN_ID, panid)) - goto nla_put_failure; - return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP); - -nla_put_failure: - nlmsg_free(msg); - return -ENOBUFS; -} -EXPORT_SYMBOL(ieee802154_nl_beacon_indic); - -int ieee802154_nl_scan_confirm(struct net_device *dev, - u8 status, u8 scan_type, - u32 unscanned, u8 page, - u8 *edl/* , struct list_head *pan_desc_list */) -{ - struct sk_buff *msg; - - pr_debug("%s\n", __func__); - - msg = ieee802154_nl_create(0, IEEE802154_SCAN_CONF); - if (!msg) - return -ENOBUFS; - - if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || - nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || - nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr) || - nla_put_u8(msg, IEEE802154_ATTR_STATUS, status) || - nla_put_u8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type) || - nla_put_u32(msg, IEEE802154_ATTR_CHANNELS, unscanned) || - nla_put_u8(msg, IEEE802154_ATTR_PAGE, page) || - (edl && - nla_put(msg, IEEE802154_ATTR_ED_LIST, 27, edl))) - goto nla_put_failure; - return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP); - -nla_put_failure: - nlmsg_free(msg); - return -ENOBUFS; -} -EXPORT_SYMBOL(ieee802154_nl_scan_confirm); - -int ieee802154_nl_start_confirm(struct net_device *dev, u8 status) +static int ieee802154_nl_start_confirm(struct net_device *dev, u8 status) { struct sk_buff *msg; @@ 
-278,8 +94,9 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid, goto out; ops = ieee802154_mlme_ops(dev); - phy = ops->get_phy(dev); + phy = dev->ieee802154_ptr->wpan_phy; BUG_ON(!phy); + get_device(&phy->dev); short_addr = ops->get_short_addr(dev); pan_id = ops->get_pan_id(dev); @@ -296,7 +113,9 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid, if (ops->get_mac_params) { struct ieee802154_mac_params params; + rtnl_lock(); ops->get_mac_params(dev, ¶ms); + rtnl_unlock(); if (nla_put_s8(msg, IEEE802154_ATTR_TXPOWER, params.transmit_power) || @@ -347,7 +166,10 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info) if (!dev) return NULL; - if (dev->type != ARPHRD_IEEE802154) { + /* Check on mtu is currently a hacked solution because lowpan + * and wpan have the same ARPHRD type. + */ + if (dev->type != ARPHRD_IEEE802154 || dev->mtu != IEEE802154_MTU) { dev_put(dev); return NULL; } @@ -481,7 +303,7 @@ int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info) u8 channel, bcn_ord, sf_ord; u8 page; int pan_coord, blx, coord_realign; - int ret = -EOPNOTSUPP; + int ret = -EBUSY; if (!info->attrs[IEEE802154_ATTR_COORD_PAN_ID] || !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR] || @@ -497,9 +319,15 @@ int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info) dev = ieee802154_nl_get_dev(info); if (!dev) return -ENODEV; - if (!ieee802154_mlme_ops(dev)->start_req) + + if (netif_running(dev)) goto out; + if (!ieee802154_mlme_ops(dev)->start_req) { + ret = -EOPNOTSUPP; + goto out; + } + addr.mode = IEEE802154_ADDR_SHORT; addr.short_addr = nla_get_shortaddr( info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]); @@ -518,15 +346,21 @@ int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info) else page = 0; - if (addr.short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST)) { ieee802154_nl_start_confirm(dev, IEEE802154_NO_SHORT_ADDRESS); dev_put(dev); return -EINVAL; } + rtnl_lock(); ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, page, bcn_ord, sf_ord, pan_coord, blx, coord_realign); + rtnl_unlock(); + + /* FIXME: add validation for unused parameters to be sane + * for SoftMAC + */ + ieee802154_nl_start_confirm(dev, IEEE802154_SUCCESS); out: dev_put(dev); @@ -562,7 +396,6 @@ int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info) else page = 0; - ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels, page, duration); @@ -616,7 +449,11 @@ int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb) idx = 0; for_each_netdev(net, dev) { - if (idx < s_idx || (dev->type != ARPHRD_IEEE802154)) + /* Check on mtu is currently a hacked solution because lowpan + * and wpan have the same ARPHRD type. 
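
As the comment above notes, a 6LoWPAN link and its underlying WPAN device share ARPHRD_IEEE802154, so the netlink code falls back to comparing the MTU (127, the 802.15.4 frame limit). The same disambiguation can be reproduced from userspace via sysfs; a sketch with error handling trimmed:

#include <stdio.h>

#define ARPHRD_IEEE802154	804	/* from linux/if_arp.h */
#define IEEE802154_MTU		127

static int read_net_attr(const char *ifname, const char *attr)
{
	char path[80];
	int val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/net/%s/%s", ifname, attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

static int is_wpan_iface(const char *ifname)
{
	return read_net_attr(ifname, "type") == ARPHRD_IEEE802154 &&
	       read_net_attr(ifname, "mtu") == IEEE802154_MTU;
}
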
+ */ + if (idx < s_idx || dev->type != ARPHRD_IEEE802154 || + dev->mtu != IEEE802154_MTU) goto cont; if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).portid, @@ -666,22 +503,10 @@ int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info) !info->attrs[IEEE802154_ATTR_FRAME_RETRIES]) goto out; - phy = ops->get_phy(dev); - - if ((!phy->set_lbt && info->attrs[IEEE802154_ATTR_LBT_ENABLED]) || - (!phy->set_cca_mode && info->attrs[IEEE802154_ATTR_CCA_MODE]) || - (!phy->set_cca_ed_level && - info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]) || - (!phy->set_csma_params && - (info->attrs[IEEE802154_ATTR_CSMA_RETRIES] || - info->attrs[IEEE802154_ATTR_CSMA_MIN_BE] || - info->attrs[IEEE802154_ATTR_CSMA_MAX_BE])) || - (!phy->set_frame_retries && - info->attrs[IEEE802154_ATTR_FRAME_RETRIES])) { - rc = -EOPNOTSUPP; - goto out_phy; - } + phy = dev->ieee802154_ptr->wpan_phy; + get_device(&phy->dev); + rtnl_lock(); ops->get_mac_params(dev, ¶ms); if (info->attrs[IEEE802154_ATTR_TXPOWER]) @@ -709,20 +534,18 @@ int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info) params.frame_retries = nla_get_s8(info->attrs[IEEE802154_ATTR_FRAME_RETRIES]); rc = ops->set_mac_params(dev, ¶ms); + rtnl_unlock(); wpan_phy_put(phy); dev_put(dev); - return rc; -out_phy: - wpan_phy_put(phy); + return 0; + out: dev_put(dev); return rc; } - - static int ieee802154_llsec_parse_key_id(struct genl_info *info, struct ieee802154_llsec_key_id *desc) @@ -938,8 +761,6 @@ out: return rc; } - - struct llsec_dump_data { struct sk_buff *skb; int s_idx, s_idx2; @@ -962,7 +783,11 @@ ieee802154_llsec_dump_table(struct sk_buff *skb, struct netlink_callback *cb, int rc; for_each_netdev(net, dev) { - if (idx < first_dev || dev->type != ARPHRD_IEEE802154) + /* Check on mtu is currently a hacked solution because lowpan + * and wpan have the same ARPHRD type. + */ + if (idx < first_dev || dev->type != ARPHRD_IEEE802154 || + dev->mtu != IEEE802154_MTU) goto skip; data.ops = ieee802154_mlme_ops(dev); @@ -1012,8 +837,6 @@ ieee802154_nl_llsec_change(struct sk_buff *skb, struct genl_info *info, return rc; } - - static int ieee802154_llsec_parse_key(struct genl_info *info, struct ieee802154_llsec_key *key) @@ -1158,8 +981,6 @@ int ieee802154_llsec_dump_keys(struct sk_buff *skb, struct netlink_callback *cb) return ieee802154_llsec_dump_table(skb, cb, llsec_iter_keys); } - - static int llsec_parse_dev(struct genl_info *info, struct ieee802154_llsec_device *dev) @@ -1290,8 +1111,6 @@ int ieee802154_llsec_dump_devs(struct sk_buff *skb, struct netlink_callback *cb) return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devs); } - - static int llsec_add_devkey(struct net_device *dev, struct genl_info *info) { struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev); @@ -1406,8 +1225,6 @@ int ieee802154_llsec_dump_devkeys(struct sk_buff *skb, return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devkeys); } - - static int llsec_parse_seclevel(struct genl_info *info, struct ieee802154_llsec_seclevel *sl) diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c index 972baf83411af7c64a6f11826ff7b1ab61b43ec4..7baf98b146116bd52f961c371d31a33878c0b8d8 100644 --- a/net/ieee802154/nl-phy.c +++ b/net/ieee802154/nl-phy.c @@ -1,5 +1,5 @@ /* - * Netlink inteface for IEEE 802.15.4 stack + * Netlink interface for IEEE 802.15.4 stack * * Copyright 2007, 2008 Siemens AG * @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
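
ieee802154_llsec_dump_table() above is restartable: netlink may invoke a dump callback several times, so the position already emitted is parked in cb->args and earlier entries are skipped on the next pass. The pattern in miniature, with a hypothetical emit() standing in for filling the skb:

struct cursor { int start; };

static int dump(const int *items, int n, struct cursor *c,
		int (*emit)(int item))
{
	int idx;

	for (idx = 0; idx < n; idx++) {
		if (idx < c->start)
			continue;	/* already sent in an earlier pass */
		if (emit(items[idx]) < 0)
			break;		/* receive buffer full, stop here */
	}
	c->start = idx;			/* resume point for the next call */
	return idx;
}
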
* - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * Written by: * Sergey Lapin * Dmitry Eremin-Solenikov @@ -27,13 +23,15 @@ #include #include #include -#include +#include #include #include #include /* for rtnl_{un,}lock */ #include #include "ieee802154.h" +#include "rdev-ops.h" +#include "core.h" static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid, u32 seq, int flags, struct wpan_phy *phy) @@ -96,7 +94,6 @@ int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info) if (name[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] != '\0') return -EINVAL; /* phy name should be null-terminated */ - phy = wpan_phy_find(name); if (!phy) return -ENODEV; @@ -207,11 +204,6 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info) if (!msg) goto out_dev; - if (!phy->add_iface) { - rc = -EINVAL; - goto nla_put_failure; - } - if (info->attrs[IEEE802154_ATTR_HW_ADDR] && nla_len(info->attrs[IEEE802154_ATTR_HW_ADDR]) != IEEE802154_ADDR_LEN) { @@ -227,11 +219,13 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info) } } - dev = phy->add_iface(phy, devname, type); + dev = rdev_add_virtual_intf_deprecated(wpan_phy_to_rdev(phy), devname, + type); if (IS_ERR(dev)) { rc = PTR_ERR(dev); goto nla_put_failure; } + dev_hold(dev); if (info->attrs[IEEE802154_ATTR_HW_ADDR]) { struct sockaddr addr; @@ -261,7 +255,7 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info) dev_unregister: rtnl_lock(); /* del_iface must be called with RTNL lock */ - phy->del_iface(phy, dev); + rdev_del_virtual_intf_deprecated(wpan_phy_to_rdev(phy), dev); dev_put(dev); rtnl_unlock(); nla_put_failure: @@ -292,8 +286,9 @@ int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info) if (!dev) return -ENODEV; - phy = ieee802154_mlme_ops(dev)->get_phy(dev); + phy = dev->ieee802154_ptr->wpan_phy; BUG_ON(!phy); + get_device(&phy->dev); rc = -EINVAL; /* phy name is optional, but should be checked if it's given */ @@ -323,13 +318,8 @@ int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info) if (!msg) goto out_dev; - if (!phy->del_iface) { - rc = -EINVAL; - goto nla_put_failure; - } - rtnl_lock(); - phy->del_iface(phy, dev); + rdev_del_virtual_intf_deprecated(wpan_phy_to_rdev(phy), dev); /* We don't have device anymore */ dev_put(dev); diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c new file mode 100644 index 0000000000000000000000000000000000000000..8896477446977a7b4fdab732aec1308c60a46d8b --- /dev/null +++ b/net/ieee802154/nl802154.c @@ -0,0 +1,957 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
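
The nl802154 family defined below registers with GENL_ID_GENERATE, so its numeric ID is assigned at runtime and only the name "nl802154" is stable ABI. Userspace therefore resolves the ID first; with libnl-genl that looks roughly like this (link with pkg-config --libs libnl-genl-3.0):

#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

int main(void)
{
	struct nl_sock *sock = nl_socket_alloc();
	int family;

	if (!sock || genl_connect(sock)) {
		fprintf(stderr, "cannot connect generic netlink\n");
		return 1;
	}

	/* look the runtime-assigned family ID up by name */
	family = genl_ctrl_resolve(sock, "nl802154");
	if (family < 0)
		fprintf(stderr, "nl802154 not available\n");
	else
		printf("nl802154 family id: %d\n", family);

	nl_socket_free(sock);
	return family < 0 ? 1 : 0;
}
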
+ * + * Authors: + * Alexander Aring + * + * Based on: net/wireless/nl80211.c + */ + +#include + +#include +#include +#include +#include +#include +#include + +#include "nl802154.h" +#include "rdev-ops.h" +#include "core.h" + +static int nl802154_pre_doit(const struct genl_ops *ops, struct sk_buff *skb, + struct genl_info *info); + +static void nl802154_post_doit(const struct genl_ops *ops, struct sk_buff *skb, + struct genl_info *info); + +/* the netlink family */ +static struct genl_family nl802154_fam = { + .id = GENL_ID_GENERATE, /* don't bother with a hardcoded ID */ + .name = NL802154_GENL_NAME, /* have users key off the name instead */ + .hdrsize = 0, /* no private header */ + .version = 1, /* no particular meaning now */ + .maxattr = NL802154_ATTR_MAX, + .netnsok = true, + .pre_doit = nl802154_pre_doit, + .post_doit = nl802154_post_doit, +}; + +/* multicast groups */ +enum nl802154_multicast_groups { + NL802154_MCGRP_CONFIG, +}; + +static const struct genl_multicast_group nl802154_mcgrps[] = { + [NL802154_MCGRP_CONFIG] = { .name = "config", }, +}; + +/* returns ERR_PTR values */ +static struct wpan_dev * +__cfg802154_wpan_dev_from_attrs(struct net *netns, struct nlattr **attrs) +{ + struct cfg802154_registered_device *rdev; + struct wpan_dev *result = NULL; + bool have_ifidx = attrs[NL802154_ATTR_IFINDEX]; + bool have_wpan_dev_id = attrs[NL802154_ATTR_WPAN_DEV]; + u64 wpan_dev_id; + int wpan_phy_idx = -1; + int ifidx = -1; + + ASSERT_RTNL(); + + if (!have_ifidx && !have_wpan_dev_id) + return ERR_PTR(-EINVAL); + + if (have_ifidx) + ifidx = nla_get_u32(attrs[NL802154_ATTR_IFINDEX]); + if (have_wpan_dev_id) { + wpan_dev_id = nla_get_u64(attrs[NL802154_ATTR_WPAN_DEV]); + wpan_phy_idx = wpan_dev_id >> 32; + } + + list_for_each_entry(rdev, &cfg802154_rdev_list, list) { + struct wpan_dev *wpan_dev; + + /* TODO netns compare */ + + if (have_wpan_dev_id && rdev->wpan_phy_idx != wpan_phy_idx) + continue; + + list_for_each_entry(wpan_dev, &rdev->wpan_dev_list, list) { + if (have_ifidx && wpan_dev->netdev && + wpan_dev->netdev->ifindex == ifidx) { + result = wpan_dev; + break; + } + if (have_wpan_dev_id && + wpan_dev->identifier == (u32)wpan_dev_id) { + result = wpan_dev; + break; + } + } + + if (result) + break; + } + + if (result) + return result; + + return ERR_PTR(-ENODEV); +} + +static struct cfg802154_registered_device * +__cfg802154_rdev_from_attrs(struct net *netns, struct nlattr **attrs) +{ + struct cfg802154_registered_device *rdev = NULL, *tmp; + struct net_device *netdev; + + ASSERT_RTNL(); + + if (!attrs[NL802154_ATTR_WPAN_PHY] && + !attrs[NL802154_ATTR_IFINDEX] && + !attrs[NL802154_ATTR_WPAN_DEV]) + return ERR_PTR(-EINVAL); + + if (attrs[NL802154_ATTR_WPAN_PHY]) + rdev = cfg802154_rdev_by_wpan_phy_idx( + nla_get_u32(attrs[NL802154_ATTR_WPAN_PHY])); + + if (attrs[NL802154_ATTR_WPAN_DEV]) { + u64 wpan_dev_id = nla_get_u64(attrs[NL802154_ATTR_WPAN_DEV]); + struct wpan_dev *wpan_dev; + bool found = false; + + tmp = cfg802154_rdev_by_wpan_phy_idx(wpan_dev_id >> 32); + if (tmp) { + /* make sure wpan_dev exists */ + list_for_each_entry(wpan_dev, &tmp->wpan_dev_list, list) { + if (wpan_dev->identifier != (u32)wpan_dev_id) + continue; + found = true; + break; + } + + if (!found) + tmp = NULL; + + if (rdev && tmp != rdev) + return ERR_PTR(-EINVAL); + rdev = tmp; + } + } + + if (attrs[NL802154_ATTR_IFINDEX]) { + int ifindex = nla_get_u32(attrs[NL802154_ATTR_IFINDEX]); + + netdev = __dev_get_by_index(netns, ifindex); + if (netdev) { + if (netdev->ieee802154_ptr) + tmp = wpan_phy_to_rdev( + 
netdev->ieee802154_ptr->wpan_phy); + else + tmp = NULL; + + /* not wireless device -- return error */ + if (!tmp) + return ERR_PTR(-EINVAL); + + /* mismatch -- return error */ + if (rdev && tmp != rdev) + return ERR_PTR(-EINVAL); + + rdev = tmp; + } + } + + if (!rdev) + return ERR_PTR(-ENODEV); + + /* TODO netns compare */ + + return rdev; +} + +/* This function returns a pointer to the driver + * that the genl_info item that is passed refers to. + * + * The result of this can be a PTR_ERR and hence must + * be checked with IS_ERR() for errors. + */ +static struct cfg802154_registered_device * +cfg802154_get_dev_from_info(struct net *netns, struct genl_info *info) +{ + return __cfg802154_rdev_from_attrs(netns, info->attrs); +} + +/* policy for the attributes */ +static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = { + [NL802154_ATTR_WPAN_PHY] = { .type = NLA_U32 }, + [NL802154_ATTR_WPAN_PHY_NAME] = { .type = NLA_NUL_STRING, + .len = 20-1 }, + + [NL802154_ATTR_IFINDEX] = { .type = NLA_U32 }, + [NL802154_ATTR_IFTYPE] = { .type = NLA_U32 }, + [NL802154_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 }, + + [NL802154_ATTR_WPAN_DEV] = { .type = NLA_U64 }, + + [NL802154_ATTR_PAGE] = { .type = NLA_U8, }, + [NL802154_ATTR_CHANNEL] = { .type = NLA_U8, }, + + [NL802154_ATTR_TX_POWER] = { .type = NLA_S8, }, + + [NL802154_ATTR_CCA_MODE] = { .type = NLA_U8, }, + + [NL802154_ATTR_SUPPORTED_CHANNEL] = { .type = NLA_U32, }, + + [NL802154_ATTR_PAN_ID] = { .type = NLA_U16, }, + [NL802154_ATTR_EXTENDED_ADDR] = { .type = NLA_U64 }, + [NL802154_ATTR_SHORT_ADDR] = { .type = NLA_U16, }, + + [NL802154_ATTR_MIN_BE] = { .type = NLA_U8, }, + [NL802154_ATTR_MAX_BE] = { .type = NLA_U8, }, + [NL802154_ATTR_MAX_CSMA_BACKOFFS] = { .type = NLA_U8, }, + + [NL802154_ATTR_MAX_FRAME_RETRIES] = { .type = NLA_S8, }, + + [NL802154_ATTR_LBT_MODE] = { .type = NLA_U8, }, +}; + +/* message building helper */ +static inline void *nl802154hdr_put(struct sk_buff *skb, u32 portid, u32 seq, + int flags, u8 cmd) +{ + /* since there is no private header just add the generic one */ + return genlmsg_put(skb, portid, seq, &nl802154_fam, flags, cmd); +} + +static int +nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev, + struct sk_buff *msg) +{ + struct nlattr *nl_page; + unsigned long page; + + nl_page = nla_nest_start(msg, NL802154_ATTR_CHANNELS_SUPPORTED); + if (!nl_page) + return -ENOBUFS; + + for (page = 0; page <= IEEE802154_MAX_PAGE; page++) { + if (nla_put_u32(msg, NL802154_ATTR_SUPPORTED_CHANNEL, + rdev->wpan_phy.channels_supported[page])) + return -ENOBUFS; + } + nla_nest_end(msg, nl_page); + + return 0; +} + +static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev, + enum nl802154_commands cmd, + struct sk_buff *msg, u32 portid, u32 seq, + int flags) +{ + void *hdr; + + hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); + if (!hdr) + return -ENOBUFS; + + if (nla_put_u32(msg, NL802154_ATTR_WPAN_PHY, rdev->wpan_phy_idx) || + nla_put_string(msg, NL802154_ATTR_WPAN_PHY_NAME, + wpan_phy_name(&rdev->wpan_phy)) || + nla_put_u32(msg, NL802154_ATTR_GENERATION, + cfg802154_rdev_list_generation)) + goto nla_put_failure; + + if (cmd != NL802154_CMD_NEW_WPAN_PHY) + goto finish; + + /* DUMP PHY PIB */ + + /* current channel settings */ + if (nla_put_u8(msg, NL802154_ATTR_PAGE, + rdev->wpan_phy.current_page) || + nla_put_u8(msg, NL802154_ATTR_CHANNEL, + rdev->wpan_phy.current_channel)) + goto nla_put_failure; + + /* supported channels array */ + if 
(nl802154_send_wpan_phy_channels(rdev, msg)) + goto nla_put_failure; + + /* cca mode */ + if (nla_put_u8(msg, NL802154_ATTR_CCA_MODE, + rdev->wpan_phy.cca_mode)) + goto nla_put_failure; + + if (nla_put_s8(msg, NL802154_ATTR_TX_POWER, + rdev->wpan_phy.transmit_power)) + goto nla_put_failure; + +finish: + return genlmsg_end(msg, hdr); + +nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +struct nl802154_dump_wpan_phy_state { + s64 filter_wpan_phy; + long start; + +}; + +static int nl802154_dump_wpan_phy_parse(struct sk_buff *skb, + struct netlink_callback *cb, + struct nl802154_dump_wpan_phy_state *state) +{ + struct nlattr **tb = nl802154_fam.attrbuf; + int ret = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl802154_fam.hdrsize, + tb, nl802154_fam.maxattr, nl802154_policy); + + /* TODO check if we can handle error here, + * we have no backward compatibility + */ + if (ret) + return 0; + + if (tb[NL802154_ATTR_WPAN_PHY]) + state->filter_wpan_phy = nla_get_u32(tb[NL802154_ATTR_WPAN_PHY]); + if (tb[NL802154_ATTR_WPAN_DEV]) + state->filter_wpan_phy = nla_get_u64(tb[NL802154_ATTR_WPAN_DEV]) >> 32; + if (tb[NL802154_ATTR_IFINDEX]) { + struct net_device *netdev; + struct cfg802154_registered_device *rdev; + int ifidx = nla_get_u32(tb[NL802154_ATTR_IFINDEX]); + + /* TODO netns */ + netdev = __dev_get_by_index(&init_net, ifidx); + if (!netdev) + return -ENODEV; + if (netdev->ieee802154_ptr) { + rdev = wpan_phy_to_rdev( + netdev->ieee802154_ptr->wpan_phy); + state->filter_wpan_phy = rdev->wpan_phy_idx; + } + } + + return 0; +} + +static int +nl802154_dump_wpan_phy(struct sk_buff *skb, struct netlink_callback *cb) +{ + int idx = 0, ret; + struct nl802154_dump_wpan_phy_state *state = (void *)cb->args[0]; + struct cfg802154_registered_device *rdev; + + rtnl_lock(); + if (!state) { + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) { + rtnl_unlock(); + return -ENOMEM; + } + state->filter_wpan_phy = -1; + ret = nl802154_dump_wpan_phy_parse(skb, cb, state); + if (ret) { + kfree(state); + rtnl_unlock(); + return ret; + } + cb->args[0] = (long)state; + } + + list_for_each_entry(rdev, &cfg802154_rdev_list, list) { + /* TODO net ns compare */ + if (++idx <= state->start) + continue; + if (state->filter_wpan_phy != -1 && + state->filter_wpan_phy != rdev->wpan_phy_idx) + continue; + /* attempt to fit multiple wpan_phy data chunks into the skb */ + ret = nl802154_send_wpan_phy(rdev, + NL802154_CMD_NEW_WPAN_PHY, + skb, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, NLM_F_MULTI); + if (ret < 0) { + if ((ret == -ENOBUFS || ret == -EMSGSIZE) && + !skb->len && cb->min_dump_alloc < 4096) { + cb->min_dump_alloc = 4096; + rtnl_unlock(); + return 1; + } + idx--; + break; + } + break; + } + rtnl_unlock(); + + state->start = idx; + + return skb->len; +} + +static int nl802154_dump_wpan_phy_done(struct netlink_callback *cb) +{ + kfree((void *)cb->args[0]); + return 0; +} + +static int nl802154_get_wpan_phy(struct sk_buff *skb, struct genl_info *info) +{ + struct sk_buff *msg; + struct cfg802154_registered_device *rdev = info->user_ptr[0]; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + if (nl802154_send_wpan_phy(rdev, NL802154_CMD_NEW_WPAN_PHY, msg, + info->snd_portid, info->snd_seq, 0) < 0) { + nlmsg_free(msg); + return -ENOBUFS; + } + + return genlmsg_reply(msg, info); +} + +static inline u64 wpan_dev_id(struct wpan_dev *wpan_dev) +{ + return (u64)wpan_dev->identifier | + ((u64)wpan_phy_to_rdev(wpan_dev->wpan_phy)->wpan_phy_idx << 32); +} + +static int 
+nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, + struct cfg802154_registered_device *rdev, + struct wpan_dev *wpan_dev) +{ + struct net_device *dev = wpan_dev->netdev; + void *hdr; + + hdr = nl802154hdr_put(msg, portid, seq, flags, + NL802154_CMD_NEW_INTERFACE); + if (!hdr) + return -1; + + if (dev && + (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex) || + nla_put_string(msg, NL802154_ATTR_IFNAME, dev->name))) + goto nla_put_failure; + + if (nla_put_u32(msg, NL802154_ATTR_WPAN_PHY, rdev->wpan_phy_idx) || + nla_put_u32(msg, NL802154_ATTR_IFTYPE, wpan_dev->iftype) || + nla_put_u64(msg, NL802154_ATTR_WPAN_DEV, wpan_dev_id(wpan_dev)) || + nla_put_u32(msg, NL802154_ATTR_GENERATION, + rdev->devlist_generation ^ + (cfg802154_rdev_list_generation << 2))) + goto nla_put_failure; + + /* address settings */ + if (nla_put_le64(msg, NL802154_ATTR_EXTENDED_ADDR, + wpan_dev->extended_addr) || + nla_put_le16(msg, NL802154_ATTR_SHORT_ADDR, + wpan_dev->short_addr) || + nla_put_le16(msg, NL802154_ATTR_PAN_ID, wpan_dev->pan_id)) + goto nla_put_failure; + + /* ARET handling */ + if (nla_put_s8(msg, NL802154_ATTR_MAX_FRAME_RETRIES, + wpan_dev->frame_retries) || + nla_put_u8(msg, NL802154_ATTR_MAX_BE, wpan_dev->max_be) || + nla_put_u8(msg, NL802154_ATTR_MAX_CSMA_BACKOFFS, + wpan_dev->csma_retries) || + nla_put_u8(msg, NL802154_ATTR_MIN_BE, wpan_dev->min_be)) + goto nla_put_failure; + + /* listen before transmit */ + if (nla_put_u8(msg, NL802154_ATTR_LBT_MODE, wpan_dev->lbt)) + goto nla_put_failure; + + return genlmsg_end(msg, hdr); + +nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +static int +nl802154_dump_interface(struct sk_buff *skb, struct netlink_callback *cb) +{ + int wp_idx = 0; + int if_idx = 0; + int wp_start = cb->args[0]; + int if_start = cb->args[1]; + struct cfg802154_registered_device *rdev; + struct wpan_dev *wpan_dev; + + rtnl_lock(); + list_for_each_entry(rdev, &cfg802154_rdev_list, list) { + /* TODO netns compare */ + if (wp_idx < wp_start) { + wp_idx++; + continue; + } + if_idx = 0; + + list_for_each_entry(wpan_dev, &rdev->wpan_dev_list, list) { + if (if_idx < if_start) { + if_idx++; + continue; + } + if (nl802154_send_iface(skb, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, + rdev, wpan_dev) < 0) { + goto out; + } + if_idx++; + } + + wp_idx++; + } +out: + rtnl_unlock(); + + cb->args[0] = wp_idx; + cb->args[1] = if_idx; + + return skb->len; +} + +static int nl802154_get_interface(struct sk_buff *skb, struct genl_info *info) +{ + struct sk_buff *msg; + struct cfg802154_registered_device *rdev = info->user_ptr[0]; + struct wpan_dev *wdev = info->user_ptr[1]; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + if (nl802154_send_iface(msg, info->snd_portid, info->snd_seq, 0, + rdev, wdev) < 0) { + nlmsg_free(msg); + return -ENOBUFS; + } + + return genlmsg_reply(msg, info); +} + +static int nl802154_new_interface(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg802154_registered_device *rdev = info->user_ptr[0]; + enum nl802154_iftype type = NL802154_IFTYPE_UNSPEC; + __le64 extended_addr = cpu_to_le64(0x0000000000000000ULL); + + /* TODO avoid failing a new interface + * creation due to pending removal? 
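+	 * (creation can currently fail while an interface with the + * same name is still pending removal)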
+ */ + + if (!info->attrs[NL802154_ATTR_IFNAME]) + return -EINVAL; + + if (info->attrs[NL802154_ATTR_IFTYPE]) { + type = nla_get_u32(info->attrs[NL802154_ATTR_IFTYPE]); + if (type > NL802154_IFTYPE_MAX) + return -EINVAL; + } + + /* TODO add nla_get_le64 to netlink */ + if (info->attrs[NL802154_ATTR_EXTENDED_ADDR]) + extended_addr = (__force __le64)nla_get_u64( + info->attrs[NL802154_ATTR_EXTENDED_ADDR]); + + if (!rdev->ops->add_virtual_intf) + return -EOPNOTSUPP; + + return rdev_add_virtual_intf(rdev, + nla_data(info->attrs[NL802154_ATTR_IFNAME]), + type, extended_addr); +} + +static int nl802154_del_interface(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg802154_registered_device *rdev = info->user_ptr[0]; + struct wpan_dev *wpan_dev = info->user_ptr[1]; + + if (!rdev->ops->del_virtual_intf) + return -EOPNOTSUPP; + + /* If we remove a wpan device without a netdev then clear + * user_ptr[1] so that nl802154_post_doit won't dereference it + * to check if it needs to do dev_put(). Otherwise it crashes + * since the wpan_dev has been freed, unlike with a netdev where + * we need the dev_put() for the netdev to really be freed. + */ + if (!wpan_dev->netdev) + info->user_ptr[1] = NULL; + + return rdev_del_virtual_intf(rdev, wpan_dev); +} + +static int nl802154_set_channel(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg802154_registered_device *rdev = info->user_ptr[0]; + u8 channel, page; + + if (!info->attrs[NL802154_ATTR_PAGE] || + !info->attrs[NL802154_ATTR_CHANNEL]) + return -EINVAL; + + page = nla_get_u8(info->attrs[NL802154_ATTR_PAGE]); + channel = nla_get_u8(info->attrs[NL802154_ATTR_CHANNEL]); + + /* check 802.15.4 constraints */ + if (page > IEEE802154_MAX_PAGE || channel > IEEE802154_MAX_CHANNEL) + return -EINVAL; + + return rdev_set_channel(rdev, page, channel); +} + +static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg802154_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; + __le16 pan_id; + + /* conflict here while tx/rx calls */ + if (netif_running(dev)) + return -EBUSY; + + /* don't change address fields on monitor */ + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) + return -EINVAL; + + if (!info->attrs[NL802154_ATTR_PAN_ID]) + return -EINVAL; + + pan_id = nla_get_le16(info->attrs[NL802154_ATTR_PAN_ID]); + + return rdev_set_pan_id(rdev, wpan_dev, pan_id); +} + +static int nl802154_set_short_addr(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg802154_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; + __le16 short_addr; + + /* conflict here while tx/rx calls */ + if (netif_running(dev)) + return -EBUSY; + + /* don't change address fields on monitor */ + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) + return -EINVAL; + + if (!info->attrs[NL802154_ATTR_SHORT_ADDR]) + return -EINVAL; + + short_addr = nla_get_le16(info->attrs[NL802154_ATTR_SHORT_ADDR]); + + return rdev_set_short_addr(rdev, wpan_dev, short_addr); +} + +static int +nl802154_set_backoff_exponent(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg802154_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; + u8 min_be, max_be; + + /* should be set on netif open inside phy settings */ + if (netif_running(dev)) + return -EBUSY; + + if 
(!info->attrs[NL802154_ATTR_MIN_BE] || + !info->attrs[NL802154_ATTR_MAX_BE]) + return -EINVAL; + + min_be = nla_get_u8(info->attrs[NL802154_ATTR_MIN_BE]); + max_be = nla_get_u8(info->attrs[NL802154_ATTR_MAX_BE]); + + /* check 802.15.4 constraints */ + if (max_be < 3 || max_be > 8 || min_be > max_be) + return -EINVAL; + + return rdev_set_backoff_exponent(rdev, wpan_dev, min_be, max_be); +} + +static int +nl802154_set_max_csma_backoffs(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg802154_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; + u8 max_csma_backoffs; + + /* conflict here while other running iface settings */ + if (netif_running(dev)) + return -EBUSY; + + if (!info->attrs[NL802154_ATTR_MAX_CSMA_BACKOFFS]) + return -EINVAL; + + max_csma_backoffs = nla_get_u8( + info->attrs[NL802154_ATTR_MAX_CSMA_BACKOFFS]); + + /* check 802.15.4 constraints */ + if (max_csma_backoffs > 5) + return -EINVAL; + + return rdev_set_max_csma_backoffs(rdev, wpan_dev, max_csma_backoffs); +} + +static int +nl802154_set_max_frame_retries(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg802154_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; + s8 max_frame_retries; + + if (netif_running(dev)) + return -EBUSY; + + if (!info->attrs[NL802154_ATTR_MAX_FRAME_RETRIES]) + return -EINVAL; + + max_frame_retries = nla_get_s8( + info->attrs[NL802154_ATTR_MAX_FRAME_RETRIES]); + + /* check 802.15.4 constraints */ + if (max_frame_retries < -1 || max_frame_retries > 7) + return -EINVAL; + + return rdev_set_max_frame_retries(rdev, wpan_dev, max_frame_retries); +} + +static int nl802154_set_lbt_mode(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg802154_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; + bool mode; + + if (netif_running(dev)) + return -EBUSY; + + if (!info->attrs[NL802154_ATTR_LBT_MODE]) + return -EINVAL; + + mode = !!nla_get_u8(info->attrs[NL802154_ATTR_LBT_MODE]); + return rdev_set_lbt_mode(rdev, wpan_dev, mode); +} + +#define NL802154_FLAG_NEED_WPAN_PHY 0x01 +#define NL802154_FLAG_NEED_NETDEV 0x02 +#define NL802154_FLAG_NEED_RTNL 0x04 +#define NL802154_FLAG_CHECK_NETDEV_UP 0x08 +#define NL802154_FLAG_NEED_NETDEV_UP (NL802154_FLAG_NEED_NETDEV |\ + NL802154_FLAG_CHECK_NETDEV_UP) +#define NL802154_FLAG_NEED_WPAN_DEV 0x10 +#define NL802154_FLAG_NEED_WPAN_DEV_UP (NL802154_FLAG_NEED_WPAN_DEV |\ + NL802154_FLAG_CHECK_NETDEV_UP) + +static int nl802154_pre_doit(const struct genl_ops *ops, struct sk_buff *skb, + struct genl_info *info) +{ + struct cfg802154_registered_device *rdev; + struct wpan_dev *wpan_dev; + struct net_device *dev; + bool rtnl = ops->internal_flags & NL802154_FLAG_NEED_RTNL; + + if (rtnl) + rtnl_lock(); + + if (ops->internal_flags & NL802154_FLAG_NEED_WPAN_PHY) { + rdev = cfg802154_get_dev_from_info(genl_info_net(info), info); + if (IS_ERR(rdev)) { + if (rtnl) + rtnl_unlock(); + return PTR_ERR(rdev); + } + info->user_ptr[0] = rdev; + } else if (ops->internal_flags & NL802154_FLAG_NEED_NETDEV || + ops->internal_flags & NL802154_FLAG_NEED_WPAN_DEV) { + ASSERT_RTNL(); + wpan_dev = __cfg802154_wpan_dev_from_attrs(genl_info_net(info), + info->attrs); + if (IS_ERR(wpan_dev)) { + if (rtnl) + rtnl_unlock(); + return PTR_ERR(wpan_dev); + } + + dev = wpan_dev->netdev; + rdev = 
wpan_phy_to_rdev(wpan_dev->wpan_phy); + + if (ops->internal_flags & NL802154_FLAG_NEED_NETDEV) { + if (!dev) { + if (rtnl) + rtnl_unlock(); + return -EINVAL; + } + + info->user_ptr[1] = dev; + } else { + info->user_ptr[1] = wpan_dev; + } + + if (dev) { + if (ops->internal_flags & NL802154_FLAG_CHECK_NETDEV_UP && + !netif_running(dev)) { + if (rtnl) + rtnl_unlock(); + return -ENETDOWN; + } + + dev_hold(dev); + } + + info->user_ptr[0] = rdev; + } + + return 0; +} + +static void nl802154_post_doit(const struct genl_ops *ops, struct sk_buff *skb, + struct genl_info *info) +{ + if (info->user_ptr[1]) { + if (ops->internal_flags & NL802154_FLAG_NEED_WPAN_DEV) { + struct wpan_dev *wpan_dev = info->user_ptr[1]; + + if (wpan_dev->netdev) + dev_put(wpan_dev->netdev); + } else { + dev_put(info->user_ptr[1]); + } + } + + if (ops->internal_flags & NL802154_FLAG_NEED_RTNL) + rtnl_unlock(); +} + +static const struct genl_ops nl802154_ops[] = { + { + .cmd = NL802154_CMD_GET_WPAN_PHY, + .doit = nl802154_get_wpan_phy, + .dumpit = nl802154_dump_wpan_phy, + .done = nl802154_dump_wpan_phy_done, + .policy = nl802154_policy, + /* can be retrieved by unprivileged users */ + .internal_flags = NL802154_FLAG_NEED_WPAN_PHY | + NL802154_FLAG_NEED_RTNL, + }, + { + .cmd = NL802154_CMD_GET_INTERFACE, + .doit = nl802154_get_interface, + .dumpit = nl802154_dump_interface, + .policy = nl802154_policy, + /* can be retrieved by unprivileged users */ + .internal_flags = NL802154_FLAG_NEED_WPAN_DEV | + NL802154_FLAG_NEED_RTNL, + }, + { + .cmd = NL802154_CMD_NEW_INTERFACE, + .doit = nl802154_new_interface, + .policy = nl802154_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL802154_FLAG_NEED_WPAN_PHY | + NL802154_FLAG_NEED_RTNL, + }, + { + .cmd = NL802154_CMD_DEL_INTERFACE, + .doit = nl802154_del_interface, + .policy = nl802154_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL802154_FLAG_NEED_WPAN_DEV | + NL802154_FLAG_NEED_RTNL, + }, + { + .cmd = NL802154_CMD_SET_CHANNEL, + .doit = nl802154_set_channel, + .policy = nl802154_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL802154_FLAG_NEED_WPAN_PHY | + NL802154_FLAG_NEED_RTNL, + }, + { + .cmd = NL802154_CMD_SET_PAN_ID, + .doit = nl802154_set_pan_id, + .policy = nl802154_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL802154_FLAG_NEED_NETDEV | + NL802154_FLAG_NEED_RTNL, + }, + { + .cmd = NL802154_CMD_SET_SHORT_ADDR, + .doit = nl802154_set_short_addr, + .policy = nl802154_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL802154_FLAG_NEED_NETDEV | + NL802154_FLAG_NEED_RTNL, + }, + { + .cmd = NL802154_CMD_SET_BACKOFF_EXPONENT, + .doit = nl802154_set_backoff_exponent, + .policy = nl802154_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL802154_FLAG_NEED_NETDEV | + NL802154_FLAG_NEED_RTNL, + }, + { + .cmd = NL802154_CMD_SET_MAX_CSMA_BACKOFFS, + .doit = nl802154_set_max_csma_backoffs, + .policy = nl802154_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL802154_FLAG_NEED_NETDEV | + NL802154_FLAG_NEED_RTNL, + }, + { + .cmd = NL802154_CMD_SET_MAX_FRAME_RETRIES, + .doit = nl802154_set_max_frame_retries, + .policy = nl802154_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL802154_FLAG_NEED_NETDEV | + NL802154_FLAG_NEED_RTNL, + }, + { + .cmd = NL802154_CMD_SET_LBT_MODE, + .doit = nl802154_set_lbt_mode, + .policy = nl802154_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL802154_FLAG_NEED_NETDEV | + NL802154_FLAG_NEED_RTNL, + }, +}; + +/* initialisation/exit functions */ +int nl802154_init(void) +{ + return 
genl_register_family_with_ops_groups(&nl802154_fam, nl802154_ops, + nl802154_mcgrps); +} + +void nl802154_exit(void) +{ + genl_unregister_family(&nl802154_fam); +} diff --git a/net/ieee802154/nl802154.h b/net/ieee802154/nl802154.h new file mode 100644 index 0000000000000000000000000000000000000000..3846a89d0958ef188a0ee6ff6ad4d8bf627f65a0 --- /dev/null +++ b/net/ieee802154/nl802154.h @@ -0,0 +1,7 @@ +#ifndef __IEEE802154_NL802154_H +#define __IEEE802154_NL802154_H + +int nl802154_init(void); +void nl802154_exit(void); + +#endif /* __IEEE802154_NL802154_H */ diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c index 3a703ab88348fc38481ec583ff5d0885ae8c9655..35c4326684548d88f408f84ded46ce6ac2d04ebc 100644 --- a/net/ieee802154/nl_policy.c +++ b/net/ieee802154/nl_policy.c @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * */ #include diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c index 9d1f64806f02127808986a84b36fae92e33df4a2..1674b115c89132b47527d7c4b29f21f8d20f15a9 100644 --- a/net/ieee802154/raw.c +++ b/net/ieee802154/raw.c @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * Written by: * Sergey Lapin * Dmitry Eremin-Solenikov @@ -154,7 +150,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, skb_reset_mac_header(skb); skb_reset_network_header(skb); - err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); + err = memcpy_from_msg(skb_put(skb, size), msg, size); if (err < 0) goto out_skb; @@ -195,7 +191,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, copied = len; } - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto done; @@ -225,7 +221,6 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb) return NET_RX_SUCCESS; } - void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb) { struct sock *sk; diff --git a/net/ieee802154/rdev-ops.h b/net/ieee802154/rdev-ops.h new file mode 100644 index 0000000000000000000000000000000000000000..aff54fbd9264e92fd03578d6ecb5298d53226a42 --- /dev/null +++ b/net/ieee802154/rdev-ops.h @@ -0,0 +1,89 @@ +#ifndef __CFG802154_RDEV_OPS +#define __CFG802154_RDEV_OPS + +#include + +#include "core.h" + +static inline struct net_device * +rdev_add_virtual_intf_deprecated(struct cfg802154_registered_device *rdev, + const char *name, int type) +{ + return rdev->ops->add_virtual_intf_deprecated(&rdev->wpan_phy, name, + type); +} + +static inline void +rdev_del_virtual_intf_deprecated(struct cfg802154_registered_device *rdev, + struct net_device *dev) +{ + rdev->ops->del_virtual_intf_deprecated(&rdev->wpan_phy, dev); +} + +static inline int +rdev_add_virtual_intf(struct cfg802154_registered_device *rdev, char *name, + enum nl802154_iftype type, __le64 extended_addr) +{ + return rdev->ops->add_virtual_intf(&rdev->wpan_phy, name, type, + extended_addr); +} + +static inline int +rdev_del_virtual_intf(struct 
cfg802154_registered_device *rdev, + struct wpan_dev *wpan_dev) +{ + return rdev->ops->del_virtual_intf(&rdev->wpan_phy, wpan_dev); +} + +static inline int +rdev_set_channel(struct cfg802154_registered_device *rdev, u8 page, u8 channel) +{ + return rdev->ops->set_channel(&rdev->wpan_phy, page, channel); +} + +static inline int +rdev_set_pan_id(struct cfg802154_registered_device *rdev, + struct wpan_dev *wpan_dev, __le16 pan_id) +{ + return rdev->ops->set_pan_id(&rdev->wpan_phy, wpan_dev, pan_id); +} + +static inline int +rdev_set_short_addr(struct cfg802154_registered_device *rdev, + struct wpan_dev *wpan_dev, __le16 short_addr) +{ + return rdev->ops->set_short_addr(&rdev->wpan_phy, wpan_dev, short_addr); +} + +static inline int +rdev_set_backoff_exponent(struct cfg802154_registered_device *rdev, + struct wpan_dev *wpan_dev, u8 min_be, u8 max_be) +{ + return rdev->ops->set_backoff_exponent(&rdev->wpan_phy, wpan_dev, + min_be, max_be); +} + +static inline int +rdev_set_max_csma_backoffs(struct cfg802154_registered_device *rdev, + struct wpan_dev *wpan_dev, u8 max_csma_backoffs) +{ + return rdev->ops->set_max_csma_backoffs(&rdev->wpan_phy, wpan_dev, + max_csma_backoffs); +} + +static inline int +rdev_set_max_frame_retries(struct cfg802154_registered_device *rdev, + struct wpan_dev *wpan_dev, s8 max_frame_retries) +{ + return rdev->ops->set_max_frame_retries(&rdev->wpan_phy, wpan_dev, + max_frame_retries); +} + +static inline int +rdev_set_lbt_mode(struct cfg802154_registered_device *rdev, + struct wpan_dev *wpan_dev, bool mode) +{ + return rdev->ops->set_lbt_mode(&rdev->wpan_phy, wpan_dev, mode); +} + +#endif /* __CFG802154_RDEV_OPS */ diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c index 7cfcd6885225b57cb94e1ac51a25baeb76d83380..9d980ed3ffe2c5ed5d1683f9ee1a962751706e21 100644 --- a/net/ieee802154/reassembly.c +++ b/net/ieee802154/reassembly.c @@ -33,7 +33,7 @@ static const char lowpan_frags_cache_name[] = "lowpan-frags"; struct lowpan_frag_info { - __be16 d_tag; + u16 d_tag; u16 d_size; u8 d_offset; }; @@ -48,7 +48,7 @@ static struct inet_frags lowpan_frags; static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev, struct net_device *dev); -static unsigned int lowpan_hash_frag(__be16 tag, u16 d_size, +static unsigned int lowpan_hash_frag(u16 tag, u16 d_size, const struct ieee802154_addr *saddr, const struct ieee802154_addr *daddr) { @@ -330,11 +330,13 @@ static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type, { bool fail; u8 pattern = 0, low = 0; + __be16 d_tag = 0; fail = lowpan_fetch_skb(skb, &pattern, 1); fail |= lowpan_fetch_skb(skb, &low, 1); frag_info->d_size = (pattern & 7) << 8 | low; - fail |= lowpan_fetch_skb(skb, &frag_info->d_tag, 2); + fail |= lowpan_fetch_skb(skb, &d_tag, 2); + frag_info->d_tag = ntohs(d_tag); if (frag_type == LOWPAN_DISPATCH_FRAGN) { fail |= lowpan_fetch_skb(skb, &frag_info->d_offset, 1); diff --git a/net/ieee802154/reassembly.h b/net/ieee802154/reassembly.h index 74e4a7c98191109cadf7ae029c7f42602630cbc7..836b16fa001f19852fec7f2890b8ef303d2c0773 100644 --- a/net/ieee802154/reassembly.h +++ b/net/ieee802154/reassembly.h @@ -4,7 +4,7 @@ #include struct lowpan_create_arg { - __be16 tag; + u16 tag; u16 d_size; const struct ieee802154_addr *src; const struct ieee802154_addr *dst; @@ -15,7 +15,7 @@ struct lowpan_create_arg { struct lowpan_frag_queue { struct inet_frag_queue q; - __be16 tag; + u16 tag; u16 d_size; struct ieee802154_addr saddr; struct ieee802154_addr daddr; diff --git 
a/net/ieee802154/sysfs.c b/net/ieee802154/sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..1613b9c65dfa15aafd154042e60abc44517bc9d6 --- /dev/null +++ b/net/ieee802154/sysfs.c @@ -0,0 +1,128 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Authors: + * Alexander Aring + * + * Based on: net/wireless/sysfs.c + */ + +#include + +#include + +#include "core.h" +#include "sysfs.h" + +static inline struct cfg802154_registered_device * +dev_to_rdev(struct device *dev) +{ + return container_of(dev, struct cfg802154_registered_device, + wpan_phy.dev); +} + +#define SHOW_FMT(name, fmt, member) \ +static ssize_t name ## _show(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + return sprintf(buf, fmt "\n", dev_to_rdev(dev)->member); \ +} \ +static DEVICE_ATTR_RO(name) + +SHOW_FMT(index, "%d", wpan_phy_idx); + +static ssize_t name_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct wpan_phy *wpan_phy = &dev_to_rdev(dev)->wpan_phy; + + return sprintf(buf, "%s\n", dev_name(&wpan_phy->dev)); +} +static DEVICE_ATTR_RO(name); + +#define MASTER_SHOW_COMPLEX(name, format_string, args...) \ +static ssize_t name ## _show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev); \ + int ret; \ + \ + mutex_lock(&phy->pib_lock); \ + ret = snprintf(buf, PAGE_SIZE, format_string "\n", args); \ + mutex_unlock(&phy->pib_lock); \ + return ret; \ +} \ +static DEVICE_ATTR_RO(name) + +#define MASTER_SHOW(field, format_string) \ + MASTER_SHOW_COMPLEX(field, format_string, phy->field) + +MASTER_SHOW(current_channel, "%d"); +MASTER_SHOW(current_page, "%d"); +MASTER_SHOW(transmit_power, "%d +- 1 dB"); +MASTER_SHOW(cca_mode, "%d"); + +static ssize_t channels_supported_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev); + int ret; + int i, len = 0; + + mutex_lock(&phy->pib_lock); + for (i = 0; i < 32; i++) { + ret = snprintf(buf + len, PAGE_SIZE - len, + "%#09x\n", phy->channels_supported[i]); + if (ret < 0) + break; + len += ret; + } + mutex_unlock(&phy->pib_lock); + return len; +} +static DEVICE_ATTR_RO(channels_supported); + +static void wpan_phy_release(struct device *dev) +{ + struct cfg802154_registered_device *rdev = dev_to_rdev(dev); + + cfg802154_dev_free(rdev); +} + +static struct attribute *pmib_attrs[] = { + &dev_attr_index.attr, + &dev_attr_name.attr, + /* below will be removed soon */ + &dev_attr_current_channel.attr, + &dev_attr_current_page.attr, + &dev_attr_channels_supported.attr, + &dev_attr_transmit_power.attr, + &dev_attr_cca_mode.attr, + NULL, +}; +ATTRIBUTE_GROUPS(pmib); + +struct class wpan_phy_class = { + .name = "ieee802154", + .dev_release = wpan_phy_release, + .dev_groups = pmib_groups, +}; + +int wpan_phy_sysfs_init(void) +{ + return class_register(&wpan_phy_class); +} + +void wpan_phy_sysfs_exit(void) +{ + class_unregister(&wpan_phy_class); +} diff --git a/net/ieee802154/sysfs.h b/net/ieee802154/sysfs.h new file mode 100644 index 
0000000000000000000000000000000000000000..aa42e39ecbecac48cc9a61ca525f38cebf598f74 --- /dev/null +++ b/net/ieee802154/sysfs.h @@ -0,0 +1,9 @@ +#ifndef __IEEE802154_SYSFS_H +#define __IEEE802154_SYSFS_H + +int wpan_phy_sysfs_init(void); +void wpan_phy_sysfs_exit(void); + +extern struct class wpan_phy_class; + +#endif /* __IEEE802154_SYSFS_H */ diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c deleted file mode 100644 index 4955e0fe5883ae705edb822b62aab36c93e7e646..0000000000000000000000000000000000000000 --- a/net/ieee802154/wpan-class.c +++ /dev/null @@ -1,230 +0,0 @@ -/* - * Copyright (C) 2007, 2008, 2009 Siemens AG - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - */ - -#include -#include -#include -#include - -#include - -#include "ieee802154.h" - -#define MASTER_SHOW_COMPLEX(name, format_string, args...) \ -static ssize_t name ## _show(struct device *dev, \ - struct device_attribute *attr, char *buf) \ -{ \ - struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev); \ - int ret; \ - \ - mutex_lock(&phy->pib_lock); \ - ret = snprintf(buf, PAGE_SIZE, format_string "\n", args); \ - mutex_unlock(&phy->pib_lock); \ - return ret; \ -} \ -static DEVICE_ATTR_RO(name); - -#define MASTER_SHOW(field, format_string) \ - MASTER_SHOW_COMPLEX(field, format_string, phy->field) - -MASTER_SHOW(current_channel, "%d"); -MASTER_SHOW(current_page, "%d"); -MASTER_SHOW(transmit_power, "%d +- 1 dB"); -MASTER_SHOW(cca_mode, "%d"); - -static ssize_t channels_supported_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev); - int ret; - int i, len = 0; - - mutex_lock(&phy->pib_lock); - for (i = 0; i < 32; i++) { - ret = snprintf(buf + len, PAGE_SIZE - len, - "%#09x\n", phy->channels_supported[i]); - if (ret < 0) - break; - len += ret; - } - mutex_unlock(&phy->pib_lock); - return len; -} -static DEVICE_ATTR_RO(channels_supported); - -static struct attribute *pmib_attrs[] = { - &dev_attr_current_channel.attr, - &dev_attr_current_page.attr, - &dev_attr_channels_supported.attr, - &dev_attr_transmit_power.attr, - &dev_attr_cca_mode.attr, - NULL, -}; -ATTRIBUTE_GROUPS(pmib); - -static void wpan_phy_release(struct device *d) -{ - struct wpan_phy *phy = container_of(d, struct wpan_phy, dev); - - kfree(phy); -} - -static struct class wpan_phy_class = { - .name = "ieee802154", - .dev_release = wpan_phy_release, - .dev_groups = pmib_groups, -}; - -static DEFINE_MUTEX(wpan_phy_mutex); -static int wpan_phy_idx; - -static int wpan_phy_match(struct device *dev, const void *data) -{ - return !strcmp(dev_name(dev), (const char *)data); -} - -struct wpan_phy *wpan_phy_find(const char *str) -{ - struct device *dev; - - if (WARN_ON(!str)) - return NULL; - - dev = class_find_device(&wpan_phy_class, NULL, str, wpan_phy_match); - if (!dev) - return NULL; - - return container_of(dev, struct wpan_phy, dev); -} 
-EXPORT_SYMBOL(wpan_phy_find); - -struct wpan_phy_iter_data { - int (*fn)(struct wpan_phy *phy, void *data); - void *data; -}; - -static int wpan_phy_iter(struct device *dev, void *_data) -{ - struct wpan_phy_iter_data *wpid = _data; - struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev); - - return wpid->fn(phy, wpid->data); -} - -int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data), - void *data) -{ - struct wpan_phy_iter_data wpid = { - .fn = fn, - .data = data, - }; - - return class_for_each_device(&wpan_phy_class, NULL, - &wpid, wpan_phy_iter); -} -EXPORT_SYMBOL(wpan_phy_for_each); - -static int wpan_phy_idx_valid(int idx) -{ - return idx >= 0; -} - -struct wpan_phy *wpan_phy_alloc(size_t priv_size) -{ - struct wpan_phy *phy = kzalloc(sizeof(*phy) + priv_size, - GFP_KERNEL); - - if (!phy) - goto out; - mutex_lock(&wpan_phy_mutex); - phy->idx = wpan_phy_idx++; - if (unlikely(!wpan_phy_idx_valid(phy->idx))) { - wpan_phy_idx--; - mutex_unlock(&wpan_phy_mutex); - kfree(phy); - goto out; - } - mutex_unlock(&wpan_phy_mutex); - - mutex_init(&phy->pib_lock); - - device_initialize(&phy->dev); - dev_set_name(&phy->dev, "wpan-phy%d", phy->idx); - - phy->dev.class = &wpan_phy_class; - - phy->current_channel = -1; /* not initialised */ - phy->current_page = 0; /* for compatibility */ - - return phy; - -out: - return NULL; -} -EXPORT_SYMBOL(wpan_phy_alloc); - -int wpan_phy_register(struct wpan_phy *phy) -{ - return device_add(&phy->dev); -} -EXPORT_SYMBOL(wpan_phy_register); - -void wpan_phy_unregister(struct wpan_phy *phy) -{ - device_del(&phy->dev); -} -EXPORT_SYMBOL(wpan_phy_unregister); - -void wpan_phy_free(struct wpan_phy *phy) -{ - put_device(&phy->dev); -} -EXPORT_SYMBOL(wpan_phy_free); - -static int __init wpan_phy_class_init(void) -{ - int rc; - - rc = class_register(&wpan_phy_class); - if (rc) - goto err; - - rc = ieee802154_nl_init(); - if (rc) - goto err_nl; - - return 0; -err_nl: - class_unregister(&wpan_phy_class); -err: - return rc; -} -subsys_initcall(wpan_phy_class_init); - -static void __exit wpan_phy_class_exit(void) -{ - ieee802154_nl_exit(); - class_unregister(&wpan_phy_class); -} -module_exit(wpan_phy_class_exit); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("IEEE 802.15.4 configuration interface"); -MODULE_AUTHOR("Dmitry Eremin-Solenikov"); - diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index e682b48e070993e4221a4400a8c86c3fa0c02684..bd290160484263ba1507b8ae5b05580430fa9e56 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -322,6 +322,15 @@ config NET_FOU network mechanisms and optimizations for UDP (such as ECMP and RSS) can be leveraged to provide better service. +config NET_FOU_IP_TUNNELS + bool "IP: FOU encapsulation of IP tunnels" + depends on NET_IPIP || NET_IPGRE || IPV6_SIT + select NET_FOU + ---help--- + Allow configuration of FOU or GUE encapsulation for IP tunnels. + When this option is enabled IP tunnels can be configured to use + FOU or GUE encapsulation. 
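+ + For example, with an iproute2 version that supports FOU, a receive + port can be opened with "ip fou add port 5555 ipproto 4" and an ipip + tunnel bound to it with "ip link add ... type ipip ... encap fou + encap-sport auto encap-dport 5555".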
+ config GENEVE tristate "Generic Network Virtualization Encapsulation (Geneve)" depends on INET diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index e67da4e6c3240bb20a7d8a03a3b579f7484f629e..a44773c8346c13c24535448f7e33105c894ac279 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1222,7 +1222,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, SKB_GSO_TCPV6 | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM | - SKB_GSO_MPLS | + SKB_GSO_TUNNEL_REMCSUM | 0))) goto out; diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 16acb59d665e32e321a583b2178fcba5b7f7b88b..205e1472aa7819784091d588818e1ab602f6006f 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1292,7 +1292,7 @@ static int arp_proc_init(void); void __init arp_init(void) { - neigh_table_init(&arp_tbl); + neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl); dev_add_pack(&arp_packet_type); arp_proc_init(); diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 4715f25dfe0351a2d219f8c2aa7756da75e51dcb..5160c710f2eb4d4c3506ce41039f58cf50f5521c 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -50,7 +50,7 @@ #include #include #include -#include +#include #include /* List of available DOI definitions */ @@ -72,6 +72,7 @@ struct cipso_v4_map_cache_bkt { u32 size; struct list_head list; }; + struct cipso_v4_map_cache_entry { u32 hash; unsigned char *key; @@ -82,7 +83,8 @@ struct cipso_v4_map_cache_entry { u32 activity; struct list_head list; }; -static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL; + +static struct cipso_v4_map_cache_bkt *cipso_v4_cache; /* Restricted bitmap (tag #1) flags */ int cipso_v4_rbm_optfmt = 0; @@ -539,7 +541,7 @@ doi_add_return: /** * cipso_v4_doi_free - Frees a DOI definition - * @entry: the entry's RCU field + * @doi_def: the DOI definition * * Description: * This function frees all of the memory associated with a DOI definition. diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 360b565918c4b524d561a010ed7ec5df02312cdb..60173d4d3a0e335a91d2e9a463eeef6f6b88adfe 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -392,8 +392,10 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) if (elen <= 0) goto out; - if ((err = skb_cow_data(skb, 0, &trailer)) < 0) + err = skb_cow_data(skb, 0, &trailer); + if (err < 0) goto out; + nfrags = err; assoclen = sizeof(*esph); @@ -601,12 +603,12 @@ static int esp_init_authenc(struct xfrm_state *x) BUG_ON(!aalg_desc); err = -EINVAL; - if (aalg_desc->uinfo.auth.icv_fullbits/8 != + if (aalg_desc->uinfo.auth.icv_fullbits / 8 != crypto_aead_authsize(aead)) { - NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n", - x->aalg->alg_name, - crypto_aead_authsize(aead), - aalg_desc->uinfo.auth.icv_fullbits/8); + pr_info("ESP: %s digestsize %u != %hu\n", + x->aalg->alg_name, + crypto_aead_authsize(aead), + aalg_desc->uinfo.auth.icv_fullbits / 8); goto free_key; } diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 606c520ffd5af44dd79126ce119945e1f8a070ce..b986298a7ba39908290ccd808a24947d776b91b4 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -38,21 +38,17 @@ static inline struct fou *fou_from_sock(struct sock *sk) return sk->sk_user_data; } -static int fou_udp_encap_recv_deliver(struct sk_buff *skb, - u8 protocol, size_t len) +static void fou_recv_pull(struct sk_buff *skb, size_t len) { struct iphdr *iph = ip_hdr(skb); /* Remove 'len' bytes from the packet (UDP header and - * FOU header if present), modify the protocol to the one - * we found, and then call rcv_encap. + * FOU header if present). 
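+ * The IP header total length and the skb checksum state are + * adjusted below to match the pulled bytes.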
*/ iph->tot_len = htons(ntohs(iph->tot_len) - len); __skb_pull(skb, len); skb_postpull_rcsum(skb, udp_hdr(skb), len); skb_reset_transport_header(skb); - - return -protocol; } static int fou_udp_recv(struct sock *sk, struct sk_buff *skb) @@ -62,16 +58,56 @@ static int fou_udp_recv(struct sock *sk, struct sk_buff *skb) if (!fou) return 1; - return fou_udp_encap_recv_deliver(skb, fou->protocol, - sizeof(struct udphdr)); + fou_recv_pull(skb, sizeof(struct udphdr)); + + return -fou->protocol; +} + +static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr, + void *data, size_t hdrlen, u8 ipproto) +{ + __be16 *pd = data; + size_t start = ntohs(pd[0]); + size_t offset = ntohs(pd[1]); + size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start); + __wsum delta; + + if (skb->remcsum_offload) { + /* Already processed in GRO path */ + skb->remcsum_offload = 0; + return guehdr; + } + + if (!pskb_may_pull(skb, plen)) + return NULL; + guehdr = (struct guehdr *)&udp_hdr(skb)[1]; + + if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) + __skb_checksum_complete(skb); + + delta = remcsum_adjust((void *)guehdr + hdrlen, + skb->csum, start, offset); + + /* Adjust skb->csum since we changed the packet */ + skb->csum = csum_add(skb->csum, delta); + + return guehdr; +} + +static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr) +{ + /* No support yet */ + kfree_skb(skb); + return 0; } static int gue_udp_recv(struct sock *sk, struct sk_buff *skb) { struct fou *fou = fou_from_sock(sk); - size_t len; + size_t len, optlen, hdrlen; struct guehdr *guehdr; - struct udphdr *uh; + void *data; + u16 doffset = 0; if (!fou) return 1; @@ -80,25 +116,58 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb) if (!pskb_may_pull(skb, len)) goto drop; - uh = udp_hdr(skb); - guehdr = (struct guehdr *)&uh[1]; + guehdr = (struct guehdr *)&udp_hdr(skb)[1]; + + optlen = guehdr->hlen << 2; + len += optlen; - len += guehdr->hlen << 2; if (!pskb_may_pull(skb, len)) goto drop; - uh = udp_hdr(skb); - guehdr = (struct guehdr *)&uh[1]; + /* guehdr may change after pull */ + guehdr = (struct guehdr *)&udp_hdr(skb)[1]; - if (guehdr->version != 0) - goto drop; + hdrlen = sizeof(struct guehdr) + optlen; - if (guehdr->flags) { - /* No support yet */ + if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen)) goto drop; + + hdrlen = sizeof(struct guehdr) + optlen; + + ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len); + + /* Pull csum through the guehdr now. This can be used if + * there is a remote checksum offload.
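+ * (gue_remcsum() folds the resulting checksum delta back + * into skb->csum)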
+ */ + skb_postpull_rcsum(skb, udp_hdr(skb), len); + + data = &guehdr[1]; + + if (guehdr->flags & GUE_FLAG_PRIV) { + __be32 flags = *(__be32 *)(data + doffset); + + doffset += GUE_LEN_PRIV; + + if (flags & GUE_PFLAG_REMCSUM) { + guehdr = gue_remcsum(skb, guehdr, data + doffset, + hdrlen, guehdr->proto_ctype); + if (!guehdr) + goto drop; + + data = &guehdr[1]; + + doffset += GUE_PLEN_REMCSUM; + } } - return fou_udp_encap_recv_deliver(skb, guehdr->next_hdr, len); + if (unlikely(guehdr->control)) + return gue_control_message(skb, guehdr); + + __skb_pull(skb, sizeof(struct udphdr) + hdrlen); + skb_reset_transport_header(skb); + + return -guehdr->proto_ctype; + drop: kfree_skb(skb); return 0; @@ -149,6 +218,41 @@ out_unlock: return err; } +static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off, + struct guehdr *guehdr, void *data, + size_t hdrlen, u8 ipproto) +{ + __be16 *pd = data; + size_t start = ntohs(pd[0]); + size_t offset = ntohs(pd[1]); + size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start); + __wsum delta; + + if (skb->remcsum_offload) + return guehdr; + + if (!NAPI_GRO_CB(skb)->csum_valid) + return NULL; + + /* Pull checksum that will be written */ + if (skb_gro_header_hard(skb, off + plen)) { + guehdr = skb_gro_header_slow(skb, off + plen, off); + if (!guehdr) + return NULL; + } + + delta = remcsum_adjust((void *)guehdr + hdrlen, + NAPI_GRO_CB(skb)->csum, start, offset); + + /* Adjust skb->csum since we changed the packet */ + skb->csum = csum_add(skb->csum, delta); + NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta); + + skb->remcsum_offload = 1; + + return guehdr; +} + static struct sk_buff **gue_gro_receive(struct sk_buff **head, struct sk_buff *skb) { @@ -156,38 +260,64 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head, const struct net_offload *ops; struct sk_buff **pp = NULL; struct sk_buff *p; - u8 proto; struct guehdr *guehdr; - unsigned int hlen, guehlen; - unsigned int off; + size_t len, optlen, hdrlen, off; + void *data; + u16 doffset = 0; int flush = 1; off = skb_gro_offset(skb); - hlen = off + sizeof(*guehdr); + len = off + sizeof(*guehdr); + guehdr = skb_gro_header_fast(skb, off); - if (skb_gro_header_hard(skb, hlen)) { - guehdr = skb_gro_header_slow(skb, hlen, off); + if (skb_gro_header_hard(skb, len)) { + guehdr = skb_gro_header_slow(skb, len, off); if (unlikely(!guehdr)) goto out; } - proto = guehdr->next_hdr; + optlen = guehdr->hlen << 2; + len += optlen; - rcu_read_lock(); - offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; - ops = rcu_dereference(offloads[proto]); - if (WARN_ON(!ops || !ops->callbacks.gro_receive)) - goto out_unlock; + if (skb_gro_header_hard(skb, len)) { + guehdr = skb_gro_header_slow(skb, len, off); + if (unlikely(!guehdr)) + goto out; + } - guehlen = sizeof(*guehdr) + (guehdr->hlen << 2); + if (unlikely(guehdr->control) || guehdr->version != 0 || + validate_gue_flags(guehdr, optlen)) + goto out; - hlen = off + guehlen; - if (skb_gro_header_hard(skb, hlen)) { - guehdr = skb_gro_header_slow(skb, hlen, off); - if (unlikely(!guehdr)) - goto out_unlock; + hdrlen = sizeof(*guehdr) + optlen; + + /* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr, + * this is needed if there is a remote checksum offload.
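+ * (gue_gro_remcsum() computes its checksum delta from this + * adjusted value)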
+ */ + skb_gro_postpull_rcsum(skb, guehdr, hdrlen); + + data = &guehdr[1]; + + if (guehdr->flags & GUE_FLAG_PRIV) { + __be32 flags = *(__be32 *)(data + doffset); + + doffset += GUE_LEN_PRIV; + + if (flags & GUE_PFLAG_REMCSUM) { + guehdr = gue_gro_remcsum(skb, off, guehdr, + data + doffset, hdrlen, + guehdr->proto_ctype); + if (!guehdr) + goto out; + + data = &guehdr[1]; + + doffset += GUE_PLEN_REMCSUM; + } } + skb_gro_pull(skb, hdrlen); + flush = 0; for (p = *head; p; p = p->next) { @@ -199,7 +329,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head, guehdr2 = (struct guehdr *)(p->data + off); /* Compare base GUE header to be equal (covers - * hlen, version, next_hdr, and flags. + * hlen, version, proto_ctype, and flags. */ if (guehdr->word != guehdr2->word) { NAPI_GRO_CB(p)->same_flow = 0; @@ -214,10 +344,11 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head, } } - skb_gro_pull(skb, guehlen); - - /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/ - skb_gro_postpull_rcsum(skb, guehdr, guehlen); + rcu_read_lock(); + offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; + ops = rcu_dereference(offloads[guehdr->proto_ctype]); + if (WARN_ON(!ops || !ops->callbacks.gro_receive)) + goto out_unlock; pp = ops->callbacks.gro_receive(head, skb); @@ -238,7 +369,7 @@ static int gue_gro_complete(struct sk_buff *skb, int nhoff) u8 proto; int err = -ENOENT; - proto = guehdr->next_hdr; + proto = guehdr->proto_ctype; guehlen = sizeof(*guehdr) + (guehdr->hlen << 2); @@ -489,6 +620,200 @@ static const struct genl_ops fou_nl_ops[] = { }, }; +size_t fou_encap_hlen(struct ip_tunnel_encap *e) +{ + return sizeof(struct udphdr); +} +EXPORT_SYMBOL(fou_encap_hlen); + +size_t gue_encap_hlen(struct ip_tunnel_encap *e) +{ + size_t len; + bool need_priv = false; + + len = sizeof(struct udphdr) + sizeof(struct guehdr); + + if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) { + len += GUE_PLEN_REMCSUM; + need_priv = true; + } + + len += need_priv ? GUE_LEN_PRIV : 0; + + return len; +} +EXPORT_SYMBOL(gue_encap_hlen); + +static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e, + struct flowi4 *fl4, u8 *protocol, __be16 sport) +{ + struct udphdr *uh; + + skb_push(skb, sizeof(struct udphdr)); + skb_reset_transport_header(skb); + + uh = udp_hdr(skb); + + uh->dest = e->dport; + uh->source = sport; + uh->len = htons(skb->len); + uh->check = 0; + udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb, + fl4->saddr, fl4->daddr, skb->len); + + *protocol = IPPROTO_UDP; +} + +int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, + u8 *protocol, struct flowi4 *fl4) +{ + bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM); + int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; + __be16 sport; + + skb = iptunnel_handle_offloads(skb, csum, type); + + if (IS_ERR(skb)) + return PTR_ERR(skb); + + sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev), + skb, 0, 0, false); + fou_build_udp(skb, e, fl4, protocol, sport); + + return 0; +} +EXPORT_SYMBOL(fou_build_header); + +int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, + u8 *protocol, struct flowi4 *fl4) +{ + bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM); + int type = csum ? 
SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; + struct guehdr *guehdr; + size_t hdrlen, optlen = 0; + __be16 sport; + void *data; + bool need_priv = false; + + if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) && + skb->ip_summed == CHECKSUM_PARTIAL) { + csum = false; + optlen += GUE_PLEN_REMCSUM; + type |= SKB_GSO_TUNNEL_REMCSUM; + need_priv = true; + } + + optlen += need_priv ? GUE_LEN_PRIV : 0; + + skb = iptunnel_handle_offloads(skb, csum, type); + + if (IS_ERR(skb)) + return PTR_ERR(skb); + + /* Get source port (based on flow hash) before skb_push */ + sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev), + skb, 0, 0, false); + + hdrlen = sizeof(struct guehdr) + optlen; + + skb_push(skb, hdrlen); + + guehdr = (struct guehdr *)skb->data; + + guehdr->control = 0; + guehdr->version = 0; + guehdr->hlen = optlen >> 2; + guehdr->flags = 0; + guehdr->proto_ctype = *protocol; + + data = &guehdr[1]; + + if (need_priv) { + __be32 *flags = data; + + guehdr->flags |= GUE_FLAG_PRIV; + *flags = 0; + data += GUE_LEN_PRIV; + + if (type & SKB_GSO_TUNNEL_REMCSUM) { + u16 csum_start = skb_checksum_start_offset(skb); + __be16 *pd = data; + + if (csum_start < hdrlen) + return -EINVAL; + + csum_start -= hdrlen; + pd[0] = htons(csum_start); + pd[1] = htons(csum_start + skb->csum_offset); + + if (!skb_is_gso(skb)) { + skb->ip_summed = CHECKSUM_NONE; + skb->encapsulation = 0; + } + + *flags |= GUE_PFLAG_REMCSUM; + data += GUE_PLEN_REMCSUM; + } + + } + + fou_build_udp(skb, e, fl4, protocol, sport); + + return 0; +} +EXPORT_SYMBOL(gue_build_header); + +#ifdef CONFIG_NET_FOU_IP_TUNNELS + +static const struct ip_tunnel_encap_ops __read_mostly fou_iptun_ops = { + .encap_hlen = fou_encap_hlen, + .build_header = fou_build_header, +}; + +static const struct ip_tunnel_encap_ops __read_mostly gue_iptun_ops = { + .encap_hlen = gue_encap_hlen, + .build_header = gue_build_header, +}; + +static int ip_tunnel_encap_add_fou_ops(void) +{ + int ret; + + ret = ip_tunnel_encap_add_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU); + if (ret < 0) { + pr_err("can't add fou ops\n"); + return ret; + } + + ret = ip_tunnel_encap_add_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE); + if (ret < 0) { + pr_err("can't add gue ops\n"); + ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU); + return ret; + } + + return 0; +} + +static void ip_tunnel_encap_del_fou_ops(void) +{ + ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU); + ip_tunnel_encap_del_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE); +} + +#else + +static int ip_tunnel_encap_add_fou_ops(void) +{ + return 0; +} + +static void ip_tunnel_encap_del_fou_ops(void) +{ +} + +#endif + static int __init fou_init(void) { int ret; @@ -496,6 +821,14 @@ static int __init fou_init(void) ret = genl_register_family_with_ops(&fou_nl_family, fou_nl_ops); + if (ret < 0) + goto exit; + + ret = ip_tunnel_encap_add_fou_ops(); + if (ret < 0) + genl_unregister_family(&fou_nl_family); + +exit: return ret; } @@ -503,6 +836,8 @@ static void __exit fou_fini(void) { struct fou *fou, *next; + ip_tunnel_encap_del_fou_ops(); + genl_unregister_family(&fou_nl_family); /* Close all the FOU sockets */ diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c index dedb21e9991438259ff66a4e4817349dfd9d3224..a457232f0131c49d1a9fd574c683df8604f48e99 100644 --- a/net/ipv4/geneve.c +++ b/net/ipv4/geneve.c @@ -104,7 +104,7 @@ static void geneve_build_header(struct genevehdr *geneveh, memcpy(geneveh->options, options, options_len); } -/* Transmit a fully formated Geneve frame. +/* Transmit a fully formatted Geneve frame. 
* * When calling this function. The skb->data should point * to the geneve header which is fully formed. @@ -131,15 +131,9 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt, if (unlikely(err)) return err; - if (vlan_tx_tag_present(skb)) { - if (unlikely(!__vlan_put_tag(skb, - skb->vlan_proto, - vlan_tx_tag_get(skb)))) { - err = -ENOMEM; - return err; - } - skb->vlan_tci = 0; - } + skb = vlan_hwaccel_push_inside(skb); + if (unlikely(!skb)) + return -ENOMEM; gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len); geneve_build_header(gnvh, tun_flags, vni, opt_len, opt); diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index bb5947b0ce2dfbd94a0b85a13983c060b2984075..51973ddc05a68463f8f39e779491374f12e5191a 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -247,6 +247,9 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff) err = ptype->callbacks.gro_complete(skb, nhoff + grehlen); rcu_read_unlock(); + + skb_set_inner_mac_header(skb, nhoff + grehlen); + return err; } diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 5882f584910edcd38affd3be200df9d0ddf1ee14..36f5584d93c5da194caad055505b2ca97807c988 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -190,7 +190,7 @@ EXPORT_SYMBOL(icmp_err_convert); */ struct icmp_control { - void (*handler)(struct sk_buff *skb); + bool (*handler)(struct sk_buff *skb); short error; /* This ICMP is classed as an error message */ }; @@ -746,7 +746,7 @@ static bool icmp_tag_validation(int proto) * ICMP_PARAMETERPROB. */ -static void icmp_unreach(struct sk_buff *skb) +static bool icmp_unreach(struct sk_buff *skb) { const struct iphdr *iph; struct icmphdr *icmph; @@ -784,8 +784,8 @@ static void icmp_unreach(struct sk_buff *skb) */ switch (net->ipv4.sysctl_ip_no_pmtu_disc) { default: - LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: fragmentation needed and DF set\n"), - &iph->daddr); + net_dbg_ratelimited("%pI4: fragmentation needed and DF set\n", + &iph->daddr); break; case 2: goto out; @@ -798,8 +798,8 @@ static void icmp_unreach(struct sk_buff *skb) } break; case ICMP_SR_FAILED: - LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: Source Route Failed\n"), - &iph->daddr); + net_dbg_ratelimited("%pI4: Source Route Failed\n", + &iph->daddr); break; default: break; @@ -839,10 +839,10 @@ static void icmp_unreach(struct sk_buff *skb) icmp_socket_deliver(skb, info); out: - return; + return true; out_err: ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); - goto out; + return false; } @@ -850,17 +850,20 @@ out_err: * Handle ICMP_REDIRECT. */ -static void icmp_redirect(struct sk_buff *skb) +static bool icmp_redirect(struct sk_buff *skb) { if (skb->len < sizeof(struct iphdr)) { ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS); - return; + return false; } - if (!pskb_may_pull(skb, sizeof(struct iphdr))) - return; + if (!pskb_may_pull(skb, sizeof(struct iphdr))) { + /* there ought to be a stat */ + return false; + } icmp_socket_deliver(skb, icmp_hdr(skb)->un.gateway); + return true; } /* @@ -875,7 +878,7 @@ static void icmp_redirect(struct sk_buff *skb) * See also WRT handling of options once they are done and working. */ -static void icmp_echo(struct sk_buff *skb) +static bool icmp_echo(struct sk_buff *skb) { struct net *net; @@ -891,6 +894,8 @@ static void icmp_echo(struct sk_buff *skb) icmp_param.head_len = sizeof(struct icmphdr); icmp_reply(&icmp_param, skb); } + /* should there be an ICMP stat for ignored echoes?
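+ * (echo requests dropped via the icmp_echo_ignore_all sysctl + * are not counted anywhere yet)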
*/ + return true; } /* @@ -900,7 +905,7 @@ static void icmp_echo(struct sk_buff *skb) * MUST be accurate to a few minutes. * MUST be updated at least at 15Hz. */ -static void icmp_timestamp(struct sk_buff *skb) +static bool icmp_timestamp(struct sk_buff *skb) { struct timespec tv; struct icmp_bxm icmp_param; @@ -927,15 +932,17 @@ static void icmp_timestamp(struct sk_buff *skb) icmp_param.data_len = 0; icmp_param.head_len = sizeof(struct icmphdr) + 12; icmp_reply(&icmp_param, skb); -out: - return; + return true; + out_err: ICMP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS); - goto out; + return false; } -static void icmp_discard(struct sk_buff *skb) +static bool icmp_discard(struct sk_buff *skb) { + /* pretend it was a success */ + return true; } /* @@ -946,6 +953,7 @@ int icmp_rcv(struct sk_buff *skb) struct icmphdr *icmph; struct rtable *rt = skb_rtable(skb); struct net *net = dev_net(rt->dst.dev); + bool success; if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { struct sec_path *sp = skb_sec_path(skb); @@ -1012,7 +1020,12 @@ int icmp_rcv(struct sk_buff *skb) } } - icmp_pointers[icmph->type].handler(skb); + success = icmp_pointers[icmph->type].handler(skb); + + if (success) { + consume_skb(skb); + return 0; + } drop: kfree_skb(skb); diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index bb15d0e03d4f851d789734213814ed5359b59be1..666cf364df86dffbab3cd2b7b1ea66e72d3ac7a8 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -112,17 +112,17 @@ #ifdef CONFIG_IP_MULTICAST /* Parameter names and values are taken from igmp-v2-06 draft */ -#define IGMP_V1_Router_Present_Timeout (400*HZ) -#define IGMP_V2_Router_Present_Timeout (400*HZ) -#define IGMP_V2_Unsolicited_Report_Interval (10*HZ) -#define IGMP_V3_Unsolicited_Report_Interval (1*HZ) -#define IGMP_Query_Response_Interval (10*HZ) -#define IGMP_Query_Robustness_Variable 2 +#define IGMP_V1_ROUTER_PRESENT_TIMEOUT (400*HZ) +#define IGMP_V2_ROUTER_PRESENT_TIMEOUT (400*HZ) +#define IGMP_V2_UNSOLICITED_REPORT_INTERVAL (10*HZ) +#define IGMP_V3_UNSOLICITED_REPORT_INTERVAL (1*HZ) +#define IGMP_QUERY_RESPONSE_INTERVAL (10*HZ) +#define IGMP_QUERY_ROBUSTNESS_VARIABLE 2 -#define IGMP_Initial_Report_Delay (1) +#define IGMP_INITIAL_REPORT_DELAY (1) -/* IGMP_Initial_Report_Delay is not from IGMP specs! +/* IGMP_INITIAL_REPORT_DELAY is not from IGMP specs! * IGMP specs require to report membership immediately after * joining a group, but we delay the first report by a * small interval. It seems more natural and still does not @@ -878,15 +878,15 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, if (ih->code == 0) { /* Alas, old v1 router presents here. 
*/ - max_delay = IGMP_Query_Response_Interval; + max_delay = IGMP_QUERY_RESPONSE_INTERVAL; in_dev->mr_v1_seen = jiffies + - IGMP_V1_Router_Present_Timeout; + IGMP_V1_ROUTER_PRESENT_TIMEOUT; group = 0; } else { /* v2 router present */ max_delay = ih->code*(HZ/IGMP_TIMER_SCALE); in_dev->mr_v2_seen = jiffies + - IGMP_V2_Router_Present_Timeout; + IGMP_V2_ROUTER_PRESENT_TIMEOUT; } /* cancel the interface change timer */ in_dev->mr_ifc_count = 0; @@ -898,7 +898,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, return true; /* ignore bogus packet; freed by caller */ } else if (IGMP_V1_SEEN(in_dev)) { /* This is a v3 query with v1 queriers present */ - max_delay = IGMP_Query_Response_Interval; + max_delay = IGMP_QUERY_RESPONSE_INTERVAL; group = 0; } else if (IGMP_V2_SEEN(in_dev)) { /* this is a v3 query with v2 queriers present; @@ -1217,7 +1217,7 @@ static void igmp_group_added(struct ip_mc_list *im) return; if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) { spin_lock_bh(&im->lock); - igmp_start_timer(im, IGMP_Initial_Report_Delay); + igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY); spin_unlock_bh(&im->lock); return; } @@ -1540,7 +1540,7 @@ static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr) int sysctl_igmp_max_memberships __read_mostly = IP_MAX_MEMBERSHIPS; int sysctl_igmp_max_msf __read_mostly = IP_MAX_MSF; #ifdef CONFIG_IP_MULTICAST -int sysctl_igmp_qrv __read_mostly = IGMP_Query_Robustness_Variable; +int sysctl_igmp_qrv __read_mostly = IGMP_QUERY_ROBUSTNESS_VARIABLE; #endif static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode, @@ -2686,11 +2686,7 @@ static int igmp_mcf_seq_show(struct seq_file *seq, void *v) struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq); if (v == SEQ_START_TOKEN) { - seq_printf(seq, - "%3s %6s " - "%10s %10s %6s %6s\n", "Idx", - "Device", "MCA", - "SRC", "INC", "EXC"); + seq_puts(seq, "Idx Device MCA SRC INC EXC\n"); } else { seq_printf(seq, "%3d %6.6s 0x%08x " diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index 19419b60cb37d1c59c8238b49eab77d87ff0b4a1..e7920352646aed0a0680557babecfb586283ce92 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -458,6 +458,6 @@ void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, ". Dropping fragment.\n"; if (PTR_ERR(q) == -ENOBUFS) - LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg); + net_dbg_ratelimited("%s%s", prefix, msg); } EXPORT_SYMBOL(inet_frag_maybe_warn_overflow); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 2811cc18701a2f49499286fd1a5c4f891abf6144..e5b6d0ddcb5808f662ca0b1fd5863d63e6b54b83 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -80,7 +80,7 @@ struct ipq { struct inet_peer *peer; }; -static inline u8 ip4_frag_ecn(u8 tos) +static u8 ip4_frag_ecn(u8 tos) { return 1 << (tos & INET_ECN_MASK); } @@ -148,7 +148,7 @@ static void ip4_frag_init(struct inet_frag_queue *q, const void *a) inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, 1) : NULL; } -static __inline__ void ip4_frag_free(struct inet_frag_queue *q) +static void ip4_frag_free(struct inet_frag_queue *q) { struct ipq *qp; @@ -160,7 +160,7 @@ static __inline__ void ip4_frag_free(struct inet_frag_queue *q) /* Destruction primitives. 
*/ -static __inline__ void ipq_put(struct ipq *ipq) +static void ipq_put(struct ipq *ipq) { inet_frag_put(&ipq->q, &ip4_frags); } @@ -236,7 +236,7 @@ out: /* Find the correct entry in the "incomplete datagrams" queue for * this IP datagram, and create new one, if nothing is found. */ -static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user) +static struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user) { struct inet_frag_queue *q; struct ip4_create_arg arg; @@ -256,7 +256,7 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user) } /* Is the fragment too far ahead to be part of ipq? */ -static inline int ip_frag_too_far(struct ipq *qp) +static int ip_frag_too_far(struct ipq *qp) { struct inet_peer *peer = qp->peer; unsigned int max = sysctl_ipfrag_max_dist; @@ -618,8 +618,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, return 0; out_nomem: - LIMIT_NETDEBUG(KERN_ERR pr_fmt("queue_glue: no memory for gluing queue %p\n"), - qp); + net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp); err = -ENOMEM; goto out_fail; out_oversize: @@ -795,16 +794,16 @@ static void __init ip4_frags_ctl_register(void) register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table); } #else -static inline int ip4_frags_ns_ctl_register(struct net *net) +static int ip4_frags_ns_ctl_register(struct net *net) { return 0; } -static inline void ip4_frags_ns_ctl_unregister(struct net *net) +static void ip4_frags_ns_ctl_unregister(struct net *net) { } -static inline void __init ip4_frags_ctl_register(void) +static void __init ip4_frags_ctl_register(void) { } #endif diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 12055fdbe716f0f1d905f288cbe36415f0e4f592..ac8491245e5b2e99a50259bd7a6d95d92bcff4f4 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -789,7 +789,7 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u16(skb, IFLA_GRE_ENCAP_DPORT, t->encap.dport) || nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS, - t->encap.dport)) + t->encap.flags)) goto nla_put_failure; return 0; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index bc6471d4abcdaa464d9effc244c5f1bdcb989681..b50861b22b6bea036b1a99ddf141d7ed2d6cf6cd 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -662,12 +662,10 @@ slow_path: if (len < left) { len &= ~7; } - /* - * Allocate buffer. 
- */ - if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) { - NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n"); + /* Allocate buffer */ + skb2 = alloc_skb(len + hlen + ll_rs, GFP_ATOMIC); + if (!skb2) { err = -ENOMEM; goto fail; } @@ -754,14 +752,16 @@ EXPORT_SYMBOL(ip_fragment); int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) { - struct iovec *iov = from; + struct msghdr *msg = from; if (skb->ip_summed == CHECKSUM_PARTIAL) { - if (memcpy_fromiovecend(to, iov, offset, len) < 0) + /* XXX: stripping const */ + if (memcpy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len) < 0) return -EFAULT; } else { __wsum csum = 0; - if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0) + /* XXX: stripping const */ + if (csum_partial_copy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len, &csum) < 0) return -EFAULT; skb->csum = csum_block_add(skb->csum, csum, odd); } diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 9daf2177dc005c339c536c618fedc11259c178bc..8a89c738b7a3b43407293f521bd6d7e009ee7c80 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -192,7 +192,7 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc, int err, val; struct cmsghdr *cmsg; - for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { + for_each_cmsghdr(cmsg, msg) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; #if IS_ENABLED(CONFIG_IPV6) @@ -399,6 +399,22 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf kfree_skb(skb); } +static bool ipv4_pktinfo_prepare_errqueue(const struct sock *sk, + const struct sk_buff *skb, + int ee_origin) +{ + struct in_pktinfo *info = PKTINFO_SKB_CB(skb); + + if ((ee_origin != SO_EE_ORIGIN_TIMESTAMPING) || + (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) || + (!skb->dev)) + return false; + + info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr; + info->ipi_ifindex = skb->dev->ifindex; + return true; +} + /* * Handle MSG_ERRQUEUE */ @@ -414,6 +430,8 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) int err; int copied; + WARN_ON_ONCE(sk->sk_family == AF_INET6); + err = -EAGAIN; skb = sock_dequeue_err_skb(sk); if (skb == NULL) @@ -424,7 +442,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) msg->msg_flags |= MSG_TRUNC; copied = len; } - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto out_free_skb; @@ -444,7 +462,9 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); sin = &errhdr.offender; sin->sin_family = AF_UNSPEC; - if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP) { + + if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP || + ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin)) { struct inet_sock *inet = inet_sk(sk); sin->sin_family = AF_INET; @@ -1049,7 +1069,7 @@ e_inval: } /** - * ipv4_pktinfo_prepare - transfert some info from rtable to skb + * ipv4_pktinfo_prepare - transfer some info from rtable to skb * @sk: socket * @skb: buffer * diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 0bb8e141eaccd66e9865385a9c71b33d1bbfc65f..63e745aadab6466b2e32e083c99ad31af39243c4 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -56,7 +56,6 @@ #include #include #include -#include #if IS_ENABLED(CONFIG_IPV6) #include @@ -491,18 +490,51 @@ 
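The ip_sockglue.c change above swaps the open-coded CMSG_FIRSTHDR()/CMSG_NXTHDR() loop for the new for_each_cmsghdr() helper; the iteration itself is unchanged. The same walk, written with the portable userspace macros:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	union { char buf[CMSG_SPACE(sizeof(int))]; struct cmsghdr align; } u;
	struct msghdr msg = { .msg_control = u.buf,
			      .msg_controllen = sizeof(u.buf) };
	struct cmsghdr *cmsg;

	memset(u.buf, 0, sizeof(u.buf));
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type  = SCM_RIGHTS;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(int));

	/* what for_each_cmsghdr(cmsg, msg) expands to */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		printf("level=%d type=%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
	return 0;
}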
EXPORT_SYMBOL_GPL(ip_tunnel_rcv); static int ip_encap_hlen(struct ip_tunnel_encap *e) { - switch (e->type) { - case TUNNEL_ENCAP_NONE: + const struct ip_tunnel_encap_ops *ops; + int hlen = -EINVAL; + + if (e->type == TUNNEL_ENCAP_NONE) return 0; - case TUNNEL_ENCAP_FOU: - return sizeof(struct udphdr); - case TUNNEL_ENCAP_GUE: - return sizeof(struct udphdr) + sizeof(struct guehdr); - default: + + if (e->type >= MAX_IPTUN_ENCAP_OPS) return -EINVAL; - } + + rcu_read_lock(); + ops = rcu_dereference(iptun_encaps[e->type]); + if (likely(ops && ops->encap_hlen)) + hlen = ops->encap_hlen(e); + rcu_read_unlock(); + + return hlen; } +const struct ip_tunnel_encap_ops __rcu * + iptun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly; + +int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *ops, + unsigned int num) +{ + return !cmpxchg((const struct ip_tunnel_encap_ops **) + &iptun_encaps[num], + NULL, ops) ? 0 : -1; +} +EXPORT_SYMBOL(ip_tunnel_encap_add_ops); + +int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *ops, + unsigned int num) +{ + int ret; + + ret = (cmpxchg((const struct ip_tunnel_encap_ops **) + &iptun_encaps[num], + ops, NULL) == ops) ? 0 : -1; + + synchronize_net(); + + return ret; +} +EXPORT_SYMBOL(ip_tunnel_encap_del_ops); + int ip_tunnel_encap_setup(struct ip_tunnel *t, struct ip_tunnel_encap *ipencap) { @@ -526,63 +558,22 @@ int ip_tunnel_encap_setup(struct ip_tunnel *t, } EXPORT_SYMBOL_GPL(ip_tunnel_encap_setup); -static int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, - size_t hdr_len, u8 *protocol, struct flowi4 *fl4) -{ - struct udphdr *uh; - __be16 sport; - bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM); - int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; - - skb = iptunnel_handle_offloads(skb, csum, type); - - if (IS_ERR(skb)) - return PTR_ERR(skb); - - /* Get length and hash before making space in skb */ - - sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev), - skb, 0, 0, false); - - skb_push(skb, hdr_len); - - skb_reset_transport_header(skb); - uh = udp_hdr(skb); - - if (e->type == TUNNEL_ENCAP_GUE) { - struct guehdr *guehdr = (struct guehdr *)&uh[1]; - - guehdr->version = 0; - guehdr->hlen = 0; - guehdr->flags = 0; - guehdr->next_hdr = *protocol; - } - - uh->dest = e->dport; - uh->source = sport; - uh->len = htons(skb->len); - uh->check = 0; - udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb, - fl4->saddr, fl4->daddr, skb->len); - - *protocol = IPPROTO_UDP; - - return 0; -} - int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t, u8 *protocol, struct flowi4 *fl4) { - switch (t->encap.type) { - case TUNNEL_ENCAP_NONE: + const struct ip_tunnel_encap_ops *ops; + int ret = -EINVAL; + + if (t->encap.type == TUNNEL_ENCAP_NONE) return 0; - case TUNNEL_ENCAP_FOU: - case TUNNEL_ENCAP_GUE: - return fou_build_header(skb, &t->encap, t->encap_hlen, - protocol, fl4); - default: - return -EINVAL; - } + + rcu_read_lock(); + ops = rcu_dereference(iptun_encaps[t->encap.type]); + if (likely(ops && ops->build_header)) + ret = ops->build_header(skb, &t->encap, protocol, fl4); + rcu_read_unlock(); + + return ret; } EXPORT_SYMBOL(ip_tunnel_encap); diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 648fa1490ea7f96d1f373b4f8e6b167ed70e82e3..7fa18bc7e47fd3199c18247f19390d61f6e83013 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -115,7 +115,7 @@ */ int ic_set_manually __initdata = 0; /* IPconfig parameters set manually */ -static int ic_enable __initdata = 0; /* IP config enabled? 
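ip_encap_hlen() and ip_tunnel_encap() above stop hard-coding FOU/GUE and instead dispatch through the RCU-protected iptun_encaps[] array; providers claim a slot atomically with cmpxchg(), so registration needs no lock. A userspace sketch of the claim/release protocol, using GCC's __sync builtin in place of the kernel's cmpxchg() (the kernel additionally calls synchronize_net() on removal so RCU readers drain):

#include <stdio.h>

#define MAX_OPS 8

struct ops { const char *name; };

static struct ops *slots[MAX_OPS];

static int add_ops(struct ops *ops, unsigned int num)
{
	/* claim: NULL -> ops succeeds only if the slot was free */
	return __sync_val_compare_and_swap(&slots[num], NULL, ops) == NULL ? 0 : -1;
}

static int del_ops(struct ops *ops, unsigned int num)
{
	/* release: ops -> NULL succeeds only if we still own the slot */
	return __sync_val_compare_and_swap(&slots[num], ops, NULL) == ops ? 0 : -1;
}

int main(void)
{
	struct ops fou = { .name = "fou" };

	printf("%d\n", add_ops(&fou, 1));	/* 0: slot claimed */
	printf("%d\n", add_ops(&fou, 1));	/* -1: slot already taken */
	printf("%d\n", del_ops(&fou, 1));	/* 0: released */
	return 0;
}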
*/ +static int ic_enable __initdata; /* IP config enabled? */ /* Protocol choice */ int ic_proto_enabled __initdata = 0 @@ -130,7 +130,7 @@ int ic_proto_enabled __initdata = 0 #endif ; -static int ic_host_name_set __initdata = 0; /* Host name set by us? */ +static int ic_host_name_set __initdata; /* Host name set by us? */ __be32 ic_myaddr = NONE; /* My IP address */ static __be32 ic_netmask = NONE; /* Netmask for local subnet */ @@ -160,17 +160,17 @@ static u8 ic_domain[64]; /* DNS (not NIS) domain name */ static char user_dev_name[IFNAMSIZ] __initdata = { 0, }; /* Protocols supported by available interfaces */ -static int ic_proto_have_if __initdata = 0; +static int ic_proto_have_if __initdata; /* MTU for boot device */ -static int ic_dev_mtu __initdata = 0; +static int ic_dev_mtu __initdata; #ifdef IPCONFIG_DYNAMIC static DEFINE_SPINLOCK(ic_recv_lock); -static volatile int ic_got_reply __initdata = 0; /* Proto(s) that replied */ +static volatile int ic_got_reply __initdata; /* Proto(s) that replied */ #endif #ifdef IPCONFIG_DHCP -static int ic_dhcp_msgtype __initdata = 0; /* DHCP msg type received */ +static int ic_dhcp_msgtype __initdata; /* DHCP msg type received */ #endif @@ -186,8 +186,8 @@ struct ic_device { __be32 xid; }; -static struct ic_device *ic_first_dev __initdata = NULL;/* List of open device */ -static struct net_device *ic_dev __initdata = NULL; /* Selected device */ +static struct ic_device *ic_first_dev __initdata; /* List of open device */ +static struct net_device *ic_dev __initdata; /* Selected device */ static bool __init ic_is_init_dev(struct net_device *dev) { @@ -498,7 +498,7 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt struct arphdr *rarp; unsigned char *rarp_ptr; __be32 sip, tip; - unsigned char *sha, *tha; /* s for "source", t for "target" */ + unsigned char *tha; /* t for "target" */ struct ic_device *d; if (!net_eq(dev_net(dev), &init_net)) @@ -549,7 +549,6 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt goto drop_unlock; /* should never happen */ /* Extract variable-width fields */ - sha = rarp_ptr; rarp_ptr += dev->addr_len; memcpy(&sip, rarp_ptr, 4); rarp_ptr += 4; diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 37096d64730ef0c3c7c7278995f9c0c6e64896e9..40403114f00a115a6726e0a1b322aa01ca4db0b0 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -465,7 +465,7 @@ static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) || nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, - tunnel->encap.dport)) + tunnel->encap.flags)) goto nla_put_failure; return 0; diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index 4c019d5c3f5759128b8de9ac900282aaa5f014e2..59f883d9cadfcdfb69d775c506b0c911d31ff549 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig @@ -113,6 +113,15 @@ config NFT_MASQ_IPV4 This is the expression that provides IPv4 masquerading support for nf_tables. +config NFT_REDIR_IPV4 + tristate "IPv4 redirect support for nf_tables" + depends on NF_TABLES_IPV4 + depends on NFT_REDIR + select NF_NAT_REDIRECT + help + This is the expression that provides IPv4 redirect support for + nf_tables. 
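The ipconfig.c cleanups above drop the explicit = 0 / = NULL initializers from the static __initdata variables: static storage is zero-initialized by the C standard, and an explicit initializer has historically been able to push the object into .data rather than .bss (exact section placement is toolchain-dependent; treat that part as an assumption). Minimal illustration:

#include <stdio.h>

static int implicitly_zero;		/* zero-initialized per the C standard */
static int explicitly_zero = 0;		/* same value; the initializer is redundant */

int main(void)
{
	printf("%d %d\n", implicitly_zero, explicitly_zero);	/* "0 0" */
	return 0;
}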
+ config NF_NAT_SNMP_BASIC tristate "Basic SNMP-ALG support" depends on NF_CONNTRACK_SNMP diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile index f4cef5af096978e813e179cfbe714862799556d4..7fe6c703528f79f3ba6d355724c26f32e20a21c5 100644 --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile @@ -40,6 +40,7 @@ obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o obj-$(CONFIG_NFT_MASQ_IPV4) += nft_masq_ipv4.o +obj-$(CONFIG_NFT_REDIR_IPV4) += nft_redir_ipv4.o obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o # generic IP tables diff --git a/net/ipv4/netfilter/nf_log_arp.c b/net/ipv4/netfilter/nf_log_arp.c index ccfc78db12ee8acae68faf451f2cf6bc5597f2c1..d059182c1466fabbc689d0c49715bc441b9c0f72 100644 --- a/net/ipv4/netfilter/nf_log_arp.c +++ b/net/ipv4/netfilter/nf_log_arp.c @@ -10,6 +10,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include @@ -74,12 +75,12 @@ static void dump_arp_packet(struct nf_log_buf *m, ap->mac_src, ap->ip_src, ap->mac_dst, ap->ip_dst); } -void nf_log_arp_packet(struct net *net, u_int8_t pf, - unsigned int hooknum, const struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - const struct nf_loginfo *loginfo, - const char *prefix) +static void nf_log_arp_packet(struct net *net, u_int8_t pf, + unsigned int hooknum, const struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + const struct nf_loginfo *loginfo, + const char *prefix) { struct nf_log_buf *m; @@ -130,8 +131,17 @@ static int __init nf_log_arp_init(void) if (ret < 0) return ret; - nf_log_register(NFPROTO_ARP, &nf_arp_logger); + ret = nf_log_register(NFPROTO_ARP, &nf_arp_logger); + if (ret < 0) { + pr_err("failed to register logger\n"); + goto err1; + } + return 0; + +err1: + unregister_pernet_subsys(&nf_log_arp_net_ops); + return ret; } static void __exit nf_log_arp_exit(void) diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c index 078bdca1b607a167e05e7cf1bdfedccdd5aca92a..75101980eeee197a4f8413bbd7d29f4fd9e4bb74 100644 --- a/net/ipv4/netfilter/nf_log_ipv4.c +++ b/net/ipv4/netfilter/nf_log_ipv4.c @@ -5,6 +5,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
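The nf_log_arp init hunk above (mirrored for nf_log_ipv4 below) stops ignoring nf_log_register() failures: when the second registration step fails, the pernet subsystem registered first is now rolled back. The shape of the pattern as a standalone sketch:

#include <stdio.h>

static int register_a(void) { return 0; }
static void unregister_a(void) { }
static int register_b(void) { return -1; }	/* simulate failure */

static int module_init_sketch(void)
{
	int ret = register_a();

	if (ret < 0)
		return ret;

	ret = register_b();
	if (ret < 0)
		goto err_a;	/* unwind in reverse order */
	return 0;

err_a:
	unregister_a();
	return ret;
}

int main(void)
{
	printf("init: %d\n", module_init_sketch());	/* -1, with A rolled back */
	return 0;
}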
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include @@ -366,8 +367,17 @@ static int __init nf_log_ipv4_init(void) if (ret < 0) return ret; - nf_log_register(NFPROTO_IPV4, &nf_ip_logger); + ret = nf_log_register(NFPROTO_IPV4, &nf_ip_logger); + if (ret < 0) { + pr_err("failed to register logger\n"); + goto err1; + } + return 0; + +err1: + unregister_pernet_subsys(&nf_log_ipv4_net_ops); + return ret; } static void __exit nf_log_ipv4_exit(void) diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c index 1baaa83dfe5ce63b8ed7cb8fb6e9e3798671e4de..536da7bc598ae9321224bb16c8d800571a163153 100644 --- a/net/ipv4/netfilter/nf_reject_ipv4.c +++ b/net/ipv4/netfilter/nf_reject_ipv4.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c new file mode 100644 index 0000000000000000000000000000000000000000..ff2d23d8c87a16964e29f478640dccd5ca527a02 --- /dev/null +++ b/net/ipv4/netfilter/nft_redir_ipv4.c @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2014 Arturo Borrero Gonzalez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void nft_redir_ipv4_eval(const struct nft_expr *expr, + struct nft_data data[NFT_REG_MAX + 1], + const struct nft_pktinfo *pkt) +{ + struct nft_redir *priv = nft_expr_priv(expr); + struct nf_nat_ipv4_multi_range_compat mr; + unsigned int verdict; + + memset(&mr, 0, sizeof(mr)); + if (priv->sreg_proto_min) { + mr.range[0].min.all = (__force __be16) + data[priv->sreg_proto_min].data[0]; + mr.range[0].max.all = (__force __be16) + data[priv->sreg_proto_max].data[0]; + mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED; + } + + mr.range[0].flags |= priv->flags; + + verdict = nf_nat_redirect_ipv4(pkt->skb, &mr, pkt->ops->hooknum); + data[NFT_REG_VERDICT].verdict = verdict; +} + +static struct nft_expr_type nft_redir_ipv4_type; +static const struct nft_expr_ops nft_redir_ipv4_ops = { + .type = &nft_redir_ipv4_type, + .size = NFT_EXPR_SIZE(sizeof(struct nft_redir)), + .eval = nft_redir_ipv4_eval, + .init = nft_redir_init, + .dump = nft_redir_dump, + .validate = nft_redir_validate, +}; + +static struct nft_expr_type nft_redir_ipv4_type __read_mostly = { + .family = NFPROTO_IPV4, + .name = "redir", + .ops = &nft_redir_ipv4_ops, + .policy = nft_redir_policy, + .maxattr = NFTA_REDIR_MAX, + .owner = THIS_MODULE, +}; + +static int __init nft_redir_ipv4_module_init(void) +{ + return nft_register_expr(&nft_redir_ipv4_type); +} + +static void __exit nft_redir_ipv4_module_exit(void) +{ + nft_unregister_expr(&nft_redir_ipv4_type); +} + +module_init(nft_redir_ipv4_module_init); +module_exit(nft_redir_ipv4_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Arturo Borrero Gonzalez "); +MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "redir"); diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c index ed33299c56d15089055f4664ce28ef3da36f9ee3..d729542bd1b7901c174c62996b13110517a49032 100644 --- a/net/ipv4/netfilter/nft_reject_ipv4.c +++ b/net/ipv4/netfilter/nft_reject_ipv4.c @@ -19,9 +19,9 @@ #include #include -void nft_reject_ipv4_eval(const struct nft_expr *expr, - struct nft_data data[NFT_REG_MAX + 1], - const struct nft_pktinfo *pkt) +static void nft_reject_ipv4_eval(const struct 
nft_expr *expr, + struct nft_data data[NFT_REG_MAX + 1], + const struct nft_pktinfo *pkt) { struct nft_reject *priv = nft_expr_priv(expr); @@ -36,7 +36,6 @@ void nft_reject_ipv4_eval(const struct nft_expr *expr, data[NFT_REG_VERDICT].verdict = NF_DROP; } -EXPORT_SYMBOL_GPL(nft_reject_ipv4_eval); static struct nft_expr_type nft_reject_ipv4_type; static const struct nft_expr_ops nft_reject_ipv4_ops = { diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 5d740cccf69ec29d9778ddb0568c726266904ce9..c0d82f78d364fe5561f819f953e3eb48d2f134da 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -662,7 +662,7 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len, * Fetch the ICMP header provided by the userland. * iovec is modified! The ICMP header is consumed. */ - if (memcpy_fromiovec(user_icmph, msg->msg_iov, icmph_len)) + if (memcpy_from_msg(user_icmph, msg, icmph_len)) return -EFAULT; if (family == AF_INET) { @@ -811,7 +811,8 @@ back_from_confirm: pfh.icmph.checksum = 0; pfh.icmph.un.echo.id = inet->inet_sport; pfh.icmph.un.echo.sequence = user_icmph.un.echo.sequence; - pfh.iov = msg->msg_iov; + /* XXX: stripping const */ + pfh.iov = (struct iovec *)msg->msg_iter.iov; pfh.wcheck = 0; pfh.family = AF_INET; @@ -869,7 +870,7 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, } /* Don't bother checking the checksum */ - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto done; @@ -949,7 +950,7 @@ EXPORT_SYMBOL_GPL(ping_queue_rcv_skb); * All we need to do is get the socket. */ -void ping_rcv(struct sk_buff *skb) +bool ping_rcv(struct sk_buff *skb) { struct sock *sk; struct net *net = dev_net(skb->dev); @@ -968,11 +969,11 @@ void ping_rcv(struct sk_buff *skb) pr_debug("rcv on socket %p\n", sk); ping_queue_rcv_skb(sk, skb_get(skb)); sock_put(sk); - return; + return true; } pr_debug("no socket, dropping\n"); - /* We're called from icmp_rcv(). kfree_skb() is done there. */ + return false; } EXPORT_SYMBOL_GPL(ping_rcv); diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 8e3eb39f84e72c9e0f487bcfc8950b544b882a11..8f9cd200ce20118f5e5af7d2faa115d0f19d57f0 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -181,6 +181,7 @@ static const struct snmp_mib snmp4_udp_list[] = { SNMP_MIB_ITEM("RcvbufErrors", UDP_MIB_RCVBUFERRORS), SNMP_MIB_ITEM("SndbufErrors", UDP_MIB_SNDBUFERRORS), SNMP_MIB_ITEM("InCsumErrors", UDP_MIB_CSUMERRORS), + SNMP_MIB_ITEM("IgnoredMulti", UDP_MIB_IGNOREDMULTI), SNMP_MIB_SENTINEL }; @@ -287,6 +288,10 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPWantZeroWindowAdv", LINUX_MIB_TCPWANTZEROWINDOWADV), SNMP_MIB_ITEM("TCPSynRetrans", LINUX_MIB_TCPSYNRETRANS), SNMP_MIB_ITEM("TCPOrigDataSent", LINUX_MIB_TCPORIGDATASENT), + SNMP_MIB_ITEM("TCPHystartTrainDetect", LINUX_MIB_TCPHYSTARTTRAINDETECT), + SNMP_MIB_ITEM("TCPHystartTrainCwnd", LINUX_MIB_TCPHYSTARTTRAINCWND), + SNMP_MIB_ITEM("TCPHystartDelayDetect", LINUX_MIB_TCPHYSTARTDELAYDETECT), + SNMP_MIB_ITEM("TCPHystartDelayCwnd", LINUX_MIB_TCPHYSTARTDELAYCWND), SNMP_MIB_SENTINEL }; @@ -296,12 +301,12 @@ static void icmpmsg_put_line(struct seq_file *seq, unsigned long *vals, int j; if (count) { - seq_printf(seq, "\nIcmpMsg:"); + seq_puts(seq, "\nIcmpMsg:"); for (j = 0; j < count; ++j) seq_printf(seq, " %sType%u", type[j] & 0x100 ? 
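ping_rcv() above now reports whether it claimed the skb instead of relying on icmp_rcv() to always free it, so the caller can free (or account) unclaimed echo replies itself. The ownership contract, reduced to a userspace sketch (names are illustrative, not the kernel API):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt { int dst_id; };

/* returns true if a receiver took ownership of the packet */
static bool deliver(struct pkt *p)
{
	if (p->dst_id == 42) {
		free(p);	/* consumer takes ownership */
		return true;
	}
	return false;
}

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));

	p->dst_id = 7;
	if (!deliver(p)) {
		puts("no socket, dropping");
		free(p);	/* caller frees only when the callee declined */
	}
	return 0;
}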
"Out" : "In", type[j] & 0xff); - seq_printf(seq, "\nIcmpMsg:"); + seq_puts(seq, "\nIcmpMsg:"); for (j = 0; j < count; ++j) seq_printf(seq, " %lu", vals[j]); } @@ -342,7 +347,7 @@ static void icmp_put(struct seq_file *seq) seq_puts(seq, "\nIcmp: InMsgs InErrors InCsumErrors"); for (i = 0; icmpmibmap[i].name != NULL; i++) seq_printf(seq, " In%s", icmpmibmap[i].name); - seq_printf(seq, " OutMsgs OutErrors"); + seq_puts(seq, " OutMsgs OutErrors"); for (i = 0; icmpmibmap[i].name != NULL; i++) seq_printf(seq, " Out%s", icmpmibmap[i].name); seq_printf(seq, "\nIcmp: %lu %lu %lu", diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 739db3100c2375ebd5113c4224b20423a585cc12..0bb68df5055d2d3f92cb06e829a997b44b512d65 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -79,6 +79,16 @@ #include #include #include +#include + +struct raw_frag_vec { + struct msghdr *msg; + union { + struct icmphdr icmph; + char c[1]; + } hdr; + int hlen; +}; static struct raw_hashinfo raw_v4_hashinfo = { .lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock), @@ -420,53 +430,57 @@ error: return err; } -static int raw_probe_proto_opt(struct flowi4 *fl4, struct msghdr *msg) +static int raw_probe_proto_opt(struct raw_frag_vec *rfv, struct flowi4 *fl4) { - struct iovec *iov; - u8 __user *type = NULL; - u8 __user *code = NULL; - int probed = 0; - unsigned int i; + int err; - if (!msg->msg_iov) + if (fl4->flowi4_proto != IPPROTO_ICMP) return 0; - for (i = 0; i < msg->msg_iovlen; i++) { - iov = &msg->msg_iov[i]; - if (!iov) - continue; - - switch (fl4->flowi4_proto) { - case IPPROTO_ICMP: - /* check if one-byte field is readable or not. */ - if (iov->iov_base && iov->iov_len < 1) - break; - - if (!type) { - type = iov->iov_base; - /* check if code field is readable or not. */ - if (iov->iov_len > 1) - code = type + 1; - } else if (!code) - code = iov->iov_base; - - if (type && code) { - if (get_user(fl4->fl4_icmp_type, type) || - get_user(fl4->fl4_icmp_code, code)) - return -EFAULT; - probed = 1; - } - break; - default: - probed = 1; - break; - } - if (probed) - break; - } + /* We only need the first two bytes. 
*/ + rfv->hlen = 2; + + err = memcpy_from_msg(rfv->hdr.c, rfv->msg, rfv->hlen); + if (err) + return err; + + fl4->fl4_icmp_type = rfv->hdr.icmph.type; + fl4->fl4_icmp_code = rfv->hdr.icmph.code; + return 0; } +static int raw_getfrag(void *from, char *to, int offset, int len, int odd, + struct sk_buff *skb) +{ + struct raw_frag_vec *rfv = from; + + if (offset < rfv->hlen) { + int copy = min(rfv->hlen - offset, len); + + if (skb->ip_summed == CHECKSUM_PARTIAL) + memcpy(to, rfv->hdr.c + offset, copy); + else + skb->csum = csum_block_add( + skb->csum, + csum_partial_copy_nocheck(rfv->hdr.c + offset, + to, copy, 0), + odd); + + odd = 0; + offset += copy; + to += copy; + len -= copy; + + if (!len) + return 0; + } + + offset -= rfv->hlen; + + return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb); +} + static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) { @@ -480,6 +494,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, u8 tos; int err; struct ip_options_data opt_copy; + struct raw_frag_vec rfv; err = -EMSGSIZE; if (len > 0xFFFF) @@ -585,7 +600,10 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, daddr, saddr, 0, 0); if (!inet->hdrincl) { - err = raw_probe_proto_opt(&fl4, msg); + rfv.msg = msg; + rfv.hlen = 0; + + err = raw_probe_proto_opt(&rfv, &fl4); if (err) goto done; } @@ -607,7 +625,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, back_from_confirm: if (inet->hdrincl) - err = raw_send_hdrinc(sk, &fl4, msg->msg_iov, len, + /* XXX: stripping const */ + err = raw_send_hdrinc(sk, &fl4, (struct iovec *)msg->msg_iter.iov, len, &rt, msg->msg_flags); else { @@ -616,8 +635,8 @@ back_from_confirm: if (!ipc.addr) ipc.addr = fl4.daddr; lock_sock(sk); - err = ip_append_data(sk, &fl4, ip_generic_getfrag, - msg->msg_iov, len, 0, + err = ip_append_data(sk, &fl4, raw_getfrag, + &rfv, len, 0, &ipc, &rt, msg->msg_flags); if (err) ip_flush_pending_frames(sk); @@ -718,7 +737,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, copied = len; } - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto done; diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 32b98d0207b48b07472f0954e327483900be08e6..45fe60c5238e99d0a639ecf4698cddab141f0fdd 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -19,10 +19,6 @@ #include #include -/* Timestamps: lowest bits store TCP options */ -#define TSBITS 6 -#define TSMASK (((__u32)1 << TSBITS) - 1) - extern int sysctl_tcp_syncookies; static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly; @@ -30,6 +26,30 @@ static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly; #define COOKIEBITS 24 /* Upper bits store count */ #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) +/* TCP Timestamp: 6 lowest bits of timestamp sent in the cookie SYN-ACK + * stores TCP options: + * + * MSB LSB + * | 31 ... 6 | 5 | 4 | 3 2 1 0 | + * | Timestamp | ECN | SACK | WScale | + * + * When we receive a valid cookie-ACK, we look at the echoed tsval (if + * any) to figure out which TCP options we should use for the rebuilt + * connection. + * + * A WScale setting of '0xf' (which is an invalid scaling value) + * means that original syn did not include the TCP window scaling option. 
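raw_getfrag() above serves the first rfv->hlen bytes from the ICMP header cached by raw_probe_proto_opt() and only then falls through to the user data, rebasing offset across the seam. The copy split in isolation, as a userspace sketch (checksum handling omitted):

#include <stdio.h>
#include <string.h>

static const char hdr[2] = { 0x08, 0x00 };	/* cached ICMP type/code */
static const char body[] = "payload";

static void getfrag(char *to, int offset, int len)
{
	if (offset < (int)sizeof(hdr)) {
		int copy = sizeof(hdr) - offset;

		if (copy > len)
			copy = len;
		memcpy(to, hdr + offset, copy);
		offset += copy;
		to += copy;
		len -= copy;
		if (!len)
			return;
	}
	memcpy(to, body + (offset - sizeof(hdr)), len);	/* rebased offset */
}

int main(void)
{
	char out[9];

	getfrag(out, 0, sizeof(out));
	printf("%02x %02x %.7s\n", out[0], out[1], out + 2);	/* 08 00 payload */
	return 0;
}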
+ */ +#define TS_OPT_WSCALE_MASK 0xf +#define TS_OPT_SACK BIT(4) +#define TS_OPT_ECN BIT(5) +/* There is no TS_OPT_TIMESTAMP: + * if ACK contains timestamp option, we already know it was + * requested/supported by the syn/synack exchange. + */ +#define TSBITS 6 +#define TSMASK (((__u32)1 << TSBITS) - 1) + static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], ipv4_cookie_scratch); @@ -67,9 +87,11 @@ __u32 cookie_init_timestamp(struct request_sock *req) ireq = inet_rsk(req); - options = ireq->wscale_ok ? ireq->snd_wscale : 0xf; - options |= ireq->sack_ok << 4; - options |= ireq->ecn_ok << 5; + options = ireq->wscale_ok ? ireq->snd_wscale : TS_OPT_WSCALE_MASK; + if (ireq->sack_ok) + options |= TS_OPT_SACK; + if (ireq->ecn_ok) + options |= TS_OPT_ECN; ts = ts_now & ~TSMASK; ts |= options; @@ -219,16 +241,13 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, * additional tcp options in the timestamp. * This extracts these options from the timestamp echo. * - * The lowest 4 bits store snd_wscale. - * next 2 bits indicate SACK and ECN support. - * - * return false if we decode an option that should not be. + * return false if we decode a tcp option that is disabled + * on the host. */ -bool cookie_check_timestamp(struct tcp_options_received *tcp_opt, - struct net *net, bool *ecn_ok) +bool cookie_timestamp_decode(struct tcp_options_received *tcp_opt) { /* echoed timestamp, lowest bits contain options */ - u32 options = tcp_opt->rcv_tsecr & TSMASK; + u32 options = tcp_opt->rcv_tsecr; if (!tcp_opt->saw_tstamp) { tcp_clear_options(tcp_opt); @@ -238,22 +257,35 @@ bool cookie_check_timestamp(struct tcp_options_received *tcp_opt, if (!sysctl_tcp_timestamps) return false; - tcp_opt->sack_ok = (options & (1 << 4)) ? TCP_SACK_SEEN : 0; - *ecn_ok = (options >> 5) & 1; - if (*ecn_ok && !net->ipv4.sysctl_tcp_ecn) - return false; + tcp_opt->sack_ok = (options & TS_OPT_SACK) ? 
TCP_SACK_SEEN : 0; if (tcp_opt->sack_ok && !sysctl_tcp_sack) return false; - if ((options & 0xf) == 0xf) + if ((options & TS_OPT_WSCALE_MASK) == TS_OPT_WSCALE_MASK) return true; /* no window scaling */ tcp_opt->wscale_ok = 1; - tcp_opt->snd_wscale = options & 0xf; + tcp_opt->snd_wscale = options & TS_OPT_WSCALE_MASK; + return sysctl_tcp_window_scaling != 0; } -EXPORT_SYMBOL(cookie_check_timestamp); +EXPORT_SYMBOL(cookie_timestamp_decode); + +bool cookie_ecn_ok(const struct tcp_options_received *tcp_opt, + const struct net *net, const struct dst_entry *dst) +{ + bool ecn_ok = tcp_opt->rcv_tsecr & TS_OPT_ECN; + + if (!ecn_ok) + return false; + + if (net->ipv4.sysctl_tcp_ecn) + return true; + + return dst_feature(dst, RTAX_FEATURE_ECN); +} +EXPORT_SYMBOL(cookie_ecn_ok); struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) { @@ -269,14 +301,16 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) int mss; struct rtable *rt; __u8 rcv_wscale; - bool ecn_ok = false; struct flowi4 fl4; if (!sysctl_tcp_syncookies || !th->ack || th->rst) goto out; - if (tcp_synq_no_recent_overflow(sk) || - (mss = __cookie_v4_check(ip_hdr(skb), th, cookie)) == 0) { + if (tcp_synq_no_recent_overflow(sk)) + goto out; + + mss = __cookie_v4_check(ip_hdr(skb), th, cookie); + if (mss == 0) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); goto out; } @@ -287,7 +321,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) memset(&tcp_opt, 0, sizeof(tcp_opt)); tcp_parse_options(skb, &tcp_opt, 0, NULL); - if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok)) + if (!cookie_timestamp_decode(&tcp_opt)) goto out; ret = NULL; @@ -305,7 +339,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) ireq->ir_loc_addr = ip_hdr(skb)->daddr; ireq->ir_rmt_addr = ip_hdr(skb)->saddr; ireq->ir_mark = inet_request_mark(sk, skb); - ireq->ecn_ok = ecn_ok; ireq->snd_wscale = tcp_opt.snd_wscale; ireq->sack_ok = tcp_opt.sack_ok; ireq->wscale_ok = tcp_opt.wscale_ok; @@ -354,6 +387,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) dst_metric(&rt->dst, RTAX_INITRWND)); ireq->rcv_wscale = rcv_wscale; + ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst); ret = get_cookie_sock(sk, skb, req, &rt->dst); /* ip_queue_xmit() depends on our flow being setup diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index b3c53c8b331efc3d5cf6437fd3ec7634a154263c..e0ee384a448fb0e6eb5b957d98dbcb272ea97edb 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -495,6 +495,13 @@ static struct ctl_table ipv4_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, + { + .procname = "tcp_max_reordering", + .data = &sysctl_tcp_max_reordering, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, { .procname = "tcp_dsack", .data = &sysctl_tcp_dsack, diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 38c2bcb8dd5da4f2c8dede10942a1388fd3d9954..3075723c729bc98edf3a15eb0d0fbe172c300bbc 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -835,47 +835,29 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, int large_allowed) { struct tcp_sock *tp = tcp_sk(sk); - u32 xmit_size_goal, old_size_goal; - - xmit_size_goal = mss_now; - - if (large_allowed && sk_can_gso(sk)) { - u32 gso_size, hlen; - - /* Maybe we should/could use sk->sk_prot->max_header here ? 
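A worked example of the layout documented above: encoding wscale=7 with SACK and without ECN into the low TSBITS of the timestamp, then decoding it the way cookie_timestamp_decode() does (constants copied from the hunk; the tsval base is arbitrary):

#include <stdint.h>
#include <stdio.h>

#define TS_OPT_WSCALE_MASK	0xf
#define TS_OPT_SACK		(1u << 4)
#define TS_OPT_ECN		(1u << 5)
#define TSBITS			6
#define TSMASK			(((uint32_t)1 << TSBITS) - 1)

int main(void)
{
	uint32_t ts_now = 0x12345678;	/* stand-in for tcp_time_stamp */
	uint32_t options = 7;		/* snd_wscale */

	options |= TS_OPT_SACK;		/* SACK ok, ECN not requested */

	uint32_t tsval = (ts_now & ~TSMASK) | options;

	printf("wscale=%u sack=%d ecn=%d\n",
	       tsval & TS_OPT_WSCALE_MASK,
	       !!(tsval & TS_OPT_SACK),
	       !!(tsval & TS_OPT_ECN));	/* wscale=7 sack=1 ecn=0 */
	/* a wscale field of 0xf would mean "no window scaling on the SYN" */
	return 0;
}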
*/ - hlen = inet_csk(sk)->icsk_af_ops->net_header_len + - inet_csk(sk)->icsk_ext_hdr_len + - tp->tcp_header_len; - - /* Goal is to send at least one packet per ms, - * not one big TSO packet every 100 ms. - * This preserves ACK clocking and is consistent - * with tcp_tso_should_defer() heuristic. - */ - gso_size = sk->sk_pacing_rate / (2 * MSEC_PER_SEC); - gso_size = max_t(u32, gso_size, - sysctl_tcp_min_tso_segs * mss_now); - - xmit_size_goal = min_t(u32, gso_size, - sk->sk_gso_max_size - 1 - hlen); - - xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal); - - /* We try hard to avoid divides here */ - old_size_goal = tp->xmit_size_goal_segs * mss_now; - - if (likely(old_size_goal <= xmit_size_goal && - old_size_goal + mss_now > xmit_size_goal)) { - xmit_size_goal = old_size_goal; - } else { - tp->xmit_size_goal_segs = - min_t(u16, xmit_size_goal / mss_now, - sk->sk_gso_max_segs); - xmit_size_goal = tp->xmit_size_goal_segs * mss_now; - } + u32 new_size_goal, size_goal, hlen; + + if (!large_allowed || !sk_can_gso(sk)) + return mss_now; + + /* Maybe we should/could use sk->sk_prot->max_header here ? */ + hlen = inet_csk(sk)->icsk_af_ops->net_header_len + + inet_csk(sk)->icsk_ext_hdr_len + + tp->tcp_header_len; + + new_size_goal = sk->sk_gso_max_size - 1 - hlen; + new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal); + + /* We try hard to avoid divides here */ + size_goal = tp->gso_segs * mss_now; + if (unlikely(new_size_goal < size_goal || + new_size_goal >= size_goal + mss_now)) { + tp->gso_segs = min_t(u16, new_size_goal / mss_now, + sk->sk_gso_max_segs); + size_goal = tp->gso_segs * mss_now; } - return max(xmit_size_goal, mss_now); + return max(size_goal, mss_now); } static int tcp_send_mss(struct sock *sk, int *size_goal, int flags) @@ -1085,7 +1067,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t size) { - struct iovec *iov; + const struct iovec *iov; struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; int iovlen, flags, err, copied = 0; @@ -1136,8 +1118,8 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, mss_now = tcp_send_mss(sk, &size_goal, flags); /* Ok commence sending. */ - iovlen = msg->msg_iovlen; - iov = msg->msg_iov; + iovlen = msg->msg_iter.nr_segs; + iov = msg->msg_iter.iov; copied = 0; err = -EPIPE; @@ -1349,7 +1331,7 @@ static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) if (len > 0) { if (!(flags & MSG_TRUNC)) - err = memcpy_toiovec(msg->msg_iov, &c, 1); + err = memcpy_to_msg(msg, &c, 1); len = 1; } else msg->msg_flags |= MSG_TRUNC; @@ -1377,7 +1359,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) /* XXX -- need to support SO_PEEK_OFF */ skb_queue_walk(&sk->sk_write_queue, skb) { - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len); + err = skb_copy_datagram_msg(skb, 0, msg, skb->len); if (err) break; @@ -1729,7 +1711,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) { user_recv = current; tp->ucopy.task = user_recv; - tp->ucopy.iov = msg->msg_iov; + tp->ucopy.msg = msg; } tp->ucopy.len = len; @@ -1833,8 +1815,7 @@ do_prequeue: } if (!(flags & MSG_TRUNC)) { - err = skb_copy_datagram_iovec(skb, offset, - msg->msg_iov, used); + err = skb_copy_datagram_msg(skb, offset, msg, used); if (err) { /* Exception. Bailout! 
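The rewritten tcp_xmit_size_goal() above caches tp->gso_segs and recomputes it only when the fresh goal drifts by at least one MSS, which avoids a divide on most sends. The hysteresis condition with illustrative numbers:

#include <stdio.h>

int main(void)
{
	unsigned int mss_now = 1448;
	unsigned int gso_segs = 10;			/* cached tp->gso_segs */
	unsigned int size_goal = gso_segs * mss_now;	/* 14480 */
	unsigned int new_size_goal = 14800;		/* fresh, window-bounded goal */

	if (new_size_goal < size_goal ||
	    new_size_goal >= size_goal + mss_now) {
		gso_segs = new_size_goal / mss_now;	/* divide only on drift */
		size_goal = gso_segs * mss_now;
	}
	printf("size_goal=%u\n", size_goal);	/* 14480: cached value kept */
	return 0;
}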
*/ if (!copied) diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index b1c5970d47a1bf65a9c9cbf6a89d13b0bb8f909f..27ead0dd16bc7e444e96781ff01b10c444678396 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -1,5 +1,5 @@ /* - * Plugable TCP congestion control support and newReno + * Pluggable TCP congestion control support and newReno * congestion control. * Based on ideas from I/O scheduler support and Web100. * diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 20de0118c98e455447128513610ba04b13f3cd61..6b6002416a73950d493661ea1459870f49917efc 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -363,16 +363,28 @@ static void hystart_update(struct sock *sk, u32 delay) struct tcp_sock *tp = tcp_sk(sk); struct bictcp *ca = inet_csk_ca(sk); - if (!(ca->found & hystart_detect)) { + if (ca->found & hystart_detect) + return; + + if (hystart_detect & HYSTART_ACK_TRAIN) { u32 now = bictcp_clock(); /* first detection parameter - ack-train detection */ if ((s32)(now - ca->last_ack) <= hystart_ack_delta) { ca->last_ack = now; - if ((s32)(now - ca->round_start) > ca->delay_min >> 4) + if ((s32)(now - ca->round_start) > ca->delay_min >> 4) { ca->found |= HYSTART_ACK_TRAIN; + NET_INC_STATS_BH(sock_net(sk), + LINUX_MIB_TCPHYSTARTTRAINDETECT); + NET_ADD_STATS_BH(sock_net(sk), + LINUX_MIB_TCPHYSTARTTRAINCWND, + tp->snd_cwnd); + tp->snd_ssthresh = tp->snd_cwnd; + } } + } + if (hystart_detect & HYSTART_DELAY) { /* obtain the minimum delay of more than sampling packets */ if (ca->sample_cnt < HYSTART_MIN_SAMPLES) { if (ca->curr_rtt == 0 || ca->curr_rtt > delay) @@ -381,15 +393,16 @@ static void hystart_update(struct sock *sk, u32 delay) ca->sample_cnt++; } else { if (ca->curr_rtt > ca->delay_min + - HYSTART_DELAY_THRESH(ca->delay_min>>4)) + HYSTART_DELAY_THRESH(ca->delay_min >> 3)) { ca->found |= HYSTART_DELAY; + NET_INC_STATS_BH(sock_net(sk), + LINUX_MIB_TCPHYSTARTDELAYDETECT); + NET_ADD_STATS_BH(sock_net(sk), + LINUX_MIB_TCPHYSTARTDELAYCWND, + tp->snd_cwnd); + tp->snd_ssthresh = tp->snd_cwnd; + } } - /* - * Either one of two conditions are met, - * we exit from slow start immediately. - */ - if (ca->found & hystart_detect) - tp->snd_ssthresh = tp->snd_cwnd; } } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index d107ee246a1d32703ccc260309d52c3493927862..075ab4d5af5e46e7a5a177a324228df592cbe5e3 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -81,6 +81,7 @@ int sysctl_tcp_window_scaling __read_mostly = 1; int sysctl_tcp_sack __read_mostly = 1; int sysctl_tcp_fack __read_mostly = 1; int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH; +int sysctl_tcp_max_reordering __read_mostly = 300; EXPORT_SYMBOL(sysctl_tcp_reordering); int sysctl_tcp_dsack __read_mostly = 1; int sysctl_tcp_app_win __read_mostly = 31; @@ -833,7 +834,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric, if (metric > tp->reordering) { int mib_idx; - tp->reordering = min(TCP_MAX_REORDERING, metric); + tp->reordering = min(sysctl_tcp_max_reordering, metric); /* This exciting event is worth to be remembered. 
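Both hystart detectors above now set ssthresh at the moment they fire and bump their own SNMP counters, and the delay threshold argument moved from delay_min/16 to delay_min/8. Rough arithmetic for the two exit conditions (the kernel clamps the delay threshold via HYSTART_DELAY_THRESH(); the clamp is omitted here):

#include <stdio.h>

#define HYSTART_ACK_TRAIN	0x1
#define HYSTART_DELAY		0x2

int main(void)
{
	unsigned int delay_min = 160;	/* units as per bictcp_clock() */
	unsigned int found = 0;

	/* ack-train: the ACK train has spanned more than delay_min/16 */
	unsigned int now = 171, round_start = 160;
	if (now - round_start > (delay_min >> 4))
		found |= HYSTART_ACK_TRAIN;

	/* delay: sampled RTT exceeds delay_min plus ~delay_min/8 */
	unsigned int curr_rtt = 200;
	if (curr_rtt > delay_min + (delay_min >> 3))
		found |= HYSTART_DELAY;

	printf("found=%#x -> leave slow start, ssthresh = cwnd\n", found);
	return 0;
}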
8) */ if (ts) @@ -4367,7 +4368,7 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) goto err_free; - if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size)) + if (memcpy_from_msg(skb_put(skb, size), msg, size)) goto err_free; TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; @@ -4420,7 +4421,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) __set_current_state(TASK_RUNNING); local_bh_enable(); - if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) { + if (!skb_copy_datagram_msg(skb, 0, tp->ucopy.msg, chunk)) { tp->ucopy.len -= chunk; tp->copied_seq += chunk; eaten = (chunk == skb->len); @@ -4940,10 +4941,9 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) local_bh_enable(); if (skb_csum_unnecessary(skb)) - err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk); + err = skb_copy_datagram_msg(skb, hlen, tp->ucopy.msg, chunk); else - err = skb_copy_and_csum_datagram_iovec(skb, hlen, - tp->ucopy.iov); + err = skb_copy_and_csum_datagram_msg(skb, hlen, tp->ucopy.msg); if (!err) { tp->ucopy.len -= chunk; @@ -5030,7 +5030,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, /* step 3: check security and precedence [ignored] */ /* step 4: Check for a SYN - * RFC 5691 4.2 : Send a challenge ack + * RFC 5961 4.2 : Send a challenge ack */ if (th->syn) { syn_challenge: @@ -5853,12 +5853,12 @@ static inline void pr_drop_req(struct request_sock *req, __u16 port, int family) struct inet_request_sock *ireq = inet_rsk(req); if (family == AF_INET) - LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"), - &ireq->ir_rmt_addr, port); + net_dbg_ratelimited("drop open request from %pI4/%u\n", + &ireq->ir_rmt_addr, port); #if IS_ENABLED(CONFIG_IPV6) else if (family == AF_INET6) - LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI6/%u\n"), - &ireq->ir_v6_rmt_addr, port); + net_dbg_ratelimited("drop open request from %pI6/%u\n", + &ireq->ir_v6_rmt_addr, port); #endif } @@ -5867,7 +5867,7 @@ static inline void pr_drop_req(struct request_sock *req, __u16 port, int family) * If we receive a SYN packet with these bits set, it means a * network is playing bad games with TOS bits. In order to * avoid possible false congestion notifications, we disable - * TCP ECN negociation. + * TCP ECN negotiation. * * Exception: tcp_ca wants ECN. 
This is required for DCTCP * congestion control; it requires setting ECT on all packets, @@ -5877,20 +5877,22 @@ static inline void pr_drop_req(struct request_sock *req, __u16 port, int family) */ static void tcp_ecn_create_request(struct request_sock *req, const struct sk_buff *skb, - const struct sock *listen_sk) + const struct sock *listen_sk, + const struct dst_entry *dst) { const struct tcphdr *th = tcp_hdr(skb); const struct net *net = sock_net(listen_sk); bool th_ecn = th->ece && th->cwr; - bool ect, need_ecn; + bool ect, need_ecn, ecn_ok; if (!th_ecn) return; ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield); need_ecn = tcp_ca_needs_ecn(listen_sk); + ecn_ok = net->ipv4.sysctl_tcp_ecn || dst_feature(dst, RTAX_FEATURE_ECN); - if (!ect && !need_ecn && net->ipv4.sysctl_tcp_ecn) + if (!ect && !need_ecn && ecn_ok) inet_rsk(req)->ecn_ok = 1; else if (ect && need_ecn) inet_rsk(req)->ecn_ok = 1; @@ -5955,13 +5957,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, if (security_inet_conn_request(sk, skb, req)) goto drop_and_free; - if (!want_cookie || tmp_opt.tstamp_ok) - tcp_ecn_create_request(req, skb, sk); - - if (want_cookie) { - isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); - req->cookie_ts = tmp_opt.tstamp_ok; - } else if (!isn) { + if (!want_cookie && !isn) { /* VJ's idea. We save last timestamp seen * from the destination in peer table, when entering * state TIME-WAIT, and check against it before @@ -6009,6 +6005,15 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, goto drop_and_free; } + tcp_ecn_create_request(req, skb, sk, dst); + + if (want_cookie) { + isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); + req->cookie_ts = tmp_opt.tstamp_ok; + if (!tmp_opt.tstamp_ok) + inet_rsk(req)->ecn_ok = 0; + } + tcp_rsk(req)->snt_isn = isn; tcp_openreq_init_rwin(req, sk, dst); fastopen = !want_cookie && diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 147be202429064d03b34405dba575c8d002b267c..a3f72d7fc06c07c43e1c00b67970eaee074e4593 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -623,6 +623,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) arg.iov[0].iov_base = (unsigned char *)&rep; arg.iov[0].iov_len = sizeof(rep.th); + net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); #ifdef CONFIG_TCP_MD5SIG hash_location = tcp_parse_md5sig_option(th); if (!sk && hash_location) { @@ -633,7 +634,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) * Incoming packet is checked with md5 hash with finding key, * no RST generated if md5 hash doesn't match. 
*/ - sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev), + sk1 = __inet_lookup_listener(net, &tcp_hashinfo, ip_hdr(skb)->saddr, th->source, ip_hdr(skb)->daddr, ntohs(th->source), inet_iif(skb)); @@ -681,7 +682,6 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) if (sk) arg.bound_dev_if = sk->sk_bound_dev_if; - net = dev_net(skb_dst(skb)->dev); arg.tos = ip_hdr(skb)->tos; ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, @@ -1432,6 +1432,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) struct dst_entry *dst = sk->sk_rx_dst; sock_rps_save_rxhash(sk, skb); + sk_mark_napi_id(sk, skb); if (dst) { if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || dst->ops->check(dst, 0) == NULL) { @@ -1453,6 +1454,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) if (nsk != sk) { sock_rps_save_rxhash(nsk, skb); + sk_mark_napi_id(sk, skb); if (tcp_child_process(sk, nsk, skb)) { rsk = nsk; goto reset; @@ -1664,7 +1666,7 @@ process: if (sk_filter(sk, skb)) goto discard_and_relse; - sk_mark_napi_id(sk, skb); + sk_incoming_cpu_update(sk); skb->dev = NULL; bh_lock_sock_nested(sk); diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index 5b90f2f447a511703bbce1e22e8ef55888c43479..9d7930ba8e0f4c51c8d5434c5ea186b585ff3a35 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -94,9 +94,9 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb, SKB_GSO_GRE_CSUM | SKB_GSO_IPIP | SKB_GSO_SIT | - SKB_GSO_MPLS | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM | + SKB_GSO_TUNNEL_REMCSUM | 0) || !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))) goto out; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index a3d453b94747c22dd0fad07dfe606fdd11c414b1..7f18262e2326ac4d7963347d7458273a325caa64 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -333,10 +333,19 @@ static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb) static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); + bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 || + tcp_ca_needs_ecn(sk); + + if (!use_ecn) { + const struct dst_entry *dst = __sk_dst_get(sk); + + if (dst && dst_feature(dst, RTAX_FEATURE_ECN)) + use_ecn = true; + } tp->ecn_flags = 0; - if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 || - tcp_ca_needs_ecn(sk)) { + + if (use_ecn) { TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR; tp->ecn_flags = TCP_ECN_OK; if (tcp_ca_needs_ecn(sk)) @@ -1515,6 +1524,27 @@ static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp, ((nonagle & TCP_NAGLE_CORK) || (!nonagle && tp->packets_out && tcp_minshall_check(tp))); } + +/* Return how many segs we'd like on a TSO packet, + * to send one TSO packet per ms + */ +static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now) +{ + u32 bytes, segs; + + bytes = min(sk->sk_pacing_rate >> 10, + sk->sk_gso_max_size - 1 - MAX_TCP_HEADER); + + /* Goal is to send at least one packet per ms, + * not one big TSO packet every 100 ms. + * This preserves ACK clocking and is consistent + * with tcp_tso_should_defer() heuristic. 
+ */ + segs = max_t(u32, bytes / mss_now, sysctl_tcp_min_tso_segs); + + return min_t(u32, segs, sk->sk_gso_max_segs); +} + /* Returns the portion of skb which can be sent right away */ static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb, @@ -1553,7 +1583,7 @@ static unsigned int tcp_mss_split_point(const struct sock *sk, static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp, const struct sk_buff *skb) { - u32 in_flight, cwnd; + u32 in_flight, cwnd, halfcwnd; /* Don't be strict about the congestion window for the final FIN. */ if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && @@ -1562,10 +1592,14 @@ static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp, in_flight = tcp_packets_in_flight(tp); cwnd = tp->snd_cwnd; - if (in_flight < cwnd) - return (cwnd - in_flight); + if (in_flight >= cwnd) + return 0; - return 0; + /* For better scheduling, ensure we have at least + * 2 GSO packets in flight. + */ + halfcwnd = max(cwnd >> 1, 1U); + return min(halfcwnd, cwnd - in_flight); } /* Initialize TSO state of a skb. @@ -1718,7 +1752,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, * This algorithm is from John Heffner. */ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, - bool *is_cwnd_limited) + bool *is_cwnd_limited, u32 max_segs) { struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); @@ -1748,8 +1782,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, limit = min(send_win, cong_win); /* If a full-sized TSO skb can be sent, do it. */ - if (limit >= min_t(unsigned int, sk->sk_gso_max_size, - tp->xmit_size_goal_segs * tp->mss_cache)) + if (limit >= max_segs * tp->mss_cache) goto send_now; /* Middle in queue won't get any more data, full sendable already? */ @@ -1946,6 +1979,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, int cwnd_quota; int result; bool is_cwnd_limited = false; + u32 max_segs; sent_pkts = 0; @@ -1959,6 +1993,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, } } + max_segs = tcp_tso_autosize(sk, mss_now); while ((skb = tcp_send_head(sk))) { unsigned int limit; @@ -1991,10 +2026,23 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, break; } else { if (!push_one && - tcp_tso_should_defer(sk, skb, &is_cwnd_limited)) + tcp_tso_should_defer(sk, skb, &is_cwnd_limited, + max_segs)) break; } + limit = mss_now; + if (tso_segs > 1 && !tcp_urg_mode(tp)) + limit = tcp_mss_split_point(sk, skb, mss_now, + min_t(unsigned int, + cwnd_quota, + max_segs), + nonagle); + + if (skb->len > limit && + unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) + break; + /* TCP Small Queues : * Control number of packets in qdisc/devices to two packets / or ~1 ms. * This allows for : @@ -2005,8 +2053,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, * of queued bytes to ensure line rate. 
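tcp_tso_autosize() above sizes a TSO burst to roughly 1 ms of the pacing rate (rate >> 10 is about rate/1000 bytes), floored at sysctl_tcp_min_tso_segs and capped by sk_gso_max_segs; the companion tcp_cwnd_test() change further caps one pass at half the cwnd. Plugging in numbers for an 80 Mbit/s pacing rate (the gso_max_size byte cap from the hunk is left out for brevity):

#include <stdio.h>

int main(void)
{
	unsigned int pacing_rate = 10 * 1000 * 1000;	/* 80 Mbit/s in bytes/sec */
	unsigned int mss_now = 1448;
	unsigned int min_tso_segs = 2;			/* sysctl_tcp_min_tso_segs */
	unsigned int gso_max_segs = 65535;		/* sk->sk_gso_max_segs */

	unsigned int bytes = pacing_rate >> 10;		/* ~1 ms of data: 9765 */
	unsigned int segs = bytes / mss_now;		/* 6 */

	if (segs < min_tso_segs)
		segs = min_tso_segs;
	if (segs > gso_max_segs)
		segs = gso_max_segs;
	printf("TSO burst: %u segs\n", segs);		/* 6 */
	return 0;
}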
* One example is wifi aggregation (802.11 AMPDU) */ - limit = max_t(unsigned int, sysctl_tcp_limit_output_bytes, - sk->sk_pacing_rate >> 10); + limit = max(2 * skb->truesize, sk->sk_pacing_rate >> 10); + limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes); if (atomic_read(&sk->sk_wmem_alloc) > limit) { set_bit(TSQ_THROTTLED, &tp->tsq_flags); @@ -2019,18 +2067,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, break; } - limit = mss_now; - if (tso_segs > 1 && !tcp_urg_mode(tp)) - limit = tcp_mss_split_point(sk, skb, mss_now, - min_t(unsigned int, - cwnd_quota, - sk->sk_gso_max_segs), - nonagle); - - if (skb->len > limit && - unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) - break; - if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) break; @@ -2998,9 +3034,9 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_fastopen_request *fo = tp->fastopen_req; - int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen; - struct sk_buff *syn_data = NULL, *data; + int syn_loss = 0, space, err = 0; unsigned long last_syn_loss = 0; + struct sk_buff *syn_data; tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */ tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie, @@ -3031,48 +3067,40 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) /* limit to order-0 allocations */ space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER)); - syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space, - sk->sk_allocation); - if (syn_data == NULL) + syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation); + if (!syn_data) + goto fallback; + syn_data->ip_summed = CHECKSUM_PARTIAL; + memcpy(syn_data->cb, syn->cb, sizeof(syn->cb)); + if (unlikely(memcpy_fromiovecend(skb_put(syn_data, space), + fo->data->msg_iter.iov, 0, space))) { + kfree_skb(syn_data); goto fallback; + } - for (i = 0; i < iovlen && syn_data->len < space; ++i) { - struct iovec *iov = &fo->data->msg_iov[i]; - unsigned char __user *from = iov->iov_base; - int len = iov->iov_len; + /* No more data pending in inet_wait_for_connect() */ + if (space == fo->size) + fo->data = NULL; + fo->copied = space; - if (syn_data->len + len > space) - len = space - syn_data->len; - else if (i + 1 == iovlen) - /* No more data pending in inet_wait_for_connect() */ - fo->data = NULL; + tcp_connect_queue_skb(sk, syn_data); - if (skb_add_data(syn_data, from, len)) - goto fallback; - } + err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation); - /* Queue a data-only packet after the regular SYN for retransmission */ - data = pskb_copy(syn_data, sk->sk_allocation); - if (data == NULL) - goto fallback; - TCP_SKB_CB(data)->seq++; - TCP_SKB_CB(data)->tcp_flags &= ~TCPHDR_SYN; - TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH); - tcp_connect_queue_skb(sk, data); - fo->copied = data->len; - - /* syn_data is about to be sent, we need to take current time stamps - * for the packets that are in write queue : SYN packet and DATA - */ - skb_mstamp_get(&syn->skb_mstamp); - data->skb_mstamp = syn->skb_mstamp; + syn->skb_mstamp = syn_data->skb_mstamp; - if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) { + /* Now full SYN+DATA was cloned and sent (or not), + * remove the SYN from the original skb (syn_data) + * we keep in write queue in case of a retransmit, as we + * also have the SYN packet (with no data) in the same queue. 
+ */ + TCP_SKB_CB(syn_data)->seq++; + TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH; + if (!err) { tp->syn_data = (fo->copied > 0); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT); goto done; } - syn_data = NULL; fallback: /* Send a regular SYN with Fast Open cookie request option */ @@ -3081,7 +3109,6 @@ fallback: err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation); if (err) tp->syn_fastopen = 0; - kfree_skb(syn_data); done: fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */ return err; @@ -3101,13 +3128,10 @@ int tcp_connect(struct sock *sk) return 0; } - buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation); - if (unlikely(buff == NULL)) + buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); + if (unlikely(!buff)) return -ENOBUFS; - /* Reserve space for headers. */ - skb_reserve(buff, MAX_TCP_HEADER); - tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); tp->retrans_stamp = tcp_time_stamp; tcp_connect_queue_skb(sk, buff); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 9b21ae8b2e31d85acf2822baf9de0b96e7162d39..1829c7fbc77e4ded47744a60e0b55c11c635e82d 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -374,17 +374,19 @@ void tcp_retransmit_timer(struct sock *sk) */ struct inet_sock *inet = inet_sk(sk); if (sk->sk_family == AF_INET) { - LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"), - &inet->inet_daddr, - ntohs(inet->inet_dport), inet->inet_num, - tp->snd_una, tp->snd_nxt); + net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", + &inet->inet_daddr, + ntohs(inet->inet_dport), + inet->inet_num, + tp->snd_una, tp->snd_nxt); } #if IS_ENABLED(CONFIG_IPV6) else if (sk->sk_family == AF_INET6) { - LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"), - &sk->sk_v6_daddr, - ntohs(inet->inet_dport), inet->inet_num, - tp->snd_una, tp->snd_nxt); + net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", + &sk->sk_v6_daddr, + ntohs(inet->inet_dport), + inet->inet_num, + tp->snd_una, tp->snd_nxt); } #endif if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) { diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index cd0db5471bb5eb6048208404aab71253867e1eec..13b4dcf86ef610d1fcc1b26f7f69f5a6bbd31686 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -144,7 +144,7 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num, struct hlist_nulls_node *node; kuid_t uid = sock_i_uid(sk); - sk_nulls_for_each(sk2, node, &hslot->head) + sk_nulls_for_each(sk2, node, &hslot->head) { if (net_eq(sock_net(sk2), net) && sk2 != sk && (bitmap || udp_sk(sk2)->udp_port_hash == num) && @@ -152,14 +152,13 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num, (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && (!sk2->sk_reuseport || !sk->sk_reuseport || - !uid_eq(uid, sock_i_uid(sk2))) && - (*saddr_comp)(sk, sk2)) { - if (bitmap) - __set_bit(udp_sk(sk2)->udp_port_hash >> log, - bitmap); - else + !uid_eq(uid, sock_i_uid(sk2))) && + saddr_comp(sk, sk2)) { + if (!bitmap) return 1; + __set_bit(udp_sk(sk2)->udp_port_hash >> log, bitmap); } + } return 0; } @@ -168,10 +167,10 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num, * can insert/delete a socket with local_port == num */ static int udp_lib_lport_inuse2(struct net *net, __u16 num, - struct udp_hslot *hslot2, - struct sock *sk, - int (*saddr_comp)(const struct sock *sk1, - const 
struct sock *sk2)) + struct udp_hslot *hslot2, + struct sock *sk, + int (*saddr_comp)(const struct sock *sk1, + const struct sock *sk2)) { struct sock *sk2; struct hlist_nulls_node *node; @@ -179,7 +178,7 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num, int res = 0; spin_lock(&hslot2->lock); - udp_portaddr_for_each_entry(sk2, node, &hslot2->head) + udp_portaddr_for_each_entry(sk2, node, &hslot2->head) { if (net_eq(sock_net(sk2), net) && sk2 != sk && (udp_sk(sk2)->udp_port_hash == num) && @@ -187,11 +186,12 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num, (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && (!sk2->sk_reuseport || !sk->sk_reuseport || - !uid_eq(uid, sock_i_uid(sk2))) && - (*saddr_comp)(sk, sk2)) { + !uid_eq(uid, sock_i_uid(sk2))) && + saddr_comp(sk, sk2)) { res = 1; break; } + } spin_unlock(&hslot2->lock); return res; } @@ -206,8 +206,8 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num, * with NULL address */ int udp_lib_get_port(struct sock *sk, unsigned short snum, - int (*saddr_comp)(const struct sock *sk1, - const struct sock *sk2), + int (*saddr_comp)(const struct sock *sk1, + const struct sock *sk2), unsigned int hash2_nulladdr) { struct udp_hslot *hslot, *hslot2; @@ -336,38 +336,45 @@ int udp_v4_get_port(struct sock *sk, unsigned short snum) return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr); } -static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr, - unsigned short hnum, - __be16 sport, __be32 daddr, __be16 dport, int dif) +static inline int compute_score(struct sock *sk, struct net *net, + __be32 saddr, unsigned short hnum, __be16 sport, + __be32 daddr, __be16 dport, int dif) { - int score = -1; + int score; + struct inet_sock *inet; - if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum && - !ipv6_only_sock(sk)) { - struct inet_sock *inet = inet_sk(sk); + if (!net_eq(sock_net(sk), net) || + udp_sk(sk)->udp_port_hash != hnum || + ipv6_only_sock(sk)) + return -1; - score = (sk->sk_family == PF_INET ? 2 : 1); - if (inet->inet_rcv_saddr) { - if (inet->inet_rcv_saddr != daddr) - return -1; - score += 4; - } - if (inet->inet_daddr) { - if (inet->inet_daddr != saddr) - return -1; - score += 4; - } - if (inet->inet_dport) { - if (inet->inet_dport != sport) - return -1; - score += 4; - } - if (sk->sk_bound_dev_if) { - if (sk->sk_bound_dev_if != dif) - return -1; - score += 4; - } + score = (sk->sk_family == PF_INET) ? 2 : 1; + inet = inet_sk(sk); + + if (inet->inet_rcv_saddr) { + if (inet->inet_rcv_saddr != daddr) + return -1; + score += 4; + } + + if (inet->inet_daddr) { + if (inet->inet_daddr != saddr) + return -1; + score += 4; + } + + if (inet->inet_dport) { + if (inet->inet_dport != sport) + return -1; + score += 4; + } + + if (sk->sk_bound_dev_if) { + if (sk->sk_bound_dev_if != dif) + return -1; + score += 4; } + return score; } @@ -378,33 +385,39 @@ static inline int compute_score2(struct sock *sk, struct net *net, __be32 saddr, __be16 sport, __be32 daddr, unsigned int hnum, int dif) { - int score = -1; + int score; + struct inet_sock *inet; + + if (!net_eq(sock_net(sk), net) || + ipv6_only_sock(sk)) + return -1; - if (net_eq(sock_net(sk), net) && !ipv6_only_sock(sk)) { - struct inet_sock *inet = inet_sk(sk); + inet = inet_sk(sk); - if (inet->inet_rcv_saddr != daddr) + if (inet->inet_rcv_saddr != daddr || + inet->inet_num != hnum) + return -1; + + score = (sk->sk_family == PF_INET) ? 
2 : 1; + + if (inet->inet_daddr) { + if (inet->inet_daddr != saddr) return -1; - if (inet->inet_num != hnum) + score += 4; + } + + if (inet->inet_dport) { + if (inet->inet_dport != sport) return -1; + score += 4; + } - score = (sk->sk_family == PF_INET ? 2 : 1); - if (inet->inet_daddr) { - if (inet->inet_daddr != saddr) - return -1; - score += 4; - } - if (inet->inet_dport) { - if (inet->inet_dport != sport) - return -1; - score += 4; - } - if (sk->sk_bound_dev_if) { - if (sk->sk_bound_dev_if != dif) - return -1; - score += 4; - } + if (sk->sk_bound_dev_if) { + if (sk->sk_bound_dev_if != dif) + return -1; + score += 4; } + return score; } @@ -1036,7 +1049,7 @@ back_from_confirm: /* Lockless fast path for the non-corking case. */ if (!corkreq) { - skb = ip_make_skb(sk, fl4, getfrag, msg->msg_iov, ulen, + skb = ip_make_skb(sk, fl4, getfrag, msg, ulen, sizeof(struct udphdr), &ipc, &rt, msg->msg_flags); err = PTR_ERR(skb); @@ -1051,7 +1064,7 @@ back_from_confirm: /* ... which is an evident application bug. --ANK */ release_sock(sk); - LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("cork app bug 2\n")); + net_dbg_ratelimited("cork app bug 2\n"); err = -EINVAL; goto out; } @@ -1067,7 +1080,7 @@ back_from_confirm: do_append_data: up->len += ulen; - err = ip_append_data(sk, fl4, getfrag, msg->msg_iov, ulen, + err = ip_append_data(sk, fl4, getfrag, msg, ulen, sizeof(struct udphdr), &ipc, &rt, corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); if (err) @@ -1133,7 +1146,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset, if (unlikely(!up->pending)) { release_sock(sk); - LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("udp cork app bug 3\n")); + net_dbg_ratelimited("udp cork app bug 3\n"); return -EINVAL; } @@ -1281,12 +1294,11 @@ try_again: } if (skb_csum_unnecessary(skb)) - err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), - msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, sizeof(struct udphdr), + msg, copied); else { - err = skb_copy_and_csum_datagram_iovec(skb, - sizeof(struct udphdr), - msg->msg_iov); + err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), + msg); if (err == -EINVAL) goto csum_copy_err; @@ -1445,6 +1457,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) if (inet_sk(sk)->inet_daddr) { sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); + sk_incoming_cpu_update(sk); } rc = sock_queue_rcv_skb(sk, skb); @@ -1546,8 +1559,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) * provided by the application." */ if (up->pcrlen == 0) { /* full coverage was set */ - LIMIT_NETDEBUG(KERN_WARNING "UDPLite: partial coverage %d while full coverage %d requested\n", - UDP_SKB_CB(skb)->cscov, skb->len); + net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n", + UDP_SKB_CB(skb)->cscov, skb->len); goto drop; } /* The next case involves violating the min. coverage requested @@ -1557,8 +1570,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) * Therefore the above ...()->partial_cov statement is essential. 
*/ if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { - LIMIT_NETDEBUG(KERN_WARNING "UDPLite: coverage %d too small, need min %d\n", - UDP_SKB_CB(skb)->cscov, up->pcrlen); + net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n", + UDP_SKB_CB(skb)->cscov, up->pcrlen); goto drop; } } @@ -1647,7 +1660,8 @@ static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, struct udphdr *uh, __be32 saddr, __be32 daddr, - struct udp_table *udptable) + struct udp_table *udptable, + int proto) { struct sock *sk, *stack[256 / sizeof(struct sock *)]; struct hlist_nulls_node *node; @@ -1656,6 +1670,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, int dif = skb->dev->ifindex; unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node); unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10); + bool inner_flushed = false; if (use_hash2) { hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) & @@ -1674,6 +1689,7 @@ start_lookup: dif, hnum)) { if (unlikely(count == ARRAY_SIZE(stack))) { flush_stack(stack, count, skb, ~0); + inner_flushed = true; count = 0; } stack[count++] = sk; @@ -1695,7 +1711,10 @@ start_lookup: if (count) { flush_stack(stack, count, skb, count - 1); } else { - kfree_skb(skb); + if (!inner_flushed) + UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI, + proto == IPPROTO_UDPLITE); + consume_skb(skb); } return 0; } @@ -1777,14 +1796,13 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (ret > 0) return -ret; return 0; - } else { - if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) - return __udp4_lib_mcast_deliver(net, skb, uh, - saddr, daddr, udptable); - - sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); } + if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) + return __udp4_lib_mcast_deliver(net, skb, uh, + saddr, daddr, udptable, proto); + + sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); if (sk != NULL) { int ret; @@ -1822,11 +1840,11 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, return 0; short_packet: - LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n", - proto == IPPROTO_UDPLITE ? "Lite" : "", - &saddr, ntohs(uh->source), - ulen, skb->len, - &daddr, ntohs(uh->dest)); + net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n", + proto == IPPROTO_UDPLITE ? "Lite" : "", + &saddr, ntohs(uh->source), + ulen, skb->len, + &daddr, ntohs(uh->dest)); goto drop; csum_error: @@ -1834,10 +1852,10 @@ csum_error: * RFC1122: OK. Discards the bad packet silently (as far as * the network is concerned, anyway) as per 4.1.3.4 (MUST). */ - LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n", - proto == IPPROTO_UDPLITE ? "Lite" : "", - &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest), - ulen); + net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n", + proto == IPPROTO_UDPLITE ? 
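The __udp4_lib_mcast_deliver() change above batches matching sockets on a small on-stack array, flushes the batch when it fills, and now remembers (inner_flushed) whether an earlier flush already delivered the skb, so UDP_MIB_IGNOREDMULTI is only counted when no receiver matched at all. A minimal userspace sketch of that batching pattern, with illustrative names rather than kernel API:

#include <stdbool.h>
#include <stdio.h>

#define STACK_SIZE 4

static void flush_stack(const int *stack, int count)
{
	for (int i = 0; i < count; i++)
		printf("deliver clone to socket %d\n", stack[i]);
}

int main(void)
{
	int stack[STACK_SIZE];
	int count = 0;
	bool inner_flushed = false;

	for (int sk = 0; sk < 10; sk++) {	/* pretend all ten sockets match */
		if (count == STACK_SIZE) {
			flush_stack(stack, count);	/* batch full: deliver now */
			inner_flushed = true;
			count = 0;
		}
		stack[count++] = sk;
	}

	if (count)
		flush_stack(stack, count);	/* deliver the remainder */
	else if (!inner_flushed)
		printf("no receivers: count UDP_MIB_IGNOREDMULTI, consume skb\n");
	return 0;
}

The kfree_skb() to consume_skb() switch on the no-receiver path signals to drop monitoring that this is an expected discard rather than an error.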
"Lite" : "", + &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest), + ulen); UDP_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); drop: UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); @@ -2027,7 +2045,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, } else { up->corkflag = 0; lock_sock(sk); - (*push_pending_frames)(sk); + push_pending_frames(sk); release_sock(sk); } break; diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 6480cea7aa53a6df85a970279b82e3b9ed5a33e6..d3e537ef6b7f57143d272d2d085d31fdc81990e7 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -29,7 +29,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, netdev_features_t features, struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb, netdev_features_t features), - __be16 new_protocol) + __be16 new_protocol, bool is_ipv6) { struct sk_buff *segs = ERR_PTR(-EINVAL); u16 mac_offset = skb->mac_header; @@ -39,7 +39,10 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, netdev_features_t enc_features; int udp_offset, outer_hlen; unsigned int oldlen; - bool need_csum; + bool need_csum = !!(skb_shinfo(skb)->gso_type & + SKB_GSO_UDP_TUNNEL_CSUM); + bool remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM); + bool offload_csum = false, dont_encap = (need_csum || remcsum); oldlen = (u16)~skb->len; @@ -52,10 +55,13 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, skb_set_network_header(skb, skb_inner_network_offset(skb)); skb->mac_len = skb_inner_network_offset(skb); skb->protocol = new_protocol; + skb->encap_hdr_csum = need_csum; + skb->remcsum_offload = remcsum; - need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM); - if (need_csum) - skb->encap_hdr_csum = 1; + /* Try to offload checksum if possible */ + offload_csum = !!(need_csum && + (skb->dev->features & + (is_ipv6 ? NETIF_F_V6_CSUM : NETIF_F_V4_CSUM))); /* segment inner packet. */ enc_features = skb->dev->hw_enc_features & features; @@ -72,11 +78,21 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, do { struct udphdr *uh; int len; - - skb_reset_inner_headers(skb); - skb->encapsulation = 1; + __be32 delta; + + if (dont_encap) { + skb->encapsulation = 0; + skb->ip_summed = CHECKSUM_NONE; + } else { + /* Only set up inner headers if we might be offloading + * inner checksum. + */ + skb_reset_inner_headers(skb); + skb->encapsulation = 1; + } skb->mac_len = mac_len; + skb->protocol = protocol; skb_push(skb, outer_hlen); skb_reset_mac_header(skb); @@ -86,19 +102,36 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, uh = udp_hdr(skb); uh->len = htons(len); - if (need_csum) { - __be32 delta = htonl(oldlen + len); + if (!need_csum) + continue; - uh->check = ~csum_fold((__force __wsum) - ((__force u32)uh->check + - (__force u32)delta)); + delta = htonl(oldlen + len); + + uh->check = ~csum_fold((__force __wsum) + ((__force u32)uh->check + + (__force u32)delta)); + if (offload_csum) { + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum_start = skb_transport_header(skb) - skb->head; + skb->csum_offset = offsetof(struct udphdr, check); + } else if (remcsum) { + /* Need to calculate checksum from scratch, + * inner checksums are never when doing + * remote_checksum_offload. 
+ */ + + skb->csum = skb_checksum(skb, udp_offset, + skb->len - udp_offset, + 0); + uh->check = csum_fold(skb->csum); + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + } else { uh->check = gso_make_checksum(skb, ~uh->check); if (uh->check == 0) uh->check = CSUM_MANGLED_0; } - - skb->protocol = protocol; } while ((skb = skb->next)); out: return segs; @@ -134,7 +167,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, } segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment, - protocol); + protocol, is_ipv6); out_unlock: rcu_read_unlock(); @@ -172,9 +205,9 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM | + SKB_GSO_TUNNEL_REMCSUM | SKB_GSO_IPIP | - SKB_GSO_GRE | SKB_GSO_GRE_CSUM | - SKB_GSO_MPLS) || + SKB_GSO_GRE | SKB_GSO_GRE_CSUM) || !(type & (SKB_GSO_UDP)))) goto out; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 0169ccf5aa4fec8f691ea3bed77b4c7e5587d2a5..f7c8bbeb27b704c0106f714d5a0677c27d3346e0 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1170,6 +1170,9 @@ enum { IPV6_SADDR_RULE_PRIVACY, IPV6_SADDR_RULE_ORCHID, IPV6_SADDR_RULE_PREFIX, +#ifdef CONFIG_IPV6_OPTIMISTIC_DAD + IPV6_SADDR_RULE_NOT_OPTIMISTIC, +#endif IPV6_SADDR_RULE_MAX }; @@ -1197,6 +1200,15 @@ static inline int ipv6_saddr_preferred(int type) return 0; } +static inline bool ipv6_use_optimistic_addr(struct inet6_dev *idev) +{ +#ifdef CONFIG_IPV6_OPTIMISTIC_DAD + return idev && idev->cnf.optimistic_dad && idev->cnf.use_optimistic; +#else + return false; +#endif +} + static int ipv6_get_saddr_eval(struct net *net, struct ipv6_saddr_score *score, struct ipv6_saddr_dst *dst, @@ -1257,10 +1269,16 @@ static int ipv6_get_saddr_eval(struct net *net, score->scopedist = ret; break; case IPV6_SADDR_RULE_PREFERRED: + { /* Rule 3: Avoid deprecated and optimistic addresses */ + u8 avoid = IFA_F_DEPRECATED; + + if (!ipv6_use_optimistic_addr(score->ifa->idev)) + avoid |= IFA_F_OPTIMISTIC; ret = ipv6_saddr_preferred(score->addr_type) || - !(score->ifa->flags & (IFA_F_DEPRECATED|IFA_F_OPTIMISTIC)); + !(score->ifa->flags & avoid); break; + } #ifdef CONFIG_IPV6_MIP6 case IPV6_SADDR_RULE_HOA: { @@ -1306,6 +1324,14 @@ static int ipv6_get_saddr_eval(struct net *net, ret = score->ifa->prefix_len; score->matchlen = ret; break; +#ifdef CONFIG_IPV6_OPTIMISTIC_DAD + case IPV6_SADDR_RULE_NOT_OPTIMISTIC: + /* Optimistic addresses still have lower precedence than other + * preferred addresses. 
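The uh->check adjustment in the udp_offload segmentation loop above is the standard incremental one's-complement update (RFC 1624): rather than re-summing the whole segment, the difference between the old and new length is folded into the existing checksum. A self-contained demonstration of the same arithmetic on made-up packet words, verified against a full recompute:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

static uint16_t csum(const uint16_t *words, int n)
{
	uint32_t sum = 0;

	for (int i = 0; i < n; i++)
		sum += words[i];
	return (uint16_t)~fold(sum);
}

int main(void)
{
	uint16_t pkt[4] = { 0x1234, 0x05c8 /* old length */, 0xabcd, 0x0001 };
	uint16_t hc = csum(pkt, 4);
	uint16_t m = pkt[1], m_new = 0x0200;	/* the length field changes */

	pkt[1] = m_new;

	/* RFC 1624 eqn. 3: HC' = ~(~HC + ~m + m') */
	uint16_t hc_new = (uint16_t)~fold((uint32_t)(uint16_t)~hc +
					  (uint16_t)~m + m_new);

	assert(hc_new == csum(pkt, 4));		/* matches a full recompute */
	printf("0x%04x -> 0x%04x\n", hc, hc_new);
	return 0;
}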
+ */ + ret = !(score->ifa->flags & IFA_F_OPTIMISTIC); + break; +#endif default: ret = 0; } @@ -1385,10 +1411,8 @@ int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev, if (unlikely(score->addr_type == IPV6_ADDR_ANY || score->addr_type & IPV6_ADDR_MULTICAST)) { - LIMIT_NETDEBUG(KERN_DEBUG - "ADDRCONF: unspecified / multicast address " - "assigned as unicast address on %s", - dev->name); + net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s", + dev->name); continue; } @@ -2315,8 +2339,8 @@ ok: else stored_lft = 0; if (!update_lft && !create && stored_lft) { - const u32 minimum_lft = min( - stored_lft, (u32)MIN_VALID_LIFETIME); + const u32 minimum_lft = min_t(u32, + stored_lft, MIN_VALID_LIFETIME); valid_lft = max(valid_lft, minimum_lft); /* RFC4862 Section 5.5.3e: @@ -2519,7 +2543,8 @@ static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags, if (!dev) return -ENODEV; - if ((idev = __in6_dev_get(dev)) == NULL) + idev = __in6_dev_get(dev); + if (idev == NULL) return -ENXIO; read_lock_bh(&idev->lock); @@ -2666,7 +2691,8 @@ static void init_loopback(struct net_device *dev) ASSERT_RTNL(); - if ((idev = ipv6_find_idev(dev)) == NULL) { + idev = ipv6_find_idev(dev); + if (idev == NULL) { pr_debug("%s: add_dev failed\n", __func__); return; } @@ -2789,7 +2815,8 @@ static void addrconf_sit_config(struct net_device *dev) * our v4 addrs in the tunnel */ - if ((idev = ipv6_find_idev(dev)) == NULL) { + idev = ipv6_find_idev(dev); + if (idev == NULL) { pr_debug("%s: add_dev failed\n", __func__); return; } @@ -2813,7 +2840,8 @@ static void addrconf_gre_config(struct net_device *dev) ASSERT_RTNL(); - if ((idev = ipv6_find_idev(dev)) == NULL) { + idev = ipv6_find_idev(dev); + if (idev == NULL) { pr_debug("%s: add_dev failed\n", __func__); return; } @@ -3222,8 +3250,15 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp) * Optimistic nodes can start receiving * Frames right away */ - if (ifp->flags & IFA_F_OPTIMISTIC) + if (ifp->flags & IFA_F_OPTIMISTIC) { ip6_ins_rt(ifp->rt); + if (ipv6_use_optimistic_addr(idev)) { + /* Because optimistic nodes can use this address, + * notify listeners. If DAD fails, RTM_DELADDR is sent. 
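The rule-3 change above replaces the fixed (IFA_F_DEPRECATED|IFA_F_OPTIMISTIC) test with a mask: optimistic addresses are only avoided when use_optimistic is off, while the new IPV6_SADDR_RULE_NOT_OPTIMISTIC rule still ranks them below fully preferred addresses. The mask logic in isolation (a sketch; the flag values mirror include/uapi/linux/if_addr.h):

#include <stdbool.h>
#include <stdio.h>

#define IFA_F_OPTIMISTIC 0x04
#define IFA_F_DEPRECATED 0x20

static bool rule3_preferred(unsigned int ifa_flags, bool use_optimistic)
{
	unsigned int avoid = IFA_F_DEPRECATED;

	if (!use_optimistic)
		avoid |= IFA_F_OPTIMISTIC;	/* old behaviour: always avoided */
	return !(ifa_flags & avoid);
}

int main(void)
{
	printf("%d\n", rule3_preferred(IFA_F_OPTIMISTIC, true));	/* 1 */
	printf("%d\n", rule3_preferred(IFA_F_OPTIMISTIC, false));	/* 0 */
	return 0;
}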
+ */ + ipv6_ifa_notify(RTM_NEWADDR, ifp); + } + } addrconf_dad_kick(ifp); out: @@ -4330,6 +4365,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route; #ifdef CONFIG_IPV6_OPTIMISTIC_DAD array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad; + array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic; #endif #ifdef CONFIG_IPV6_MROUTE array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding; @@ -5156,6 +5192,14 @@ static struct addrconf_sysctl_table .proc_handler = proc_dointvec, }, + { + .procname = "use_optimistic", + .data = &ipv6_devconf.use_optimistic, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + + }, #endif #ifdef CONFIG_IPV6_MROUTE { @@ -5336,10 +5380,8 @@ static void __net_exit addrconf_exit_net(struct net *net) __addrconf_sysctl_unregister(net->ipv6.devconf_dflt); __addrconf_sysctl_unregister(net->ipv6.devconf_all); #endif - if (!net_eq(net, &init_net)) { - kfree(net->ipv6.devconf_dflt); - kfree(net->ipv6.devconf_all); - } + kfree(net->ipv6.devconf_dflt); + kfree(net->ipv6.devconf_all); } static struct pernet_operations addrconf_ops = { diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 6d16eb0e0c7f938822de68bd57674665dcdcbddf..a6727add26240e9980ce8da620a6e5bf5cddaf8d 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -272,10 +272,9 @@ static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir) ipv6_rearrange_destopt(iph, exthdr.opth); case NEXTHDR_HOP: if (!zero_out_mutable_opts(exthdr.opth)) { - LIMIT_NETDEBUG( - KERN_WARNING "overrun %sopts\n", - nexthdr == NEXTHDR_HOP ? - "hop" : "dest"); + net_dbg_ratelimited("overrun %sopts\n", + nexthdr == NEXTHDR_HOP ? + "hop" : "dest"); return -EINVAL; } break; @@ -354,7 +353,8 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) ahp = x->data; ahash = ahp->ahash; - if ((err = skb_cow_data(skb, 0, &trailer)) < 0) + err = skb_cow_data(skb, 0, &trailer); + if (err < 0) goto out; nfrags = err; @@ -560,8 +560,8 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) if (!pskb_may_pull(skb, ah_hlen)) goto out; - - if ((err = skb_cow_data(skb, 0, &trailer)) < 0) + err = skb_cow_data(skb, 0, &trailer); + if (err < 0) goto out; nfrags = err; diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 2cdc38338be3fda267d7137cb20fc8bbe72c4466..100c589a2a6cf951bdb8a7a2e56b087fb4fe56bf 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -325,6 +325,16 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu) kfree_skb(skb); } +static void ip6_datagram_prepare_pktinfo_errqueue(struct sk_buff *skb) +{ + int ifindex = skb->dev ? 
skb->dev->ifindex : -1; + + if (skb->protocol == htons(ETH_P_IPV6)) + IP6CB(skb)->iif = ifindex; + else + PKTINFO_SKB_CB(skb)->ipi_ifindex = ifindex; +} + /* * Handle MSG_ERRQUEUE */ @@ -351,7 +361,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) msg->msg_flags |= MSG_TRUNC; copied = len; } - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto out_free_skb; @@ -388,8 +398,12 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) sin->sin6_family = AF_INET6; sin->sin6_flowinfo = 0; sin->sin6_port = 0; - if (np->rxopt.all) + if (np->rxopt.all) { + if (serr->ee.ee_origin != SO_EE_ORIGIN_ICMP && + serr->ee.ee_origin != SO_EE_ORIGIN_ICMP6) + ip6_datagram_prepare_pktinfo_errqueue(skb); ip6_datagram_recv_common_ctl(sk, msg, skb); + } if (skb->protocol == htons(ETH_P_IPV6)) { sin->sin6_addr = ipv6_hdr(skb)->saddr; if (np->rxopt.all) @@ -445,7 +459,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len, msg->msg_flags |= MSG_TRUNC; copied = len; } - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto out_free_skb; @@ -491,7 +505,10 @@ void ip6_datagram_recv_common_ctl(struct sock *sk, struct msghdr *msg, ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr, &src_info.ipi6_addr); } - put_cmsg(msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info); + + if (src_info.ipi6_ifindex >= 0) + put_cmsg(msg, SOL_IPV6, IPV6_PKTINFO, + sizeof(src_info), &src_info); } } @@ -640,7 +657,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, int len; int err = 0; - for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { + for_each_cmsghdr(cmsg, msg) { int addr_type; if (!CMSG_OK(msg, cmsg)) { @@ -893,8 +910,8 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, break; } default: - LIMIT_NETDEBUG(KERN_DEBUG "invalid cmsg type: %d\n", - cmsg->cmsg_type); + net_dbg_ratelimited("invalid cmsg type: %d\n", + cmsg->cmsg_type); err = -EINVAL; goto exit_f; } diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 83fc3a385a26782c316914c4f8aafab78fe1a5e2..e48f2c7c5c596b0a364636b7ed6d6e693e9e925f 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -286,8 +286,8 @@ static int esp_input_done2(struct sk_buff *skb, int err) err = -EINVAL; padlen = nexthdr[0]; if (padlen + 2 + alen >= elen) { - LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage " - "padlen=%d, elen=%d\n", padlen + 2, elen - alen); + net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n", + padlen + 2, elen - alen); goto out; } @@ -345,7 +345,8 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) goto out; } - if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) { + nfrags = skb_cow_data(skb, 0, &trailer); + if (nfrags < 0) { ret = -EINVAL; goto out; } @@ -544,12 +545,12 @@ static int esp_init_authenc(struct xfrm_state *x) BUG_ON(!aalg_desc); err = -EINVAL; - if (aalg_desc->uinfo.auth.icv_fullbits/8 != + if (aalg_desc->uinfo.auth.icv_fullbits / 8 != crypto_aead_authsize(aead)) { - NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n", - x->aalg->alg_name, - crypto_aead_authsize(aead), - aalg_desc->uinfo.auth.icv_fullbits/8); + pr_info("ESP: %s digestsize %u != %hu\n", + x->aalg->alg_name, + crypto_aead_authsize(aead), + aalg_desc->uinfo.auth.icv_fullbits / 8); goto free_key; } diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 
bfde361b613400df742c21225a1b4b44744ece65..a7bbbe45570b287eb05b9f13d0a73a830b767dd2 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -47,7 +47,7 @@ #include #endif -#include +#include /* * Parsing tlv encoded headers. @@ -184,7 +184,7 @@ static bool ipv6_dest_hao(struct sk_buff *skb, int optoff) int ret; if (opt->dsthao) { - LIMIT_NETDEBUG(KERN_DEBUG "hao duplicated\n"); + net_dbg_ratelimited("hao duplicated\n"); goto discard; } opt->dsthao = opt->dst1; @@ -193,14 +193,14 @@ static bool ipv6_dest_hao(struct sk_buff *skb, int optoff) hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff); if (hao->length != 16) { - LIMIT_NETDEBUG( - KERN_DEBUG "hao invalid option length = %d\n", hao->length); + net_dbg_ratelimited("hao invalid option length = %d\n", + hao->length); goto discard; } if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) { - LIMIT_NETDEBUG( - KERN_DEBUG "hao is not an unicast addr: %pI6\n", &hao->addr); + net_dbg_ratelimited("hao is not an unicast addr: %pI6\n", + &hao->addr); goto discard; } @@ -551,8 +551,8 @@ static bool ipv6_hop_ra(struct sk_buff *skb, int optoff) memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra)); return true; } - LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n", - nh[optoff + 1]); + net_dbg_ratelimited("ipv6_hop_ra: wrong RA length %d\n", + nh[optoff + 1]); kfree_skb(skb); return false; } @@ -566,8 +566,8 @@ static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff) u32 pkt_len; if (nh[optoff + 1] != 4 || (optoff & 3) != 2) { - LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", - nh[optoff+1]); + net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", + nh[optoff+1]); IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS); goto drop; diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 97ae70077a4f226bac3da0f2f94f6efb21d2b246..d674152b6ede5d6134f1a89cb8ebaa6a81308c73 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -243,7 +243,8 @@ int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, struct icmp6hdr *icmp6h; int err = 0; - if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) + skb = skb_peek(&sk->sk_write_queue); + if (skb == NULL) goto out; icmp6h = icmp6_hdr(skb); @@ -338,7 +339,7 @@ static struct dst_entry *icmpv6_route_lookup(struct net *net, * anycast. */ if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) { - LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: acast source\n"); + net_dbg_ratelimited("icmp6_send: acast source\n"); dst_release(dst); return ERR_PTR(-EINVAL); } @@ -452,7 +453,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) * and anycast addresses will be checked later. */ if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) { - LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: addr_any/mcast source\n"); + net_dbg_ratelimited("icmp6_send: addr_any/mcast source\n"); return; } @@ -460,7 +461,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) * Never answer to a ICMP packet. 
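A recurring conversion throughout this series is LIMIT_NETDEBUG() to net_dbg_ratelimited(), which suppresses output once a burst budget is spent within an interval (the kernel version is built on ___ratelimit()). A crude userspace approximation of the behaviour, all names invented:

#include <stdio.h>
#include <time.h>

#define RL_INTERVAL 5	/* seconds */
#define RL_BURST 10

static int rl_allow(void)
{
	static time_t start;
	static int printed;
	time_t now = time(NULL);

	if (now - start > RL_INTERVAL) {
		start = now;		/* new interval: reset the budget */
		printed = 0;
	}
	return printed++ < RL_BURST;
}

#define dbg_ratelimited(...) \
	do { if (rl_allow()) fprintf(stderr, __VA_ARGS__); } while (0)

int main(void)
{
	for (int i = 0; i < 20; i++)
		dbg_ratelimited("event %d\n", i);	/* only the first 10 print */
	return 0;
}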
*/ if (is_ineligible(skb)) { - LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: no reply to icmp error\n"); + net_dbg_ratelimited("icmp6_send: no reply to icmp error\n"); return; } @@ -509,7 +510,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) len = skb->len - msg.offset; len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr)); if (len < 0) { - LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n"); + net_dbg_ratelimited("icmp: len problem\n"); goto out_dst_release; } @@ -679,6 +680,7 @@ static int icmpv6_rcv(struct sk_buff *skb) const struct in6_addr *saddr, *daddr; struct icmp6hdr *hdr; u8 type; + bool success = false; if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { struct sec_path *sp = skb_sec_path(skb); @@ -706,9 +708,8 @@ static int icmpv6_rcv(struct sk_buff *skb) daddr = &ipv6_hdr(skb)->daddr; if (skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo)) { - LIMIT_NETDEBUG(KERN_DEBUG - "ICMPv6 checksum failed [%pI6c > %pI6c]\n", - saddr, daddr); + net_dbg_ratelimited("ICMPv6 checksum failed [%pI6c > %pI6c]\n", + saddr, daddr); goto csum_error; } @@ -727,7 +728,7 @@ static int icmpv6_rcv(struct sk_buff *skb) break; case ICMPV6_ECHO_REPLY: - ping_rcv(skb); + success = ping_rcv(skb); break; case ICMPV6_PKT_TOOBIG: @@ -781,7 +782,7 @@ static int icmpv6_rcv(struct sk_buff *skb) if (type & ICMPV6_INFOMSG_MASK) break; - LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n"); + net_dbg_ratelimited("icmpv6: msg of unknown type\n"); /* * error of unknown type. @@ -791,7 +792,14 @@ static int icmpv6_rcv(struct sk_buff *skb) icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu); } - kfree_skb(skb); + /* until the v6 path can be better sorted assume failure and + * preserve the status quo behaviour for the rest of the paths to here + */ + if (success) + consume_skb(skb); + else + kfree_skb(skb); + return 0; csum_error: @@ -1009,4 +1017,3 @@ struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net) return table; } #endif - diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 3dd7d4ebd7cd9dff75c7a89a3a872cf8a073543b..2f780cba6e1240b06e4fc64eb20c949ab3646558 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -654,7 +654,11 @@ release: goto done; err = -ENOMEM; - if (sfl1 == NULL || (err = mem_check(sk)) != 0) + if (sfl1 == NULL) + goto done; + + err = mem_check(sk); + if (err != 0) goto done; fl1 = fl_intern(net, fl, freq.flr_label); @@ -769,10 +773,9 @@ static void ip6fl_seq_stop(struct seq_file *seq, void *v) static int ip6fl_seq_show(struct seq_file *seq, void *v) { struct ip6fl_iter_state *state = ip6fl_seq_private(seq); - if (v == SEQ_START_TOKEN) - seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n", - "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt"); - else { + if (v == SEQ_START_TOKEN) { + seq_puts(seq, "Label S Owner Users Linger Expires Dst Opt\n"); + } else { struct ip6_flowlabel *fl = v; seq_printf(seq, "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n", diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 0e32d2e1bdbfecabd3ae7e8ddb33a99f51cd7b51..13cda4c6313bad7ecca5b20b5be7c8ac28bcf1ab 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -902,7 +902,7 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb, struct net_device_stats *stats = &t->dev->stats; int ret; - if (!ip6_tnl_xmit_ctl(t)) + if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr)) goto tx_err; switch (skb->protocol) { diff --git a/net/ipv6/ip6_input.c 
b/net/ipv6/ip6_input.c index a3084ab5df6cd1b343af6d725975082f5c4b6eb6..aacdcb4dc7624117561ce9720fa4734ed06ce5a0 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -220,7 +220,8 @@ resubmit: nexthdr = skb_network_header(skb)[nhoff]; raw = raw6_local_deliver(skb, nexthdr); - if ((ipprot = rcu_dereference(inet6_protos[nexthdr])) != NULL) { + ipprot = rcu_dereference(inet6_protos[nexthdr]); + if (ipprot != NULL) { int ret; if (ipprot->flags & INET6_PROTO_FINAL) { diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 01e12d0d8fcc9284403629fd17d7de010463d480..46d452a56d3e1ce9b1a0f9f8d8edfede3675bf21 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -79,7 +79,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, SKB_GSO_SIT | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM | - SKB_GSO_MPLS | + SKB_GSO_TUNNEL_REMCSUM | SKB_GSO_TCPV6 | 0))) goto out; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 8e950c250ada0785323c3e270a61c2e1b7a7b6f3..ce69a12ae48c29276871dacb30eebea086b291a3 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -747,13 +747,11 @@ slow_path: if (len < left) { len &= ~7; } - /* - * Allocate buffer. - */ - if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) + - hroom + troom, GFP_ATOMIC)) == NULL) { - NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); + /* Allocate buffer */ + frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) + + hroom + troom, GFP_ATOMIC); + if (!frag) { IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); err = -ENOMEM; @@ -900,7 +898,8 @@ static int ip6_dst_lookup_tail(struct sock *sk, if (*dst == NULL) *dst = ip6_route_output(net, sk, fl6); - if ((err = (*dst)->error)) + err = (*dst)->error; + if (err) goto out_err_release; if (ipv6_addr_any(&fl6->saddr)) { @@ -948,7 +947,8 @@ static int ip6_dst_lookup_tail(struct sock *sk, memcpy(&fl_gw6, fl6, sizeof(struct flowi6)); memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr)); *dst = ip6_route_output(net, sk, &fl_gw6); - if ((err = (*dst)->error)) + err = (*dst)->error; + if (err) goto out_err_release; } } @@ -1056,7 +1056,8 @@ static inline int ip6_ufo_append_data(struct sock *sk, * device, so create one single skb packet containing complete * udp datagram */ - if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) { + skb = skb_peek_tail(&sk->sk_write_queue); + if (skb == NULL) { skb = sock_alloc_send_skb(sk, hh_len + fragheaderlen + transhdrlen + 20, (flags & MSG_DONTWAIT), &err); @@ -1536,7 +1537,8 @@ int ip6_push_pending_frames(struct sock *sk) unsigned char proto = fl6->flowi6_proto; int err = 0; - if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL) + skb = __skb_dequeue(&sk->sk_write_queue); + if (skb == NULL) goto out; tail_skb = &(skb_shinfo(skb)->frag_list); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 9cb94cfa0ae71d05a5bd924e58eb225e21dbb62d..92b3da571980670e4f343a278eec49521057be1e 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -183,6 +183,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_ unsigned int hash = HASH(remote, local); struct ip6_tnl *t; struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); + struct in6_addr any; for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { if (ipv6_addr_equal(local, &t->parms.laddr) && @@ -190,6 +191,22 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_ (t->dev->flags & IFF_UP)) return t; } + + memset(&any, 0, sizeof(any)); + hash = 
HASH(&any, local); + for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { + if (ipv6_addr_equal(local, &t->parms.laddr) && + (t->dev->flags & IFF_UP)) + return t; + } + + hash = HASH(remote, &any); + for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { + if (ipv6_addr_equal(remote, &t->parms.raddr) && + (t->dev->flags & IFF_UP)) + return t; + } + t = rcu_dereference(ip6n->tnls_wc[0]); if (t && (t->dev->flags & IFF_UP)) return t; @@ -474,6 +491,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, int rel_msg = 0; u8 rel_type = ICMPV6_DEST_UNREACH; u8 rel_code = ICMPV6_ADDR_UNREACH; + u8 tproto; __u32 rel_info = 0; __u16 len; int err = -ENOENT; @@ -483,11 +501,12 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, processing of the error. */ rcu_read_lock(); - if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, - &ipv6h->saddr)) == NULL) + t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr); + if (t == NULL) goto out; - if (t->parms.proto != ipproto && t->parms.proto != 0) + tproto = ACCESS_ONCE(t->parms.proto); + if (tproto != ipproto && tproto != 0) goto out; err = 0; @@ -531,7 +550,8 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, mtu = IPV6_MIN_MTU; t->dev->mtu = mtu; - if ((len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) { + len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len); + if (len > mtu) { rel_type = ICMPV6_PKT_TOOBIG; rel_code = 0; rel_info = mtu; @@ -788,15 +808,16 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, { struct ip6_tnl *t; const struct ipv6hdr *ipv6h = ipv6_hdr(skb); + u8 tproto; int err; rcu_read_lock(); - - if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, - &ipv6h->daddr)) != NULL) { + t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr); + if (t != NULL) { struct pcpu_sw_netstats *tstats; - if (t->parms.proto != ipproto && t->parms.proto != 0) { + tproto = ACCESS_ONCE(t->parms.proto); + if (tproto != ipproto && tproto != 0) { rcu_read_unlock(); goto discard; } @@ -902,24 +923,28 @@ ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr) return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr); } -int ip6_tnl_xmit_ctl(struct ip6_tnl *t) +int ip6_tnl_xmit_ctl(struct ip6_tnl *t, + const struct in6_addr *laddr, + const struct in6_addr *raddr) { struct __ip6_tnl_parm *p = &t->parms; int ret = 0; struct net *net = t->net; - if (p->flags & IP6_TNL_F_CAP_XMIT) { + if ((p->flags & IP6_TNL_F_CAP_XMIT) || + ((p->flags & IP6_TNL_F_CAP_PER_PACKET) && + (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) { struct net_device *ldev = NULL; rcu_read_lock(); if (p->link) ldev = dev_get_by_index_rcu(net, p->link); - if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0))) + if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0))) pr_warn("%s xmit: Local address not yet configured!\n", p->name); - else if (!ipv6_addr_is_multicast(&p->raddr) && - unlikely(ipv6_chk_addr(net, &p->raddr, NULL, 0))) + else if (!ipv6_addr_is_multicast(raddr) && + unlikely(ipv6_chk_addr(net, raddr, NULL, 0))) pr_warn("%s xmit: Routing loop! 
Remote address found on this node!\n", p->name); else @@ -968,8 +993,34 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, u8 proto; int err = -1; - if (!fl6->flowi6_mark) + /* NBMA tunnel */ + if (ipv6_addr_any(&t->parms.raddr)) { + struct in6_addr *addr6; + struct neighbour *neigh; + int addr_type; + + if (!skb_dst(skb)) + goto tx_err_link_failure; + + neigh = dst_neigh_lookup(skb_dst(skb), + &ipv6_hdr(skb)->daddr); + if (!neigh) + goto tx_err_link_failure; + + addr6 = (struct in6_addr *)&neigh->primary_key; + addr_type = ipv6_addr_type(addr6); + + if (addr_type == IPV6_ADDR_ANY) + addr6 = &ipv6_hdr(skb)->daddr; + + memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); + neigh_release(neigh); + } else if (!fl6->flowi6_mark) dst = ip6_tnl_dst_check(t); + + if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr)) + goto tx_err_link_failure; + if (!dst) { ndst = ip6_route_output(net, NULL, fl6); @@ -1018,7 +1069,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { struct sk_buff *new_skb; - if (!(new_skb = skb_realloc_headroom(skb, max_headroom))) + new_skb = skb_realloc_headroom(skb, max_headroom); + if (!new_skb) goto tx_err_dst_release; if (skb->sk) @@ -1075,10 +1127,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) struct flowi6 fl6; __u8 dsfield; __u32 mtu; + u8 tproto; int err; - if ((t->parms.proto != IPPROTO_IPIP && t->parms.proto != 0) || - !ip6_tnl_xmit_ctl(t)) + tproto = ACCESS_ONCE(t->parms.proto); + if (tproto != IPPROTO_IPIP && tproto != 0) return -1; if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) @@ -1117,10 +1170,12 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) struct flowi6 fl6; __u8 dsfield; __u32 mtu; + u8 tproto; int err; - if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) || - !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h)) + tproto = ACCESS_ONCE(t->parms.proto); + if ((tproto != IPPROTO_IPV6 && tproto != 0) || + ip6_tnl_addr_conflict(t, ipv6h)) return -1; offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); @@ -1282,6 +1337,14 @@ static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p) return err; } +static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p) +{ + /* for default tnl0 device allow to change only the proto */ + t->parms.proto = p->proto; + netdev_state_change(t->dev); + return 0; +} + static void ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u) { @@ -1381,7 +1444,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) break; ip6_tnl_parm_from_user(&p1, &p); t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL); - if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) { + if (cmd == SIOCCHGTUNNEL) { if (t != NULL) { if (t->dev != dev) { err = -EEXIST; @@ -1389,8 +1452,10 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } } else t = netdev_priv(dev); - - err = ip6_tnl_update(t, &p1); + if (dev == ip6n->fb_tnl_dev) + err = ip6_tnl0_update(t, &p1); + else + err = ip6_tnl_update(t, &p1); } if (t) { err = 0; diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index bcda14de7f84822a17b382a6256e49b665ed7a83..ace10d0b3aacb8e484e78f4f9a69113727ba3f65 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -95,6 +95,7 @@ vti6_tnl_lookup(struct net *net, const struct in6_addr *remote, unsigned int hash = HASH(remote, local); struct ip6_tnl *t; struct vti6_net *ip6n = net_generic(net, vti6_net_id); + struct in6_addr any; for_each_vti6_tunnel_rcu(ip6n->tnls_r_l[hash]) { if 
(ipv6_addr_equal(local, &t->parms.laddr) && @@ -102,6 +103,22 @@ vti6_tnl_lookup(struct net *net, const struct in6_addr *remote, (t->dev->flags & IFF_UP)) return t; } + + memset(&any, 0, sizeof(any)); + hash = HASH(&any, local); + for_each_vti6_tunnel_rcu(ip6n->tnls_r_l[hash]) { + if (ipv6_addr_equal(local, &t->parms.laddr) && + (t->dev->flags & IFF_UP)) + return t; + } + + hash = HASH(remote, &any); + for_each_vti6_tunnel_rcu(ip6n->tnls_r_l[hash]) { + if (ipv6_addr_equal(remote, &t->parms.raddr) && + (t->dev->flags & IFF_UP)) + return t; + } + t = rcu_dereference(ip6n->tnls_wc[0]); if (t && (t->dev->flags & IFF_UP)) return t; @@ -287,8 +304,8 @@ static int vti6_rcv(struct sk_buff *skb) const struct ipv6hdr *ipv6h = ipv6_hdr(skb); rcu_read_lock(); - if ((t = vti6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, - &ipv6h->daddr)) != NULL) { + t = vti6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr); + if (t != NULL) { if (t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) { rcu_read_unlock(); goto discard; @@ -412,6 +429,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) struct net_device_stats *stats = &t->dev->stats; struct dst_entry *dst = skb_dst(skb); struct net_device *tdev; + struct xfrm_state *x; int err = -1; if (!dst) @@ -425,7 +443,12 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) goto tx_err_link_failure; } - if (!vti6_state_check(dst->xfrm, &t->parms.raddr, &t->parms.laddr)) + x = dst->xfrm; + if (!vti6_state_check(x, &t->parms.raddr, &t->parms.laddr)) + goto tx_err_link_failure; + + if (!ip6_tnl_xmit_ctl(t, (const struct in6_addr *)&x->props.saddr, + (const struct in6_addr *)&x->id.daddr)) goto tx_err_link_failure; tdev = dst->dev; @@ -480,7 +503,7 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) ipv6h = ipv6_hdr(skb); if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) || - !ip6_tnl_xmit_ctl(t) || vti6_addr_conflict(t, ipv6h)) + vti6_addr_conflict(t, ipv6h)) goto tx_err; xfrm_decode_session(skb, &fl, AF_INET6); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 1a01d79b86980b1bc00d3692a061f614e7107773..722669754bbfb89ab04ea7677f1744a967c7e13e 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -2094,7 +2094,7 @@ static void ip6_mr_forward(struct net *net, struct mr6_table *mrt, if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) { struct mfc6_cache *cache_proxy; - /* For an (*,G) entry, we only check that the incomming + /* For an (*,G) entry, we only check that the incoming * interface is part of the static tree. 
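Both ip6_tnl_lookup() and vti6_tnl_lookup() above gain the same fallback order: an exact (remote, local) match first, then a bucket keyed on local only (wildcard remote), then one keyed on remote only (wildcard local), before the catch-all tnls_wc tunnel. A simplified linear-scan model of that order, with strings standing in for in6_addr and "::" for the unspecified address:

#include <stdio.h>
#include <string.h>

struct tnl {
	const char *laddr;
	const char *raddr;	/* "::" models the unspecified address */
};

static const struct tnl *tnl_lookup(const struct tnl *t, int n,
				    const char *remote, const char *local)
{
	int i;

	for (i = 0; i < n; i++)		/* pass 1: exact (remote, local) */
		if (!strcmp(t[i].laddr, local) && !strcmp(t[i].raddr, remote))
			return &t[i];
	for (i = 0; i < n; i++)		/* pass 2: local match, any remote */
		if (!strcmp(t[i].laddr, local) && !strcmp(t[i].raddr, "::"))
			return &t[i];
	for (i = 0; i < n; i++)		/* pass 3: remote match, any local */
		if (!strcmp(t[i].raddr, remote) && !strcmp(t[i].laddr, "::"))
			return &t[i];
	return NULL;			/* caller falls back to tnls_wc[0] */
}

int main(void)
{
	struct tnl tbl[] = {
		{ "::", "2001:db8::2" },	/* wildcard-local tunnel */
		{ "2001:db8::1", "::" },	/* wildcard-remote tunnel */
	};
	const struct tnl *t = tnl_lookup(tbl, 2, "2001:db8::2", "2001:db8::9");

	printf("%s\n", t ? "matched the wildcard-local tunnel" : "no tunnel");
	return 0;
}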
*/ cache_proxy = ip6mr_cache_find_any_parent(mrt, vif); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index e1a9583bb4191f44b021e3d39483ed29d1f22fc4..66980d8d98d1f5b3ef7a50dc33cb9b617f25604d 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -110,12 +110,8 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk, icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen; icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); } - opt = xchg(&inet6_sk(sk)->opt, opt); - } else { - spin_lock(&sk->sk_dst_lock); - opt = xchg(&inet6_sk(sk)->opt, opt); - spin_unlock(&sk->sk_dst_lock); } + opt = xchg(&inet6_sk(sk)->opt, opt); sk_dst_reset(sk); return opt; diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index ed2c4e400b46bf762ff5379c3d62d69d940e5dd4..5ce107c8aab3477ba20c281c5eb859c572e7347f 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -2824,11 +2824,7 @@ static int igmp6_mcf_seq_show(struct seq_file *seq, void *v) struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq); if (v == SEQ_START_TOKEN) { - seq_printf(seq, - "%3s %6s " - "%32s %32s %6s %6s\n", "Idx", - "Device", "Multicast Address", - "Source Address", "INC", "EXC"); + seq_puts(seq, "Idx Device Multicast Address Source Address INC EXC\n"); } else { seq_printf(seq, "%3d %6.6s %pi6 %pi6 %6lu %6lu\n", diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c index f61429d391d32693f3fb92fd75c630c01520f776..b9779d441b1246906b40812fdf30a980a4673143 100644 --- a/net/ipv6/mip6.c +++ b/net/ipv6/mip6.c @@ -97,16 +97,17 @@ static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb) return -1; if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) { - LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n", - mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type)); + net_dbg_ratelimited("mip6: MH message too short: %d vs >=%d\n", + mh->ip6mh_hdrlen, + mip6_mh_len(mh->ip6mh_type)); mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_hdrlen) + skb_network_header_len(skb)); return -1; } if (mh->ip6mh_proto != IPPROTO_NONE) { - LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n", - mh->ip6mh_proto); + net_dbg_ratelimited("mip6: MH invalid payload proto = %d\n", + mh->ip6mh_proto); mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_proto) + skb_network_header_len(skb)); return -1; @@ -288,7 +289,7 @@ static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb, * XXX: packet if HAO exists. 
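The ipv6_sockglue.c hunk above drops the sk_dst_lock taken around the options swap; xchg() is already atomic, so the lock added nothing to the exchange itself, and both branches collapse into a single xchg(). The same idea expressed with C11 atomics as a userspace analogue:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	_Atomic(int *) opt = NULL;
	int newopt = 42;

	/* atomically publish the new options and fetch the old pointer */
	int *old = atomic_exchange(&opt, &newopt);

	printf("old=%p new=%d\n", (void *)old, *atomic_load(&opt));
	return 0;
}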
*/ if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) { - LIMIT_NETDEBUG(KERN_WARNING "mip6: hao exists already, override\n"); + net_dbg_ratelimited("mip6: hao exists already, override\n"); return offset; } diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 4cb45c1079a29f4b7b59201e829905a5c65ac9fe..682866777d5383dabaf6e3ae2f6f2deef261f557 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -162,7 +162,8 @@ static void ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data) memcpy(opt+2, data, data_len); data_len += 2; opt += data_len; - if ((space -= data_len) > 0) + space -= data_len; + if (space > 0) memset(opt, 0, space); } @@ -656,8 +657,8 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb) if (skb && ipv6_chk_addr(dev_net(dev), &ipv6_hdr(skb)->saddr, dev, 1)) saddr = &ipv6_hdr(skb)->saddr; - - if ((probes -= NEIGH_VAR(neigh->parms, UCAST_PROBES)) < 0) { + probes -= NEIGH_VAR(neigh->parms, UCAST_PROBES); + if (probes < 0) { if (!(neigh->nud_state & NUD_VALID)) { ND_PRINTK(1, dbg, "%s: trying to ucast probe in NUD_INVALID: %pI6\n", @@ -1763,7 +1764,7 @@ int __init ndisc_init(void) /* * Initialize the neighbour table */ - neigh_table_init(&nd_tbl); + neigh_table_init(NEIGH_ND_TABLE, &nd_tbl); #ifdef CONFIG_SYSCTL err = neigh_sysctl_register(NULL, &nd_tbl.parms, @@ -1796,6 +1797,6 @@ void ndisc_cleanup(void) #ifdef CONFIG_SYSCTL neigh_sysctl_unregister(&nd_tbl.parms); #endif - neigh_table_clear(&nd_tbl); + neigh_table_clear(NEIGH_ND_TABLE, &nd_tbl); unregister_pernet_subsys(&ndisc_net_ops); } diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index d38e6a8d8b9fb82ec7d583a5ab2abc652838d470..398377a9d0183d297edff70c53dc5cee0a4ab8f9 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c @@ -36,7 +36,7 @@ int ip6_route_me_harder(struct sk_buff *skb) err = dst->error; if (err) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); - LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n"); + net_dbg_ratelimited("ip6_route_me_harder: No more route\n"); dst_release(dst); return err; } diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index 6af874fc187f64c57b7e16dfbe97ce94768f7df4..a069822936e6086af56f64d628278440e0279005 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig @@ -91,6 +91,15 @@ config NFT_MASQ_IPV6 This is the expression that provides IPv4 masquerading support for nf_tables. +config NFT_REDIR_IPV6 + tristate "IPv6 redirect support for nf_tables" + depends on NF_TABLES_IPV6 + depends on NFT_REDIR + select NF_NAT_REDIRECT + help + This is the expression that provides IPv6 redirect support for + nf_tables.
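The ndisc_fill_addr_option() change earlier in this hunk only splits the in-condition assignment out of the if; behaviour is unchanged: ND options are sized in 8-octet units, so any space left after the payload is zero-filled. A standalone sketch of that padding step, with an illustrative 16-byte option:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char opt[16];
	const unsigned char lladdr[6] = { 0x02, 0x00, 0x5e, 0x10, 0x00, 0x01 };
	unsigned char *p = opt;
	int data_len = sizeof(lladdr);
	int space = sizeof(opt);	/* two 8-octet option units */

	p[0] = 1;			/* type: source link-layer address */
	p[1] = space >> 3;		/* length, in units of 8 octets */
	memcpy(p + 2, lladdr, data_len);
	data_len += 2;			/* account for the type/length header */
	p += data_len;

	space -= data_len;		/* the patch splits this out of the if */
	if (space > 0)
		memset(p, 0, space);	/* zero-fill the option's padding */

	printf("zero-padded %d trailing bytes\n", space);
	return 0;
}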
+ endif # NF_NAT_IPV6 config IP6_NF_IPTABLES diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile index fbb25f01143c8992023ca3fb167e86933b209f84..c36e0a5490de10cd64f5c64571efa13628568199 100644 --- a/net/ipv6/netfilter/Makefile +++ b/net/ipv6/netfilter/Makefile @@ -36,6 +36,7 @@ obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o obj-$(CONFIG_NFT_REJECT_IPV6) += nft_reject_ipv6.o obj-$(CONFIG_NFT_MASQ_IPV6) += nft_masq_ipv6.o +obj-$(CONFIG_NFT_REDIR_IPV6) += nft_redir_ipv6.o # matches obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c index 7b17a0be93e7eccb2a26cd3294713d0f1112158d..ddf07e6f59d7de70bcf41ba84880b2c699296db7 100644 --- a/net/ipv6/netfilter/nf_log_ipv6.c +++ b/net/ipv6/netfilter/nf_log_ipv6.c @@ -5,6 +5,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include @@ -398,8 +399,17 @@ static int __init nf_log_ipv6_init(void) if (ret < 0) return ret; - nf_log_register(NFPROTO_IPV6, &nf_ip6_logger); + ret = nf_log_register(NFPROTO_IPV6, &nf_ip6_logger); + if (ret < 0) { + pr_err("failed to register logger\n"); + goto err1; + } + return 0; + +err1: + unregister_pernet_subsys(&nf_log_ipv6_net_ops); + return ret; } static void __exit nf_log_ipv6_exit(void) @@ -412,6 +422,6 @@ module_init(nf_log_ipv6_init); module_exit(nf_log_ipv6_exit); MODULE_AUTHOR("Netfilter Core Team "); -MODULE_DESCRIPTION("Netfilter IPv4 packet logging"); +MODULE_DESCRIPTION("Netfilter IPv6 packet logging"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NF_LOGGER(AF_INET6, 0); diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c index 015eb8a80766f477ed797d496762f4fb2e6f7661..d05b36440e8be341e3b22cc172acc05962c8a37a 100644 --- a/net/ipv6/netfilter/nf_reject_ipv6.c +++ b/net/ipv6/netfilter/nf_reject_ipv6.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c new file mode 100644 index 0000000000000000000000000000000000000000..2433a6bfb191c4259bedfe2d697baa618c818b51 --- /dev/null +++ b/net/ipv6/netfilter/nft_redir_ipv6.c @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2014 Arturo Borrero Gonzalez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
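The nf_log_ipv6_init() fix above finally checks the return value of nf_log_register() and unwinds the pernet registration on failure, the usual reverse-order cleanup idiom for init paths. A skeletal illustration with stand-in register functions:

#include <stdio.h>

static int register_pernet(void) { return 0; }
static void unregister_pernet(void) { }
static int register_logger(void) { return -1; }	/* pretend this fails */

static int init_example(void)
{
	int ret = register_pernet();

	if (ret < 0)
		return ret;

	ret = register_logger();
	if (ret < 0)
		goto err_unreg_pernet;	/* unwind what already succeeded */

	return 0;

err_unreg_pernet:
	unregister_pernet();
	return ret;
}

int main(void)
{
	printf("init: %d\n", init_example());
	return 0;
}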
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void nft_redir_ipv6_eval(const struct nft_expr *expr, + struct nft_data data[NFT_REG_MAX + 1], + const struct nft_pktinfo *pkt) +{ + struct nft_redir *priv = nft_expr_priv(expr); + struct nf_nat_range range; + unsigned int verdict; + + memset(&range, 0, sizeof(range)); + if (priv->sreg_proto_min) { + range.min_proto.all = (__force __be16) + data[priv->sreg_proto_min].data[0]; + range.max_proto.all = (__force __be16) + data[priv->sreg_proto_max].data[0]; + range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; + } + + range.flags |= priv->flags; + + verdict = nf_nat_redirect_ipv6(pkt->skb, &range, pkt->ops->hooknum); + data[NFT_REG_VERDICT].verdict = verdict; +} + +static struct nft_expr_type nft_redir_ipv6_type; +static const struct nft_expr_ops nft_redir_ipv6_ops = { + .type = &nft_redir_ipv6_type, + .size = NFT_EXPR_SIZE(sizeof(struct nft_redir)), + .eval = nft_redir_ipv6_eval, + .init = nft_redir_init, + .dump = nft_redir_dump, + .validate = nft_redir_validate, +}; + +static struct nft_expr_type nft_redir_ipv6_type __read_mostly = { + .family = NFPROTO_IPV6, + .name = "redir", + .ops = &nft_redir_ipv6_ops, + .policy = nft_redir_policy, + .maxattr = NFTA_REDIR_MAX, + .owner = THIS_MODULE, +}; + +static int __init nft_redir_ipv6_module_init(void) +{ + return nft_register_expr(&nft_redir_ipv6_type); +} + +static void __exit nft_redir_ipv6_module_exit(void) +{ + nft_unregister_expr(&nft_redir_ipv6_type); +} + +module_init(nft_redir_ipv6_module_init); +module_exit(nft_redir_ipv6_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Arturo Borrero Gonzalez "); +MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "redir"); diff --git a/net/ipv6/netfilter/nft_reject_ipv6.c b/net/ipv6/netfilter/nft_reject_ipv6.c index 0bc19fa878213318943039eee1a9dc86c9b42510..f732859241444118d6cc357843482f70b432c786 100644 --- a/net/ipv6/netfilter/nft_reject_ipv6.c +++ b/net/ipv6/netfilter/nft_reject_ipv6.c @@ -19,9 +19,9 @@ #include #include -void nft_reject_ipv6_eval(const struct nft_expr *expr, - struct nft_data data[NFT_REG_MAX + 1], - const struct nft_pktinfo *pkt) +static void nft_reject_ipv6_eval(const struct nft_expr *expr, + struct nft_data data[NFT_REG_MAX + 1], + const struct nft_pktinfo *pkt) { struct nft_reject *priv = nft_expr_priv(expr); struct net *net = dev_net((pkt->in != NULL) ? 
pkt->in : pkt->out); @@ -38,7 +38,6 @@ void nft_reject_ipv6_eval(const struct nft_expr *expr, data[NFT_REG_VERDICT].verdict = NF_DROP; } -EXPORT_SYMBOL_GPL(nft_reject_ipv6_eval); static struct nft_expr_type nft_reject_ipv6_type; static const struct nft_expr_ops nft_reject_ipv6_ops = { diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index 5b7a1ed2aba95a837a0063b96cb9803ff8925f0c..2d3148378a1f6a7315b135090bf1fed57f181a3b 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -163,7 +163,8 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, pfh.icmph.checksum = 0; pfh.icmph.un.echo.id = inet->inet_sport; pfh.icmph.un.echo.sequence = user_icmph.icmp6_sequence; - pfh.iov = msg->msg_iov; + /* XXX: stripping const */ + pfh.iov = (struct iovec *)msg->msg_iter.iov; pfh.wcheck = 0; pfh.family = AF_INET6; diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 1752cd0b48820367ad64cd926d8ea16a6c69341a..679253d0af8427008f72832e0c1b61341de45bd6 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -136,6 +136,7 @@ static const struct snmp_mib snmp6_udp6_list[] = { SNMP_MIB_ITEM("Udp6RcvbufErrors", UDP_MIB_RCVBUFERRORS), SNMP_MIB_ITEM("Udp6SndbufErrors", UDP_MIB_SNDBUFERRORS), SNMP_MIB_ITEM("Udp6InCsumErrors", UDP_MIB_CSUMERRORS), + SNMP_MIB_ITEM("Udp6IgnoredMulti", UDP_MIB_IGNOREDMULTI), SNMP_MIB_SENTINEL }; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 896af8807979fad382ba65724ccad0bf040c992f..ee25631f8c293db3db95a0992fa2b319872afb30 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -486,13 +486,13 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, } if (skb_csum_unnecessary(skb)) { - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); } else if (msg->msg_flags&MSG_TRUNC) { if (__skb_checksum_complete(skb)) goto csum_copy_err; - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); } else { - err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov); + err = skb_copy_and_csum_datagram_msg(skb, 0, msg); if (err == -EINVAL) goto csum_copy_err; } @@ -548,7 +548,8 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, if (!rp->checksum) goto send; - if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) + skb = skb_peek(&sk->sk_write_queue); + if (!skb) goto out; offset = rp->offset; @@ -671,65 +672,62 @@ error: return err; } -static int rawv6_probe_proto_opt(struct flowi6 *fl6, struct msghdr *msg) +struct raw6_frag_vec { + struct msghdr *msg; + int hlen; + char c[4]; +}; + +static int rawv6_probe_proto_opt(struct raw6_frag_vec *rfv, struct flowi6 *fl6) { - struct iovec *iov; - u8 __user *type = NULL; - u8 __user *code = NULL; - u8 len = 0; - int probed = 0; - int i; - - if (!msg->msg_iov) - return 0; + int err = 0; + switch (fl6->flowi6_proto) { + case IPPROTO_ICMPV6: + rfv->hlen = 2; + err = memcpy_from_msg(rfv->c, rfv->msg, rfv->hlen); + if (!err) { + fl6->fl6_icmp_type = rfv->c[0]; + fl6->fl6_icmp_code = rfv->c[1]; + } + break; + case IPPROTO_MH: + rfv->hlen = 4; + err = memcpy_from_msg(rfv->c, rfv->msg, rfv->hlen); + if (!err) + fl6->fl6_mh_type = rfv->c[2]; + } + return err; +} - for (i = 0; i < msg->msg_iovlen; i++) { - iov = &msg->msg_iov[i]; - if (!iov) - continue; +static int raw6_getfrag(void *from, char *to, int offset, int len, int odd, + struct sk_buff *skb) +{ + struct raw6_frag_vec *rfv = from; - switch (fl6->flowi6_proto) { - case IPPROTO_ICMPV6: - /* check if one-byte field is readable or not. 
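The rawv6_probe_proto_opt() rewrite in this raw.c hunk drops the error-prone manual iovec walk: it now copies the first two (ICMPv6) or four (MH) header bytes into a small scratch buffer via memcpy_from_msg() and reads the type/code fields from there, and raw6_getfrag() later replays those saved bytes before handing off to ip_generic_getfrag(). Roughly the idea, modeling the message as a flat buffer:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct raw6_frag_vec {
	const uint8_t *msg;	/* stands in for struct msghdr */
	int hlen;
	uint8_t c[4];
};

int main(void)
{
	const uint8_t payload[] = { 128, 0, 0xde, 0xad };	/* echo request */
	struct raw6_frag_vec rfv = { .msg = payload, .hlen = 0 };

	rfv.hlen = 2;				/* ICMPv6: type and code */
	memcpy(rfv.c, rfv.msg, rfv.hlen);	/* models memcpy_from_msg() */

	printf("icmp6 type=%u code=%u\n", rfv.c[0], rfv.c[1]);
	return 0;
}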
*/ - if (iov->iov_base && iov->iov_len < 1) - break; - - if (!type) { - type = iov->iov_base; - /* check if code field is readable or not. */ - if (iov->iov_len > 1) - code = type + 1; - } else if (!code) - code = iov->iov_base; - - if (type && code) { - if (get_user(fl6->fl6_icmp_type, type) || - get_user(fl6->fl6_icmp_code, code)) - return -EFAULT; - probed = 1; - } - break; - case IPPROTO_MH: - if (iov->iov_base && iov->iov_len < 1) - break; - /* check if type field is readable or not. */ - if (iov->iov_len > 2 - len) { - u8 __user *p = iov->iov_base; - if (get_user(fl6->fl6_mh_type, &p[2 - len])) - return -EFAULT; - probed = 1; - } else - len += iov->iov_len; + if (offset < rfv->hlen) { + int copy = min(rfv->hlen - offset, len); - break; - default: - probed = 1; - break; - } - if (probed) - break; + if (skb->ip_summed == CHECKSUM_PARTIAL) + memcpy(to, rfv->c + offset, copy); + else + skb->csum = csum_block_add( + skb->csum, + csum_partial_copy_nocheck(rfv->c + offset, + to, copy, 0), + odd); + + odd = 0; + offset += copy; + to += copy; + len -= copy; + + if (!len) + return 0; } - return 0; + + offset -= rfv->hlen; + + return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb); } static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, @@ -744,6 +742,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct ipv6_txoptions *opt = NULL; struct ip6_flowlabel *flowlabel = NULL; struct dst_entry *dst = NULL; + struct raw6_frag_vec rfv; struct flowi6 fl6; int addr_len = msg->msg_namelen; int hlimit = -1; @@ -847,7 +846,9 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, opt = ipv6_fixup_options(&opt_space, opt); fl6.flowi6_proto = proto; - err = rawv6_probe_proto_opt(&fl6, msg); + rfv.msg = msg; + rfv.hlen = 0; + err = rawv6_probe_proto_opt(&rfv, &fl6); if (err) goto out; @@ -885,10 +886,11 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, back_from_confirm: if (inet->hdrincl) - err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl6, &dst, msg->msg_flags); + /* XXX: stripping const */ + err = rawv6_send_hdrinc(sk, (struct iovec *)msg->msg_iter.iov, len, &fl6, &dst, msg->msg_flags); else { lock_sock(sk); - err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, + err = ip6_append_data(sk, raw6_getfrag, &rfv, len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info *)dst, msg->msg_flags, dontfrag); diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 1a157ca2ebc18c5a12519adb9eacd30d508722eb..d7d70e69973b7455fbf2b5cb88a09d428f9c45ea 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -69,7 +69,7 @@ struct ip6frag_skb_cb { #define FRAG6_CB(skb) ((struct ip6frag_skb_cb *)((skb)->cb)) -static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h) +static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h) { return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK); } @@ -178,7 +178,7 @@ static void ip6_frag_expire(unsigned long data) ip6_expire_frag_queue(net, fq, &ip6_frags); } -static __inline__ struct frag_queue * +static struct frag_queue * fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6_addr *dst, u8 ecn) { @@ -429,7 +429,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, struct sk_buff *clone; int i, plen = 0; - if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) + clone = alloc_skb(0, GFP_ATOMIC); + if (clone == NULL) goto out_oom; clone->next = head->next; head->next = clone; @@ -684,21 +685,21 @@ static void ip6_frags_sysctl_unregister(void) 
unregister_net_sysctl_table(ip6_ctl_header); } #else -static inline int ip6_frags_ns_sysctl_register(struct net *net) +static int ip6_frags_ns_sysctl_register(struct net *net) { return 0; } -static inline void ip6_frags_ns_sysctl_unregister(struct net *net) +static void ip6_frags_ns_sysctl_unregister(struct net *net) { } -static inline int ip6_frags_sysctl_register(void) +static int ip6_frags_sysctl_register(void) { return 0; } -static inline void ip6_frags_sysctl_unregister(void) +static void ip6_frags_sysctl_unregister(void) { } #endif diff --git a/net/ipv6/route.c b/net/ipv6/route.c index a318dd89b6d91a1c373ef0ef0ea8604976c9e36f..c91083156edbe2b631b1fb11e3ead35f1925b8a7 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -772,23 +772,22 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, } #endif -#define BACKTRACK(__net, saddr) \ -do { \ - if (rt == __net->ipv6.ip6_null_entry) { \ - struct fib6_node *pn; \ - while (1) { \ - if (fn->fn_flags & RTN_TL_ROOT) \ - goto out; \ - pn = fn->parent; \ - if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \ - fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \ - else \ - fn = pn; \ - if (fn->fn_flags & RTN_RTINFO) \ - goto restart; \ - } \ - } \ -} while (0) +static struct fib6_node *fib6_backtrack(struct fib6_node *fn, + struct in6_addr *saddr) +{ + struct fib6_node *pn; + while (1) { + if (fn->fn_flags & RTN_TL_ROOT) + return NULL; + pn = fn->parent; + if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) + fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); + else + fn = pn; + if (fn->fn_flags & RTN_RTINFO) + return fn; + } +} static struct rt6_info *ip6_pol_route_lookup(struct net *net, struct fib6_table *table, @@ -804,8 +803,11 @@ restart: rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags); if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0) rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags); - BACKTRACK(net, &fl6->saddr); -out: + if (rt == net->ipv6.ip6_null_entry) { + fn = fib6_backtrack(fn, &fl6->saddr); + if (fn) + goto restart; + } dst_use(&rt->dst, jiffies); read_unlock_bh(&table->tb6_lock); return rt; @@ -915,33 +917,48 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif, struct flowi6 *fl6, int flags) { - struct fib6_node *fn; + struct fib6_node *fn, *saved_fn; struct rt6_info *rt, *nrt; int strict = 0; int attempts = 3; int err; - int reachable = net->ipv6.devconf_all->forwarding ?
0 : RT6_LOOKUP_F_REACHABLE; strict |= flags & RT6_LOOKUP_F_IFACE; + if (net->ipv6.devconf_all->forwarding == 0) + strict |= RT6_LOOKUP_F_REACHABLE; -relookup: +redo_fib6_lookup_lock: read_lock_bh(&table->tb6_lock); -restart_2: fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); + saved_fn = fn; -restart: - rt = rt6_select(fn, oif, strict | reachable); +redo_rt6_select: + rt = rt6_select(fn, oif, strict); if (rt->rt6i_nsiblings) - rt = rt6_multipath_select(rt, fl6, oif, strict | reachable); - BACKTRACK(net, &fl6->saddr); - if (rt == net->ipv6.ip6_null_entry || - rt->rt6i_flags & RTF_CACHE) - goto out; + rt = rt6_multipath_select(rt, fl6, oif, strict); + if (rt == net->ipv6.ip6_null_entry) { + fn = fib6_backtrack(fn, &fl6->saddr); + if (fn) + goto redo_rt6_select; + else if (strict & RT6_LOOKUP_F_REACHABLE) { + /* also consider unreachable route */ + strict &= ~RT6_LOOKUP_F_REACHABLE; + fn = saved_fn; + goto redo_rt6_select; + } else { + dst_hold(&rt->dst); + read_unlock_bh(&table->tb6_lock); + goto out2; + } + } dst_hold(&rt->dst); read_unlock_bh(&table->tb6_lock); + if (rt->rt6i_flags & RTF_CACHE) + goto out2; + if (!(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY))) nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr); else if (!(rt->dst.flags & DST_HOST)) @@ -967,15 +984,8 @@ restart: * released someone could insert this route. Relookup. */ ip6_rt_put(rt); - goto relookup; + goto redo_fib6_lookup_lock; -out: - if (reachable) { - reachable = 0; - goto restart_2; - } - dst_hold(&rt->dst); - read_unlock_bh(&table->tb6_lock); out2: rt->dst.lastuse = jiffies; rt->dst.__use++; @@ -1235,10 +1245,12 @@ restart: rt = net->ipv6.ip6_null_entry; else if (rt->dst.error) { rt = net->ipv6.ip6_null_entry; - goto out; + } else if (rt == net->ipv6.ip6_null_entry) { + fn = fib6_backtrack(fn, &fl6->saddr); + if (fn) + goto restart; } - BACKTRACK(net, &fl6->saddr); -out: + dst_hold(&rt->dst); read_unlock_bh(&table->tb6_lock); diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index a24557a1c1d8b6da7ee51e862ae4b847d9e744d6..213546bd6d5de5eeccfabe45c6dd4d575b475266 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -1241,7 +1241,8 @@ ipip6_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) goto done; err = -ENOENT; - if ((t = ipip6_tunnel_locate(net, &p, 0)) == NULL) + t = ipip6_tunnel_locate(net, &p, 0); + if (t == NULL) goto done; err = -EPERM; if (t == netdev_priv(sitn->fb_tunnel_dev)) @@ -1711,7 +1712,7 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) || nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, - tunnel->encap.dport)) + tunnel->encap.flags)) goto nla_put_failure; return 0; @@ -1836,8 +1837,8 @@ static int __net_init sit_init_net(struct net *net) goto err_dev_free; ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn); - - if ((err = register_netdev(sitn->fb_tunnel_dev))) + err = register_netdev(sitn->fb_tunnel_dev); + if (err) goto err_reg_dev; t = netdev_priv(sitn->fb_tunnel_dev); diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 2f25cb6347ca556a8c7418e327bbff2a715c1e6a..7337fc7947e2eba2c5e6eaccbc9cfd660d3a0ccd 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -166,13 +166,15 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) int mss; struct dst_entry *dst; __u8 rcv_wscale; - bool ecn_ok = false; if (!sysctl_tcp_syncookies || !th->ack || th->rst) goto out; - if (tcp_synq_no_recent_overflow(sk) 
|| - (mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie)) == 0) { + if (tcp_synq_no_recent_overflow(sk)) + goto out; + + mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie); + if (mss == 0) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); goto out; } @@ -183,7 +185,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) memset(&tcp_opt, 0, sizeof(tcp_opt)); tcp_parse_options(skb, &tcp_opt, 0, NULL); - if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok)) + if (!cookie_timestamp_decode(&tcp_opt)) goto out; ret = NULL; @@ -220,7 +222,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) req->expires = 0UL; req->num_retrans = 0; - ireq->ecn_ok = ecn_ok; ireq->snd_wscale = tcp_opt.snd_wscale; ireq->sack_ok = tcp_opt.sack_ok; ireq->wscale_ok = tcp_opt.wscale_ok; @@ -261,6 +262,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) dst_metric(dst, RTAX_INITRWND)); ireq->rcv_wscale = rcv_wscale; + ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), dst); ret = get_cookie_sock(sk, skb, req, dst); out: @@ -269,4 +271,3 @@ out_free: reqsk_free(req); return NULL; } - diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index dc495ae2ead05aca0190bcab8c2d95b58beb2ac9..5ff87805258eb1f0dfa1ed14967b2e1c8f5c4c6a 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -787,16 +787,16 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { .queue_hash_add = inet6_csk_reqsk_queue_hash_add, }; -static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, - u32 tsval, u32 tsecr, int oif, - struct tcp_md5sig_key *key, int rst, u8 tclass, - u32 label) +static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq, + u32 ack, u32 win, u32 tsval, u32 tsecr, + int oif, struct tcp_md5sig_key *key, int rst, + u8 tclass, u32 label) { const struct tcphdr *th = tcp_hdr(skb); struct tcphdr *t1; struct sk_buff *buff; struct flowi6 fl6; - struct net *net = dev_net(skb_dst(skb)->dev); + struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); struct sock *ctl_sk = net->ipv6.tcp_sk; unsigned int tot_len = sizeof(struct tcphdr); struct dst_entry *dst; @@ -946,7 +946,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) (th->doff << 2); oif = sk ? 
sk->sk_bound_dev_if : 0; - tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0); + tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0); #ifdef CONFIG_TCP_MD5SIG release_sk1: @@ -957,13 +957,13 @@ release_sk1: #endif } -static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, - u32 win, u32 tsval, u32 tsecr, int oif, +static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq, + u32 ack, u32 win, u32 tsval, u32 tsecr, int oif, struct tcp_md5sig_key *key, u8 tclass, u32 label) { - tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, oif, key, 0, tclass, - label); + tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0, + tclass, label); } static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) @@ -971,7 +971,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) struct inet_timewait_sock *tw = inet_twsk(sk); struct tcp_timewait_sock *tcptw = tcp_twsk(sk); - tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, + tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcp_time_stamp + tcptw->tw_ts_offset, tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw), @@ -986,10 +986,10 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV * sk->sk_state == TCP_SYN_RECV -> for Fast Open. */ - tcp_v6_send_ack(skb, (sk->sk_state == TCP_LISTEN) ? + tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt, - tcp_rsk(req)->rcv_nxt, - req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if, + tcp_rsk(req)->rcv_nxt, req->rcv_wnd, + tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if, tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0, 0); } @@ -1296,6 +1296,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) struct dst_entry *dst = sk->sk_rx_dst; sock_rps_save_rxhash(sk, skb); + sk_mark_napi_id(sk, skb); if (dst) { if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || dst->ops->check(dst, np->rx_dst_cookie) == NULL) { @@ -1325,6 +1326,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) */ if (nsk != sk) { sock_rps_save_rxhash(nsk, skb); + sk_mark_napi_id(sk, skb); if (tcp_child_process(sk, nsk, skb)) goto reset; if (opt_skb) @@ -1457,7 +1459,7 @@ process: if (sk_filter(sk, skb)) goto discard_and_relse; - sk_mark_napi_id(sk, skb); + sk_incoming_cpu_update(sk); skb->dev = NULL; bh_lock_sock_nested(sk); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index f6ba535b6febe40aad990e7f9541446fc573d511..189dc4ae3ecac1b140a7208c4b6de0b956e0b710 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -148,72 +148,85 @@ static inline int compute_score(struct sock *sk, struct net *net, const struct in6_addr *daddr, __be16 dport, int dif) { - int score = -1; + int score; + struct inet_sock *inet; - if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum && - sk->sk_family == PF_INET6) { - struct inet_sock *inet = inet_sk(sk); + if (!net_eq(sock_net(sk), net) || + udp_sk(sk)->udp_port_hash != hnum || + sk->sk_family != PF_INET6) + return -1; - score = 0; - if (inet->inet_dport) { - if (inet->inet_dport != sport) - return -1; - score++; - } - if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) { - if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr)) - return -1; - score++; - } - if (!ipv6_addr_any(&sk->sk_v6_daddr)) { - if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr)) - return -1; - score++; - } - 
if (sk->sk_bound_dev_if) { - if (sk->sk_bound_dev_if != dif) - return -1; - score++; - } + score = 0; + inet = inet_sk(sk); + + if (inet->inet_dport) { + if (inet->inet_dport != sport) + return -1; + score++; + } + + if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) { + if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr)) + return -1; + score++; + } + + if (!ipv6_addr_any(&sk->sk_v6_daddr)) { + if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr)) + return -1; + score++; } + + if (sk->sk_bound_dev_if) { + if (sk->sk_bound_dev_if != dif) + return -1; + score++; + } + return score; } #define SCORE2_MAX (1 + 1 + 1) static inline int compute_score2(struct sock *sk, struct net *net, - const struct in6_addr *saddr, __be16 sport, - const struct in6_addr *daddr, unsigned short hnum, - int dif) + const struct in6_addr *saddr, __be16 sport, + const struct in6_addr *daddr, + unsigned short hnum, int dif) { - int score = -1; + int score; + struct inet_sock *inet; - if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum && - sk->sk_family == PF_INET6) { - struct inet_sock *inet = inet_sk(sk); + if (!net_eq(sock_net(sk), net) || + udp_sk(sk)->udp_port_hash != hnum || + sk->sk_family != PF_INET6) + return -1; - if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr)) + if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr)) + return -1; + + score = 0; + inet = inet_sk(sk); + + if (inet->inet_dport) { + if (inet->inet_dport != sport) return -1; - score = 0; - if (inet->inet_dport) { - if (inet->inet_dport != sport) - return -1; - score++; - } - if (!ipv6_addr_any(&sk->sk_v6_daddr)) { - if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr)) - return -1; - score++; - } - if (sk->sk_bound_dev_if) { - if (sk->sk_bound_dev_if != dif) - return -1; - score++; - } + score++; } + + if (!ipv6_addr_any(&sk->sk_v6_daddr)) { + if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr)) + return -1; + score++; + } + + if (sk->sk_bound_dev_if) { + if (sk->sk_bound_dev_if != dif) + return -1; + score++; + } + return score; } - /* called with read_rcu_lock() */ static struct sock *udp6_lib_lookup2(struct net *net, const struct in6_addr *saddr, __be16 sport, @@ -357,7 +370,8 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb, struct sock *sk; const struct ipv6hdr *iph = ipv6_hdr(skb); - if (unlikely(sk = skb_steal_sock(skb))) + sk = skb_steal_sock(skb); + if (unlikely(sk)) return sk; return __udp6_lib_lookup(dev_net(skb_dst(skb)->dev), &iph->saddr, sport, &iph->daddr, dport, inet6_iif(skb), @@ -424,10 +438,10 @@ try_again: } if (skb_csum_unnecessary(skb)) - err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), - msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, sizeof(struct udphdr), + msg, copied); else { - err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); + err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), msg); if (err == -EINVAL) goto csum_copy_err; } @@ -577,6 +591,7 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) if (!ipv6_addr_any(&sk->sk_v6_daddr)) { sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); + sk_incoming_cpu_update(sk); } rc = sock_queue_rcv_skb(sk, skb); @@ -659,15 +674,13 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { if (up->pcrlen == 0) { /* full coverage was set */ - LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: partial coverage" - " %d while full coverage %d requested\n", - UDP_SKB_CB(skb)->cscov, skb->len); + net_dbg_ratelimited("UDPLITE6: partial 
coverage %d while full coverage %d requested\n", + UDP_SKB_CB(skb)->cscov, skb->len); goto drop; } if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { - LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: coverage %d " - "too small, need min %d\n", - UDP_SKB_CB(skb)->cscov, up->pcrlen); + net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n", + UDP_SKB_CB(skb)->cscov, up->pcrlen); goto drop; } } @@ -760,9 +773,9 @@ static void udp6_csum_zero_error(struct sk_buff *skb) /* RFC 2460 section 8.1 says that we SHOULD log * this error. Well, it is reasonable. */ - LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n", - &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source), - &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest)); + net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n", + &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source), + &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest)); } /* @@ -771,7 +784,7 @@ static void udp6_csum_zero_error(struct sk_buff *skb) */ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, const struct in6_addr *saddr, const struct in6_addr *daddr, - struct udp_table *udptable) + struct udp_table *udptable, int proto) { struct sock *sk, *stack[256 / sizeof(struct sock *)]; const struct udphdr *uh = udp_hdr(skb); @@ -781,6 +794,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, int dif = inet6_iif(skb); unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node); unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10); + bool inner_flushed = false; if (use_hash2) { hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) & @@ -803,6 +817,7 @@ start_lookup: (uh->check || udp_sk(sk)->no_check6_rx)) { if (unlikely(count == ARRAY_SIZE(stack))) { flush_stack(stack, count, skb, ~0); + inner_flushed = true; count = 0; } stack[count++] = sk; @@ -821,7 +836,10 @@ start_lookup: if (count) { flush_stack(stack, count, skb, count - 1); } else { - kfree_skb(skb); + if (!inner_flushed) + UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI, + proto == IPPROTO_UDPLITE); + consume_skb(skb); } return 0; } @@ -873,7 +891,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, */ if (ipv6_addr_is_multicast(daddr)) return __udp6_lib_mcast_deliver(net, skb, - saddr, daddr, udptable); + saddr, daddr, udptable, proto); /* Unicast */ @@ -925,14 +943,11 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, return 0; short_packet: - LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n", - proto == IPPROTO_UDPLITE ? "-Lite" : "", - saddr, - ntohs(uh->source), - ulen, - skb->len, - daddr, - ntohs(uh->dest)); + net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n", + proto == IPPROTO_UDPLITE ? "-Lite" : "", + saddr, ntohs(uh->source), + ulen, skb->len, + daddr, ntohs(uh->dest)); goto discard; csum_error: UDP6_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); @@ -1025,7 +1040,8 @@ static int udp_v6_push_pending_frames(struct sock *sk) fl6 = &inet->cork.fl.u.ip6; /* Grab the skbuff where UDP header space exists. */ - if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) + skb = skb_peek(&sk->sk_write_queue); + if (skb == NULL) goto out; /* @@ -1284,7 +1300,7 @@ back_from_confirm: /* ... which is an evident application bug. 
--ANK */ release_sock(sk); - LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n"); + net_dbg_ratelimited("udp cork app bug 2\n"); err = -EINVAL; goto out; } @@ -1296,7 +1312,7 @@ do_append_data: dontfrag = np->dontfrag; up->len += ulen; getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; - err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen, + err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr), hlimit, tclass, opt, &fl6, (struct rt6_info *)dst, corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag); diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index 6b8f543f6ac6ac76c2efaec21b4489fe347f9544..b6aa8ed182579614d3738f107e839eb2e82e5371 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -42,11 +42,11 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, SKB_GSO_DODGY | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM | + SKB_GSO_TUNNEL_REMCSUM | SKB_GSO_GRE | SKB_GSO_GRE_CSUM | SKB_GSO_IPIP | - SKB_GSO_SIT | - SKB_GSO_MPLS) || + SKB_GSO_SIT) || !(type & (SKB_GSO_UDP)))) goto out; diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 1b095ca37aa46bd86a534a5d7eff4824d65b2ef2..f11ad1d95e0e6e88294003fee338743863daa443 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c @@ -306,7 +306,7 @@ void ipxitf_down(struct ipx_interface *intrfc) spin_unlock_bh(&ipx_interfaces_lock); } -static __inline__ void __ipxitf_put(struct ipx_interface *intrfc) +static void __ipxitf_put(struct ipx_interface *intrfc) { if (atomic_dec_and_test(&intrfc->refcnt)) __ipxitf_down(intrfc); @@ -1745,8 +1745,7 @@ static int ipx_sendmsg(struct kiocb *iocb, struct socket *sock, memcpy(usipx->sipx_node, ipxs->dest_addr.node, IPX_NODE_LEN); } - rc = ipxrtr_route_packet(sk, usipx, msg->msg_iov, len, - flags & MSG_DONTWAIT); + rc = ipxrtr_route_packet(sk, usipx, msg, len, flags & MSG_DONTWAIT); if (rc >= 0) rc = len; out: @@ -1808,8 +1807,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, msg->msg_flags |= MSG_TRUNC; } - rc = skb_copy_datagram_iovec(skb, sizeof(struct ipxhdr), msg->msg_iov, - copied); + rc = skb_copy_datagram_msg(skb, sizeof(struct ipxhdr), msg, copied); if (rc) goto out_free; if (skb->tstamp.tv64) diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c index e15c16a517e72c162d3442bfa11fccfe3ef371b5..c1d247ebe916685ba2d48985462e6592f12d4a5b 100644 --- a/net/ipx/ipx_proc.c +++ b/net/ipx/ipx_proc.c @@ -45,7 +45,7 @@ static int ipx_seq_interface_show(struct seq_file *seq, void *v) } i = list_entry(v, struct ipx_interface, node); - seq_printf(seq, "%08lX ", (unsigned long int)ntohl(i->if_netnum)); + seq_printf(seq, "%08X ", ntohl(i->if_netnum)); seq_printf(seq, "%02X%02X%02X%02X%02X%02X ", i->if_node[0], i->if_node[1], i->if_node[2], i->if_node[3], i->if_node[4], i->if_node[5]); @@ -87,10 +87,10 @@ static int ipx_seq_route_show(struct seq_file *seq, void *v) rt = list_entry(v, struct ipx_route, node); - seq_printf(seq, "%08lX ", (unsigned long int)ntohl(rt->ir_net)); + seq_printf(seq, "%08X ", ntohl(rt->ir_net)); if (rt->ir_routed) - seq_printf(seq, "%08lX %02X%02X%02X%02X%02X%02X\n", - (long unsigned int)ntohl(rt->ir_intrfc->if_netnum), + seq_printf(seq, "%08X %02X%02X%02X%02X%02X%02X\n", + ntohl(rt->ir_intrfc->if_netnum), rt->ir_router_node[0], rt->ir_router_node[1], rt->ir_router_node[2], rt->ir_router_node[3], rt->ir_router_node[4], rt->ir_router_node[5]); @@ -194,19 +194,19 @@ static int ipx_seq_socket_show(struct seq_file *seq, void *v) s = v; ipxs = ipx_sk(s); #ifdef CONFIG_IPX_INTERN - seq_printf(seq, 
"%08lX:%02X%02X%02X%02X%02X%02X:%04X ", - (unsigned long)ntohl(ipxs->intrfc->if_netnum), + seq_printf(seq, "%08X:%02X%02X%02X%02X%02X%02X:%04X ", + ntohl(ipxs->intrfc->if_netnum), ipxs->node[0], ipxs->node[1], ipxs->node[2], ipxs->node[3], ipxs->node[4], ipxs->node[5], ntohs(ipxs->port)); #else - seq_printf(seq, "%08lX:%04X ", (unsigned long) ntohl(ipxs->intrfc->if_netnum), + seq_printf(seq, "%08X:%04X ", ntohl(ipxs->intrfc->if_netnum), ntohs(ipxs->port)); #endif /* CONFIG_IPX_INTERN */ if (s->sk_state != TCP_ESTABLISHED) seq_printf(seq, "%-28s", "Not_Connected"); else { - seq_printf(seq, "%08lX:%02X%02X%02X%02X%02X%02X:%04X ", - (unsigned long)ntohl(ipxs->dest_addr.net), + seq_printf(seq, "%08X:%02X%02X%02X%02X%02X%02X:%04X ", + ntohl(ipxs->dest_addr.net), ipxs->dest_addr.node[0], ipxs->dest_addr.node[1], ipxs->dest_addr.node[2], ipxs->dest_addr.node[3], ipxs->dest_addr.node[4], ipxs->dest_addr.node[5], diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c index 67e7ad3d46b1fb4489a175836351607e7f5ae741..3e2a32a9f3bda13459d974fed70bb8dcc4c54fea 100644 --- a/net/ipx/ipx_route.c +++ b/net/ipx/ipx_route.c @@ -165,7 +165,7 @@ int ipxrtr_route_skb(struct sk_buff *skb) * Route an outgoing frame from a socket. */ int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx, - struct iovec *iov, size_t len, int noblock) + struct msghdr *msg, size_t len, int noblock) { struct sk_buff *skb; struct ipx_sock *ipxs = ipx_sk(sk); @@ -229,7 +229,7 @@ int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx, memcpy(ipx->ipx_dest.node, usipx->sipx_node, IPX_NODE_LEN); ipx->ipx_dest.sock = usipx->sipx_port; - rc = memcpy_fromiovec(skb_put(skb, len), iov, len); + rc = memcpy_from_msg(skb_put(skb, len), msg, len); if (rc) { kfree_skb(skb); goto out_put; diff --git a/net/ipx/sysctl_net_ipx.c b/net/ipx/sysctl_net_ipx.c index ad7c03dedaab831c259f2041767227d955e584f3..0dafcc561ed612c550e352ace51187b937fb4bd7 100644 --- a/net/ipx/sysctl_net_ipx.c +++ b/net/ipx/sysctl_net_ipx.c @@ -9,14 +9,12 @@ #include #include #include +#include #ifndef CONFIG_SYSCTL #error This file should not be compiled without CONFIG_SYSCTL defined #endif -/* From af_ipx.c */ -extern int sysctl_ipx_pprop_broadcasting; - static struct ctl_table ipx_table[] = { { .procname = "ipx_pprop_broadcasting", diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 3f3a6cbdceb7e20054a9c427de10682cf67478b9..568edc72d7371f2b01dffa9af9cc114fad5d7967 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -84,14 +84,12 @@ static int irda_data_indication(void *instance, void *sap, struct sk_buff *skb) struct sock *sk; int err; - IRDA_DEBUG(3, "%s()\n", __func__); - self = instance; sk = instance; err = sock_queue_rcv_skb(sk, skb); if (err) { - IRDA_DEBUG(1, "%s(), error: no more mem!\n", __func__); + pr_debug("%s(), error: no more mem!\n", __func__); self->rx_flow = FLOW_STOP; /* When we return error, TTP will need to requeue the skb */ @@ -115,7 +113,7 @@ static void irda_disconnect_indication(void *instance, void *sap, self = instance; - IRDA_DEBUG(2, "%s(%p)\n", __func__, self); + pr_debug("%s(%p)\n", __func__, self); /* Don't care about it, but let's not leak it */ if(skb) @@ -123,8 +121,8 @@ static void irda_disconnect_indication(void *instance, void *sap, sk = instance; if (sk == NULL) { - IRDA_DEBUG(0, "%s(%p) : BUG : sk is NULL\n", - __func__, self); + pr_debug("%s(%p) : BUG : sk is NULL\n", + __func__, self); return; } @@ -180,7 +178,7 @@ static void irda_connect_confirm(void *instance, void *sap, self = instance; - 
IRDA_DEBUG(2, "%s(%p)\n", __func__, self); + pr_debug("%s(%p)\n", __func__, self); sk = instance; if (sk == NULL) { @@ -201,16 +199,16 @@ static void irda_connect_confirm(void *instance, void *sap, switch (sk->sk_type) { case SOCK_STREAM: if (max_sdu_size != 0) { - IRDA_ERROR("%s: max_sdu_size must be 0\n", - __func__); + net_err_ratelimited("%s: max_sdu_size must be 0\n", + __func__); return; } self->max_data_size = irttp_get_max_seg_size(self->tsap); break; case SOCK_SEQPACKET: if (max_sdu_size == 0) { - IRDA_ERROR("%s: max_sdu_size cannot be 0\n", - __func__); + net_err_ratelimited("%s: max_sdu_size cannot be 0\n", + __func__); return; } self->max_data_size = max_sdu_size; @@ -219,8 +217,8 @@ static void irda_connect_confirm(void *instance, void *sap, self->max_data_size = irttp_get_max_seg_size(self->tsap); } - IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __func__, - self->max_data_size); + pr_debug("%s(), max_data_size=%d\n", __func__, + self->max_data_size); memcpy(&self->qos_tx, qos, sizeof(struct qos_info)); @@ -244,7 +242,7 @@ static void irda_connect_indication(void *instance, void *sap, self = instance; - IRDA_DEBUG(2, "%s(%p)\n", __func__, self); + pr_debug("%s(%p)\n", __func__, self); sk = instance; if (sk == NULL) { @@ -262,8 +260,8 @@ static void irda_connect_indication(void *instance, void *sap, switch (sk->sk_type) { case SOCK_STREAM: if (max_sdu_size != 0) { - IRDA_ERROR("%s: max_sdu_size must be 0\n", - __func__); + net_err_ratelimited("%s: max_sdu_size must be 0\n", + __func__); kfree_skb(skb); return; } @@ -271,8 +269,8 @@ static void irda_connect_indication(void *instance, void *sap, break; case SOCK_SEQPACKET: if (max_sdu_size == 0) { - IRDA_ERROR("%s: max_sdu_size cannot be 0\n", - __func__); + net_err_ratelimited("%s: max_sdu_size cannot be 0\n", + __func__); kfree_skb(skb); return; } @@ -282,8 +280,8 @@ static void irda_connect_indication(void *instance, void *sap, self->max_data_size = irttp_get_max_seg_size(self->tsap); } - IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __func__, - self->max_data_size); + pr_debug("%s(), max_data_size=%d\n", __func__, + self->max_data_size); memcpy(&self->qos_tx, qos, sizeof(struct qos_info)); @@ -301,12 +299,10 @@ static void irda_connect_response(struct irda_sock *self) { struct sk_buff *skb; - IRDA_DEBUG(2, "%s()\n", __func__); - skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER, GFP_KERNEL); if (skb == NULL) { - IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n", - __func__); + pr_debug("%s() Unable to allocate sk_buff!\n", + __func__); return; } @@ -327,26 +323,24 @@ static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow) struct irda_sock *self; struct sock *sk; - IRDA_DEBUG(2, "%s()\n", __func__); - self = instance; sk = instance; BUG_ON(sk == NULL); switch (flow) { case FLOW_STOP: - IRDA_DEBUG(1, "%s(), IrTTP wants us to slow down\n", - __func__); + pr_debug("%s(), IrTTP wants us to slow down\n", + __func__); self->tx_flow = flow; break; case FLOW_START: self->tx_flow = flow; - IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n", - __func__); + pr_debug("%s(), IrTTP wants us to start again\n", + __func__); wake_up_interruptible(sk_sleep(sk)); break; default: - IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __func__); + pr_debug("%s(), Unknown flow command!\n", __func__); /* Unknown flow command, better stop */ self->tx_flow = flow; break; @@ -368,11 +362,11 @@ static void irda_getvalue_confirm(int result, __u16 obj_id, self = priv; if (!self) { - IRDA_WARNING("%s: lost myself!\n", __func__); + 
net_warn_ratelimited("%s: lost myself!\n", __func__); return; } - IRDA_DEBUG(2, "%s(%p)\n", __func__, self); + pr_debug("%s(%p)\n", __func__, self); /* We probably don't need to make any more queries */ iriap_close(self->iriap); @@ -380,8 +374,8 @@ static void irda_getvalue_confirm(int result, __u16 obj_id, /* Check if request succeeded */ if (result != IAS_SUCCESS) { - IRDA_DEBUG(1, "%s(), IAS query failed! (%d)\n", __func__, - result); + pr_debug("%s(), IAS query failed! (%d)\n", __func__, + result); self->errno = result; /* We really need it later */ @@ -413,11 +407,9 @@ static void irda_selective_discovery_indication(discinfo_t *discovery, { struct irda_sock *self; - IRDA_DEBUG(2, "%s()\n", __func__); - self = priv; if (!self) { - IRDA_WARNING("%s: lost myself!\n", __func__); + net_warn_ratelimited("%s: lost myself!\n", __func__); return; } @@ -440,8 +432,6 @@ static void irda_discovery_timeout(u_long priv) { struct irda_sock *self; - IRDA_DEBUG(2, "%s()\n", __func__); - self = (struct irda_sock *) priv; BUG_ON(self == NULL); @@ -465,7 +455,7 @@ static int irda_open_tsap(struct irda_sock *self, __u8 tsap_sel, char *name) notify_t notify; if (self->tsap) { - IRDA_DEBUG(0, "%s: busy!\n", __func__); + pr_debug("%s: busy!\n", __func__); return -EBUSY; } @@ -483,8 +473,8 @@ static int irda_open_tsap(struct irda_sock *self, __u8 tsap_sel, char *name) self->tsap = irttp_open_tsap(tsap_sel, DEFAULT_INITIAL_CREDIT, ¬ify); if (self->tsap == NULL) { - IRDA_DEBUG(0, "%s(), Unable to allocate TSAP!\n", - __func__); + pr_debug("%s(), Unable to allocate TSAP!\n", + __func__); return -ENOMEM; } /* Remember which TSAP selector we actually got */ @@ -505,7 +495,7 @@ static int irda_open_lsap(struct irda_sock *self, int pid) notify_t notify; if (self->lsap) { - IRDA_WARNING("%s(), busy!\n", __func__); + net_warn_ratelimited("%s(), busy!\n", __func__); return -EBUSY; } @@ -517,7 +507,7 @@ static int irda_open_lsap(struct irda_sock *self, int pid) self->lsap = irlmp_open_lsap(LSAP_CONNLESS, ¬ify, pid); if (self->lsap == NULL) { - IRDA_DEBUG( 0, "%s(), Unable to allocate LSAP!\n", __func__); + pr_debug("%s(), Unable to allocate LSAP!\n", __func__); return -ENOMEM; } @@ -538,11 +528,11 @@ static int irda_open_lsap(struct irda_sock *self, int pid) */ static int irda_find_lsap_sel(struct irda_sock *self, char *name) { - IRDA_DEBUG(2, "%s(%p, %s)\n", __func__, self, name); + pr_debug("%s(%p, %s)\n", __func__, self, name); if (self->iriap) { - IRDA_WARNING("%s(): busy with a previous query\n", - __func__); + net_warn_ratelimited("%s(): busy with a previous query\n", + __func__); return -EBUSY; } @@ -577,8 +567,8 @@ static int irda_find_lsap_sel(struct irda_sock *self, char *name) /* Get the remote TSAP selector */ switch (self->ias_result->type) { case IAS_INTEGER: - IRDA_DEBUG(4, "%s() int=%d\n", - __func__, self->ias_result->t.integer); + pr_debug("%s() int=%d\n", + __func__, self->ias_result->t.integer); if (self->ias_result->t.integer != -1) self->dtsap_sel = self->ias_result->t.integer; @@ -587,7 +577,7 @@ static int irda_find_lsap_sel(struct irda_sock *self, char *name) break; default: self->dtsap_sel = 0; - IRDA_DEBUG(0, "%s(), bad type!\n", __func__); + pr_debug("%s(), bad type!\n", __func__); break; } if (self->ias_result) @@ -625,7 +615,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) __u32 daddr = DEV_ADDR_ANY; /* Address we found the service on */ __u8 dtsap_sel = 0x0; /* TSAP associated with it */ - IRDA_DEBUG(2, "%s(), name=%s\n", __func__, name); + 
pr_debug("%s(), name=%s\n", __func__, name); /* Ask lmp for the current discovery log * Note : we have to use irlmp_get_discoveries(), as opposed @@ -646,8 +636,8 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) /* Try the address in the log */ self->daddr = discoveries[i].daddr; self->saddr = 0x0; - IRDA_DEBUG(1, "%s(), trying daddr = %08x\n", - __func__, self->daddr); + pr_debug("%s(), trying daddr = %08x\n", + __func__, self->daddr); /* Query remote LM-IAS for this service */ err = irda_find_lsap_sel(self, name); @@ -655,8 +645,8 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) case 0: /* We found the requested service */ if(daddr != DEV_ADDR_ANY) { - IRDA_DEBUG(1, "%s(), discovered service ''%s'' in two different devices !!!\n", - __func__, name); + pr_debug("%s(), discovered service ''%s'' in two different devices !!!\n", + __func__, name); self->daddr = DEV_ADDR_ANY; kfree(discoveries); return -ENOTUNIQ; @@ -670,7 +660,8 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) break; default: /* Something bad did happen :-( */ - IRDA_DEBUG(0, "%s(), unexpected IAS query failure\n", __func__); + pr_debug("%s(), unexpected IAS query failure\n", + __func__); self->daddr = DEV_ADDR_ANY; kfree(discoveries); return -EHOSTUNREACH; @@ -681,8 +672,8 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) /* Check out what we found */ if(daddr == DEV_ADDR_ANY) { - IRDA_DEBUG(1, "%s(), cannot discover service ''%s'' in any device !!!\n", - __func__, name); + pr_debug("%s(), cannot discover service ''%s'' in any device !!!\n", + __func__, name); self->daddr = DEV_ADDR_ANY; return -EADDRNOTAVAIL; } @@ -692,8 +683,8 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) self->saddr = 0x0; self->dtsap_sel = dtsap_sel; - IRDA_DEBUG(1, "%s(), discovered requested service ''%s'' at address %08x\n", - __func__, name, self->daddr); + pr_debug("%s(), discovered requested service ''%s'' at address %08x\n", + __func__, name, self->daddr); return 0; } @@ -725,8 +716,8 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr, saddr.sir_addr = self->saddr; } - IRDA_DEBUG(1, "%s(), tsap_sel = %#x\n", __func__, saddr.sir_lsap_sel); - IRDA_DEBUG(1, "%s(), addr = %08x\n", __func__, saddr.sir_addr); + pr_debug("%s(), tsap_sel = %#x\n", __func__, saddr.sir_lsap_sel); + pr_debug("%s(), addr = %08x\n", __func__, saddr.sir_addr); /* uaddr_len come to us uninitialised */ *uaddr_len = sizeof (struct sockaddr_irda); @@ -746,8 +737,6 @@ static int irda_listen(struct socket *sock, int backlog) struct sock *sk = sock->sk; int err = -EOPNOTSUPP; - IRDA_DEBUG(2, "%s()\n", __func__); - lock_sock(sk); if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) && @@ -779,7 +768,7 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct irda_sock *self = irda_sk(sk); int err; - IRDA_DEBUG(2, "%s(%p)\n", __func__, self); + pr_debug("%s(%p)\n", __func__, self); if (addr_len != sizeof(struct sockaddr_irda)) return -EINVAL; @@ -792,7 +781,8 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) self->pid = addr->sir_lsap_sel; err = -EOPNOTSUPP; if (self->pid & 0x80) { - IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__); + pr_debug("%s(), extension in PID not supp!\n", + __func__); goto out; } err = irda_open_lsap(self, self->pid); @@ -845,8 +835,6 @@ static int irda_accept(struct socket *sock, 
struct socket *newsock, int flags) struct sk_buff *skb; int err; - IRDA_DEBUG(2, "%s()\n", __func__); - err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0); if (err) return err; @@ -911,7 +899,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) new->tsap = irttp_dup(self->tsap, new); err = -EPERM; /* value does not seem to make sense. -arnd */ if (!new->tsap) { - IRDA_DEBUG(0, "%s(), dup failed!\n", __func__); + pr_debug("%s(), dup failed!\n", __func__); kfree_skb(skb); goto out; } @@ -971,7 +959,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, struct irda_sock *self = irda_sk(sk); int err; - IRDA_DEBUG(2, "%s(%p)\n", __func__, self); + pr_debug("%s(%p)\n", __func__, self); lock_sock(sk); /* Don't allow connect for Ultra sockets */ @@ -1007,13 +995,13 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, /* Try to find one suitable */ err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name); if (err) { - IRDA_DEBUG(0, "%s(), auto-connect failed!\n", __func__); + pr_debug("%s(), auto-connect failed!\n", __func__); goto out; } } else { /* Use the one provided by the user */ self->daddr = addr->sir_addr; - IRDA_DEBUG(1, "%s(), daddr = %08x\n", __func__, self->daddr); + pr_debug("%s(), daddr = %08x\n", __func__, self->daddr); /* If we don't have a valid service name, we assume the * user want to connect on a specific LSAP. Prevent @@ -1023,7 +1011,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, /* Query remote LM-IAS using service name */ err = irda_find_lsap_sel(self, addr->sir_name); if (err) { - IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); + pr_debug("%s(), connect failed!\n", __func__); goto out; } } else { @@ -1048,7 +1036,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, self->saddr, self->daddr, NULL, self->max_sdu_size_rx, NULL); if (err) { - IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); + pr_debug("%s(), connect failed!\n", __func__); goto out; } @@ -1098,8 +1086,6 @@ static int irda_create(struct net *net, struct socket *sock, int protocol, struct sock *sk; struct irda_sock *self; - IRDA_DEBUG(2, "%s()\n", __func__); - if (net != &init_net) return -EAFNOSUPPORT; @@ -1119,7 +1105,7 @@ static int irda_create(struct net *net, struct socket *sock, int protocol, return -ENOMEM; self = irda_sk(sk); - IRDA_DEBUG(2, "%s() : self is %p\n", __func__, self); + pr_debug("%s() : self is %p\n", __func__, self); init_waitqueue_head(&self->query_wait); @@ -1181,7 +1167,7 @@ static int irda_create(struct net *net, struct socket *sock, int protocol, */ static void irda_destroy_socket(struct irda_sock *self) { - IRDA_DEBUG(2, "%s(%p)\n", __func__, self); + pr_debug("%s(%p)\n", __func__, self); /* Unregister with IrLMP */ irlmp_unregister_client(self->ckey); @@ -1218,8 +1204,6 @@ static int irda_release(struct socket *sock) { struct sock *sk = sock->sk; - IRDA_DEBUG(2, "%s()\n", __func__); - if (sk == NULL) return 0; @@ -1286,7 +1270,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb; int err = -EPIPE; - IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); + pr_debug("%s(), len=%zd\n", __func__, len); /* Note : socket.c set MSG_EOR on SEQPACKET sockets */ if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT | @@ -1322,8 +1306,8 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, /* Check that we don't send out too big frames */ if (len > self->max_data_size) { - IRDA_DEBUG(2, "%s(), Chopping 
frame from %zd to %d bytes!\n", - __func__, len, self->max_data_size); + pr_debug("%s(), Chopping frame from %zd to %d bytes!\n", + __func__, len, self->max_data_size); len = self->max_data_size; } @@ -1335,7 +1319,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, skb_reserve(skb, self->max_header_size + 16); skb_reset_transport_header(skb); skb_put(skb, len); - err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); + err = memcpy_from_msg(skb_transport_header(skb), msg, len); if (err) { kfree_skb(skb); goto out_err; @@ -1347,7 +1331,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, */ err = irttp_data_request(self->tsap, skb); if (err) { - IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); + pr_debug("%s(), err=%d\n", __func__, err); goto out_err; } @@ -1378,8 +1362,6 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, size_t copied; int err; - IRDA_DEBUG(4, "%s()\n", __func__); - skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb) @@ -1389,12 +1371,12 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, copied = skb->len; if (copied > size) { - IRDA_DEBUG(2, "%s(), Received truncated frame (%zd < %zd)!\n", - __func__, copied, size); + pr_debug("%s(), Received truncated frame (%zd < %zd)!\n", + __func__, copied, size); copied = size; msg->msg_flags |= MSG_TRUNC; } - skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + skb_copy_datagram_msg(skb, 0, msg, copied); skb_free_datagram(sk, skb); @@ -1406,7 +1388,7 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, */ if (self->rx_flow == FLOW_STOP) { if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) { - IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __func__); + pr_debug("%s(), Starting IrTTP\n", __func__); self->rx_flow = FLOW_START; irttp_flow_request(self->tsap, FLOW_START); } @@ -1428,8 +1410,6 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, int target, err; long timeo; - IRDA_DEBUG(3, "%s()\n", __func__); - if ((err = sock_error(sk)) < 0) return err; @@ -1486,7 +1466,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, } chunk = min_t(unsigned int, skb->len, size); - if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { + if (memcpy_to_msg(msg, skb->data, chunk)) { skb_queue_head(&sk->sk_receive_queue, skb); if (copied == 0) copied = -EFAULT; @@ -1501,15 +1481,15 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, /* put the skb back if we didn't use it up.. 
*/ if (skb->len) { - IRDA_DEBUG(1, "%s(), back on q!\n", - __func__); + pr_debug("%s(), back on q!\n", + __func__); skb_queue_head(&sk->sk_receive_queue, skb); break; } kfree_skb(skb); } else { - IRDA_DEBUG(0, "%s() questionable!?\n", __func__); + pr_debug("%s() questionable!?\n", __func__); /* put message back and return */ skb_queue_head(&sk->sk_receive_queue, skb); @@ -1525,7 +1505,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, */ if (self->rx_flow == FLOW_STOP) { if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) { - IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __func__); + pr_debug("%s(), Starting IrTTP\n", __func__); self->rx_flow = FLOW_START; irttp_flow_request(self->tsap, FLOW_START); } @@ -1549,7 +1529,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb; int err; - IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); + pr_debug("%s(), len=%zd\n", __func__, len); if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) return -EINVAL; @@ -1573,9 +1553,8 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock, * service, so we have no fragmentation and no coalescence */ if (len > self->max_data_size) { - IRDA_DEBUG(0, "%s(), Warning to much data! " - "Chopping frame from %zd to %d bytes!\n", - __func__, len, self->max_data_size); + pr_debug("%s(), Warning too much data! Chopping frame from %zd to %d bytes!\n", + __func__, len, self->max_data_size); len = self->max_data_size; } @@ -1588,9 +1567,9 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock, skb_reserve(skb, self->max_header_size); skb_reset_transport_header(skb); - IRDA_DEBUG(4, "%s(), appending user data\n", __func__); + pr_debug("%s(), appending user data\n", __func__); skb_put(skb, len); - err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); + err = memcpy_from_msg(skb_transport_header(skb), msg, len); if (err) { kfree_skb(skb); goto out; @@ -1602,7 +1581,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock, */ err = irttp_udata_request(self->tsap, skb); if (err) { - IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); + pr_debug("%s(), err=%d\n", __func__, err); goto out; } @@ -1631,7 +1610,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb; int err; - IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); + pr_debug("%s(), len=%zd\n", __func__, len); err = -EINVAL; if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) @@ -1659,7 +1638,8 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, pid = addr->sir_lsap_sel; if (pid & 0x80) { - IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__); + pr_debug("%s(), extension in PID not supp!\n", + __func__); err = -EOPNOTSUPP; goto out; } @@ -1668,8 +1648,8 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, * port. Jean II */ if ((self->lsap == NULL) || (sk->sk_state != TCP_ESTABLISHED)) { - IRDA_DEBUG(0, "%s(), socket not bound to Ultra PID.\n", - __func__); + pr_debug("%s(), socket not bound to Ultra PID.\n", + __func__); err = -ENOTCONN; goto out; } @@ -1682,9 +1662,8 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, * service, so we have no fragmentation and no coalescence */ if (len > self->max_data_size) { - IRDA_DEBUG(0, "%s(), Warning to much data! " - "Chopping frame from %zd to %d bytes!\n", - __func__, len, self->max_data_size); + pr_debug("%s(), Warning too much data! 
Chopping frame from %zd to %d bytes!\n", + __func__, len, self->max_data_size); len = self->max_data_size; } @@ -1697,9 +1676,9 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, skb_reserve(skb, self->max_header_size); skb_reset_transport_header(skb); - IRDA_DEBUG(4, "%s(), appending user data\n", __func__); + pr_debug("%s(), appending user data\n", __func__); skb_put(skb, len); - err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); + err = memcpy_from_msg(skb_transport_header(skb), msg, len); if (err) { kfree_skb(skb); goto out; @@ -1708,7 +1687,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, err = irlmp_connless_data_request((bound ? self->lsap : NULL), skb, pid); if (err) - IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); + pr_debug("%s(), err=%d\n", __func__, err); out: release_sock(sk); return err ? : len; @@ -1723,7 +1702,7 @@ static int irda_shutdown(struct socket *sock, int how) struct sock *sk = sock->sk; struct irda_sock *self = irda_sk(sk); - IRDA_DEBUG(1, "%s(%p)\n", __func__, self); + pr_debug("%s(%p)\n", __func__, self); lock_sock(sk); @@ -1762,8 +1741,6 @@ static unsigned int irda_poll(struct file * file, struct socket *sock, struct irda_sock *self = irda_sk(sk); unsigned int mask; - IRDA_DEBUG(4, "%s()\n", __func__); - poll_wait(file, sk_sleep(sk), wait); mask = 0; @@ -1771,13 +1748,13 @@ static unsigned int irda_poll(struct file * file, struct socket *sock, if (sk->sk_err) mask |= POLLERR; if (sk->sk_shutdown & RCV_SHUTDOWN) { - IRDA_DEBUG(0, "%s(), POLLHUP\n", __func__); + pr_debug("%s(), POLLHUP\n", __func__); mask |= POLLHUP; } /* Readable? */ if (!skb_queue_empty(&sk->sk_receive_queue)) { - IRDA_DEBUG(4, "Socket is readable\n"); + pr_debug("Socket is readable\n"); mask |= POLLIN | POLLRDNORM; } @@ -1785,7 +1762,7 @@ static unsigned int irda_poll(struct file * file, struct socket *sock, switch (sk->sk_type) { case SOCK_STREAM: if (sk->sk_state == TCP_CLOSE) { - IRDA_DEBUG(0, "%s(), POLLHUP\n", __func__); + pr_debug("%s(), POLLHUP\n", __func__); mask |= POLLHUP; } @@ -1823,7 +1800,7 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) struct sock *sk = sock->sk; int err; - IRDA_DEBUG(4, "%s(), cmd=%#x\n", __func__, cmd); + pr_debug("%s(), cmd=%#x\n", __func__, cmd); err = -EINVAL; switch (cmd) { @@ -1864,7 +1841,7 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) case SIOCSIFMETRIC: break; default: - IRDA_DEBUG(1, "%s(), doing device ioctl!\n", __func__); + pr_debug("%s(), doing device ioctl!\n", __func__); err = -ENOIOCTLCMD; } @@ -1900,7 +1877,7 @@ static int irda_setsockopt(struct socket *sock, int level, int optname, struct ias_attrib * ias_attr; /* Attribute in IAS object */ int opt, free_ias = 0, err = 0; - IRDA_DEBUG(2, "%s(%p)\n", __func__, self); + pr_debug("%s(%p)\n", __func__, self); if (level != SOL_IRLMP) return -ENOPROTOOPT; @@ -2100,7 +2077,8 @@ static int irda_setsockopt(struct socket *sock, int level, int optname, /* Check is the user space own the object */ if(ias_attr->value->owner != IAS_USER_ATTR) { - IRDA_DEBUG(1, "%s(), attempting to delete a kernel attribute\n", __func__); + pr_debug("%s(), attempting to delete a kernel attribute\n", + __func__); kfree(ias_opt); err = -EPERM; goto out; @@ -2123,12 +2101,12 @@ static int irda_setsockopt(struct socket *sock, int level, int optname, /* Only possible for a seqpacket service (TTP with SAR) */ if (sk->sk_type != SOCK_SEQPACKET) { - IRDA_DEBUG(2, "%s(), setting max_sdu_size = 
%d\n", - __func__, opt); + pr_debug("%s(), setting max_sdu_size = %d\n", + __func__, opt); self->max_sdu_size_rx = opt; } else { - IRDA_WARNING("%s: not allowed to set MAXSDUSIZE for this socket type!\n", - __func__); + net_warn_ratelimited("%s: not allowed to set MAXSDUSIZE for this socket type!\n", + __func__); err = -ENOPROTOOPT; goto out; } @@ -2256,7 +2234,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname, int err = 0; int offset, total; - IRDA_DEBUG(2, "%s(%p)\n", __func__, self); + pr_debug("%s(%p)\n", __func__, self); if (level != SOL_IRLMP) return -ENOPROTOOPT; @@ -2439,8 +2417,8 @@ bed: /* Check that we can proceed with IAP */ if (self->iriap) { - IRDA_WARNING("%s: busy with a previous query\n", - __func__); + net_warn_ratelimited("%s: busy with a previous query\n", + __func__); kfree(ias_opt); err = -EBUSY; goto out; @@ -2544,7 +2522,8 @@ bed: /* Wait until a node is discovered */ if (!self->cachedaddr) { - IRDA_DEBUG(1, "%s(), nothing discovered yet, going to sleep...\n", __func__); + pr_debug("%s(), nothing discovered yet, going to sleep...\n", + __func__); /* Set watchdog timer to expire in ms. */ self->errno = 0; @@ -2560,14 +2539,14 @@ bed: /* If watchdog is still activated, kill it! */ del_timer(&(self->watchdog)); - IRDA_DEBUG(1, "%s(), ...waking up !\n", __func__); + pr_debug("%s(), ...waking up !\n", __func__); if (err != 0) goto out; } else - IRDA_DEBUG(1, "%s(), found immediately !\n", - __func__); + pr_debug("%s(), found immediately !\n", + __func__); /* Tell IrLMP that we have been notified */ irlmp_update_client(self->ckey, self->mask.word, diff --git a/net/irda/discovery.c b/net/irda/discovery.c index 6786e7f193d298ba0a0b46ed28d4144ad9084743..364d70aed068d8bcec4aa3d43e2314472aef307f 100644 --- a/net/irda/discovery.c +++ b/net/irda/discovery.c @@ -112,8 +112,6 @@ void irlmp_add_discovery_log(hashbin_t *cachelog, hashbin_t *log) { discovery_t *discovery; - IRDA_DEBUG(4, "%s()\n", __func__); - /* * If log is missing this means that IrLAP was unable to perform the * discovery, so restart discovery again with just the half timeout @@ -159,8 +157,6 @@ void irlmp_expire_discoveries(hashbin_t *log, __u32 saddr, int force) int i = 0; /* How many we expired */ IRDA_ASSERT(log != NULL, return;); - IRDA_DEBUG(4, "%s()\n", __func__); - spin_lock_irqsave(&log->hb_spinlock, flags); discovery = (discovery_t *) hashbin_get_first(log); @@ -232,10 +228,10 @@ void irlmp_dump_discoveries(hashbin_t *log) discovery = (discovery_t *) hashbin_get_first(log); while (discovery != NULL) { - IRDA_DEBUG(0, "Discovery:\n"); - IRDA_DEBUG(0, " daddr=%08x\n", discovery->data.daddr); - IRDA_DEBUG(0, " saddr=%08x\n", discovery->data.saddr); - IRDA_DEBUG(0, " nickname=%s\n", discovery->data.info); + pr_debug("Discovery:\n"); + pr_debug(" daddr=%08x\n", discovery->data.daddr); + pr_debug(" saddr=%08x\n", discovery->data.saddr); + pr_debug(" nickname=%s\n", discovery->data.info); discovery = (discovery_t *) hashbin_get_next(log); } diff --git a/net/irda/ircomm/ircomm_core.c b/net/irda/ircomm/ircomm_core.c index 4490a675b1bbe653117afe4fad0ee24d9dcb30f5..3af219545f6d8311edb805ca7cfd00535ea8daff 100644 --- a/net/irda/ircomm/ircomm_core.c +++ b/net/irda/ircomm/ircomm_core.c @@ -69,7 +69,8 @@ static int __init ircomm_init(void) { ircomm = hashbin_new(HB_LOCK); if (ircomm == NULL) { - IRDA_ERROR("%s(), can't allocate hashbin!\n", __func__); + net_err_ratelimited("%s(), can't allocate hashbin!\n", + __func__); return -ENOMEM; } @@ -83,15 +84,13 @@ static int __init ircomm_init(void) 
} #endif /* CONFIG_PROC_FS */ - IRDA_MESSAGE("IrCOMM protocol (Dag Brattli)\n"); + net_info_ratelimited("IrCOMM protocol (Dag Brattli)\n"); return 0; } static void __exit ircomm_cleanup(void) { - IRDA_DEBUG(2, "%s()\n", __func__ ); - hashbin_delete(ircomm, (FREE_FUNC) __ircomm_close); #ifdef CONFIG_PROC_FS @@ -110,8 +109,8 @@ struct ircomm_cb *ircomm_open(notify_t *notify, __u8 service_type, int line) struct ircomm_cb *self = NULL; int ret; - IRDA_DEBUG(2, "%s(), service_type=0x%02x\n", __func__ , - service_type); + pr_debug("%s(), service_type=0x%02x\n", __func__ , + service_type); IRDA_ASSERT(ircomm != NULL, return NULL;); @@ -154,8 +153,6 @@ EXPORT_SYMBOL(ircomm_open); */ static int __ircomm_close(struct ircomm_cb *self) { - IRDA_DEBUG(2, "%s()\n", __func__ ); - /* Disconnect link if any */ ircomm_do_event(self, IRCOMM_DISCONNECT_REQUEST, NULL, NULL); @@ -190,8 +187,6 @@ int ircomm_close(struct ircomm_cb *self) IRDA_ASSERT(self != NULL, return -EIO;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EIO;); - IRDA_DEBUG(0, "%s()\n", __func__ ); - entry = hashbin_remove(ircomm, self->line, NULL); IRDA_ASSERT(entry == self, return -1;); @@ -215,8 +210,6 @@ int ircomm_connect_request(struct ircomm_cb *self, __u8 dlsap_sel, struct ircomm_info info; int ret; - IRDA_DEBUG(2 , "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); @@ -242,8 +235,6 @@ EXPORT_SYMBOL(ircomm_connect_request); void ircomm_connect_indication(struct ircomm_cb *self, struct sk_buff *skb, struct ircomm_info *info) { - IRDA_DEBUG(2, "%s()\n", __func__ ); - /* * If there are any data hiding in the control channel, we must * deliver it first. The side effect is that the control channel @@ -254,7 +245,7 @@ void ircomm_connect_indication(struct ircomm_cb *self, struct sk_buff *skb, info->qos, info->max_data_size, info->max_header_size, skb); else { - IRDA_DEBUG(0, "%s(), missing handler\n", __func__ ); + pr_debug("%s(), missing handler\n", __func__); } } @@ -271,8 +262,6 @@ int ircomm_connect_response(struct ircomm_cb *self, struct sk_buff *userdata) IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); - IRDA_DEBUG(4, "%s()\n", __func__ ); - ret = ircomm_do_event(self, IRCOMM_CONNECT_RESPONSE, userdata, NULL); return ret; @@ -289,15 +278,13 @@ EXPORT_SYMBOL(ircomm_connect_response); void ircomm_connect_confirm(struct ircomm_cb *self, struct sk_buff *skb, struct ircomm_info *info) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - if (self->notify.connect_confirm ) self->notify.connect_confirm(self->notify.instance, self, info->qos, info->max_data_size, info->max_header_size, skb); else { - IRDA_DEBUG(0, "%s(), missing handler\n", __func__ ); + pr_debug("%s(), missing handler\n", __func__); } } @@ -311,8 +298,6 @@ int ircomm_data_request(struct ircomm_cb *self, struct sk_buff *skb) { int ret; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -EFAULT;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;); IRDA_ASSERT(skb != NULL, return -EFAULT;); @@ -332,14 +317,12 @@ EXPORT_SYMBOL(ircomm_data_request); */ void ircomm_data_indication(struct ircomm_cb *self, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(skb->len > 0, return;); if (self->notify.data_indication) self->notify.data_indication(self->notify.instance, self, skb); else { - IRDA_DEBUG(0, "%s(), missing handler\n", __func__ ); + pr_debug("%s(), missing handler\n", __func__); } } @@ -364,8 +347,8 @@ void 
ircomm_process_data(struct ircomm_cb *self, struct sk_buff *skb) * fine */ if (unlikely(skb->len < (clen + 1))) { - IRDA_DEBUG(2, "%s() throwing away illegal frame\n", - __func__ ); + pr_debug("%s() throwing away illegal frame\n", + __func__); return; } @@ -383,8 +366,8 @@ void ircomm_process_data(struct ircomm_cb *self, struct sk_buff *skb) if (skb->len) ircomm_data_indication(self, skb); else { - IRDA_DEBUG(4, "%s(), data was control info only!\n", - __func__ ); + pr_debug("%s(), data was control info only!\n", + __func__); } } @@ -398,8 +381,6 @@ int ircomm_control_request(struct ircomm_cb *self, struct sk_buff *skb) { int ret; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -EFAULT;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;); IRDA_ASSERT(skb != NULL, return -EFAULT;); @@ -420,8 +401,6 @@ EXPORT_SYMBOL(ircomm_control_request); static void ircomm_control_indication(struct ircomm_cb *self, struct sk_buff *skb, int clen) { - IRDA_DEBUG(2, "%s()\n", __func__ ); - /* Use udata for delivering data on the control channel */ if (self->notify.udata_indication) { struct sk_buff *ctrl_skb; @@ -441,7 +420,7 @@ static void ircomm_control_indication(struct ircomm_cb *self, * see ircomm_tty_control_indication(). */ dev_kfree_skb(ctrl_skb); } else { - IRDA_DEBUG(0, "%s(), missing handler\n", __func__ ); + pr_debug("%s(), missing handler\n", __func__); } } @@ -456,8 +435,6 @@ int ircomm_disconnect_request(struct ircomm_cb *self, struct sk_buff *userdata) struct ircomm_info info; int ret; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); @@ -477,15 +454,13 @@ EXPORT_SYMBOL(ircomm_disconnect_request); void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb, struct ircomm_info *info) { - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(info != NULL, return;); if (self->notify.disconnect_indication) { self->notify.disconnect_indication(self->notify.instance, self, info->reason, skb); } else { - IRDA_DEBUG(0, "%s(), missing handler\n", __func__ ); + pr_debug("%s(), missing handler\n", __func__); } } @@ -497,8 +472,6 @@ void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb, */ void ircomm_flow_request(struct ircomm_cb *self, LOCAL_FLOW flow) { - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); diff --git a/net/irda/ircomm/ircomm_event.c b/net/irda/ircomm/ircomm_event.c index b172c65223282888e90c1a45cf24383e63c33d8a..b0730ac9f3880e992d8e2b93f1f7ee60d974673c 100644 --- a/net/irda/ircomm/ircomm_event.c +++ b/net/irda/ircomm/ircomm_event.c @@ -54,8 +54,7 @@ const char *const ircomm_state[] = { "IRCOMM_CONN", }; -#ifdef CONFIG_IRDA_DEBUG -static const char *const ircomm_event[] = { +static const char *const ircomm_event[] __maybe_unused = { "IRCOMM_CONNECT_REQUEST", "IRCOMM_CONNECT_RESPONSE", "IRCOMM_TTP_CONNECT_INDICATION", @@ -73,7 +72,6 @@ static const char *const ircomm_event[] = { "IRCOMM_CONTROL_REQUEST", "IRCOMM_CONTROL_INDICATION", }; -#endif /* CONFIG_IRDA_DEBUG */ static int (*state[])(struct ircomm_cb *self, IRCOMM_EVENT event, struct sk_buff *skb, struct ircomm_info *info) = @@ -106,8 +104,8 @@ static int ircomm_state_idle(struct ircomm_cb *self, IRCOMM_EVENT event, ircomm_connect_indication(self, skb, info); break; default: - IRDA_DEBUG(4, "%s(), unknown event: %s\n", __func__ , - ircomm_event[event]); + pr_debug("%s(), unknown event: %s\n", __func__ , + 
ircomm_event[event]); ret = -EINVAL; } return ret; @@ -136,8 +134,8 @@ static int ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event, ircomm_disconnect_indication(self, skb, info); break; default: - IRDA_DEBUG(0, "%s(), unknown event: %s\n", __func__ , - ircomm_event[event]); + pr_debug("%s(), unknown event: %s\n", __func__ , + ircomm_event[event]); ret = -EINVAL; } return ret; @@ -169,8 +167,8 @@ static int ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event, ircomm_disconnect_indication(self, skb, info); break; default: - IRDA_DEBUG(0, "%s(), unknown event = %s\n", __func__ , - ircomm_event[event]); + pr_debug("%s(), unknown event = %s\n", __func__ , + ircomm_event[event]); ret = -EINVAL; } return ret; @@ -211,8 +209,8 @@ static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event, ret = self->issue.disconnect_request(self, skb, info); break; default: - IRDA_DEBUG(0, "%s(), unknown event = %s\n", __func__ , - ircomm_event[event]); + pr_debug("%s(), unknown event = %s\n", __func__ , + ircomm_event[event]); ret = -EINVAL; } return ret; @@ -227,8 +225,8 @@ static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event, int ircomm_do_event(struct ircomm_cb *self, IRCOMM_EVENT event, struct sk_buff *skb, struct ircomm_info *info) { - IRDA_DEBUG(4, "%s: state=%s, event=%s\n", __func__ , - ircomm_state[self->state], ircomm_event[event]); + pr_debug("%s: state=%s, event=%s\n", __func__ , + ircomm_state[self->state], ircomm_event[event]); return (*state[self->state])(self, event, skb, info); } @@ -243,6 +241,6 @@ void ircomm_next_state(struct ircomm_cb *self, IRCOMM_STATE state) { self->state = state; - IRDA_DEBUG(4, "%s: next state=%s, service type=%d\n", __func__ , - ircomm_state[self->state], self->service_type); + pr_debug("%s: next state=%s, service type=%d\n", __func__ , + ircomm_state[self->state], self->service_type); } diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c index 6536114adf37a285db123efc0aa7c945d460f318..e4cc847bb9332b3b291b70731efedda7c0f47989 100644 --- a/net/irda/ircomm/ircomm_lmp.c +++ b/net/irda/ircomm/ircomm_lmp.c @@ -52,8 +52,6 @@ static int ircomm_lmp_connect_request(struct ircomm_cb *self, { int ret = 0; - IRDA_DEBUG(0, "%s()\n", __func__ ); - /* Don't forget to refcount it - should be NULL anyway */ if(userdata) skb_get(userdata); @@ -74,8 +72,6 @@ static int ircomm_lmp_connect_response(struct ircomm_cb *self, { struct sk_buff *tx_skb; - IRDA_DEBUG(0, "%s()\n", __func__ ); - /* Any userdata supplied? 
*/ if (userdata == NULL) { tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC); @@ -107,8 +103,6 @@ static int ircomm_lmp_disconnect_request(struct ircomm_cb *self, struct sk_buff *tx_skb; int ret; - IRDA_DEBUG(0, "%s()\n", __func__ ); - if (!userdata) { tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC); if (!tx_skb) @@ -144,13 +138,11 @@ static void ircomm_lmp_flow_control(struct sk_buff *skb) cb = (struct irda_skb_cb *) skb->cb; - IRDA_DEBUG(2, "%s()\n", __func__ ); - line = cb->line; self = (struct ircomm_cb *) hashbin_lock_find(ircomm, line, NULL); if (!self) { - IRDA_DEBUG(2, "%s(), didn't find myself\n", __func__ ); + pr_debug("%s(), didn't find myself\n", __func__); return; } @@ -160,7 +152,7 @@ static void ircomm_lmp_flow_control(struct sk_buff *skb) self->pkt_count--; if ((self->pkt_count < 2) && (self->flow_status == FLOW_STOP)) { - IRDA_DEBUG(2, "%s(), asking TTY to start again!\n", __func__ ); + pr_debug("%s(), asking TTY to start again!\n", __func__); self->flow_status = FLOW_START; if (self->notify.flow_indication) self->notify.flow_indication(self->notify.instance, @@ -187,7 +179,7 @@ static int ircomm_lmp_data_request(struct ircomm_cb *self, cb->line = self->line; - IRDA_DEBUG(4, "%s(), sending frame\n", __func__ ); + pr_debug("%s(), sending frame\n", __func__); /* Don't forget to refcount it - see ircomm_tty_do_softint() */ skb_get(skb); @@ -196,7 +188,7 @@ static int ircomm_lmp_data_request(struct ircomm_cb *self, skb->destructor = ircomm_lmp_flow_control; if ((self->pkt_count++ > 7) && (self->flow_status == FLOW_START)) { - IRDA_DEBUG(2, "%s(), asking TTY to slow down!\n", __func__ ); + pr_debug("%s(), asking TTY to slow down!\n", __func__); self->flow_status = FLOW_STOP; if (self->notify.flow_indication) self->notify.flow_indication(self->notify.instance, @@ -204,7 +196,7 @@ static int ircomm_lmp_data_request(struct ircomm_cb *self, } ret = irlmp_data_request(self->lsap, skb); if (ret) { - IRDA_ERROR("%s(), failed\n", __func__); + net_err_ratelimited("%s(), failed\n", __func__); /* irlmp_data_request already free the packet */ } @@ -222,8 +214,6 @@ static int ircomm_lmp_data_indication(void *instance, void *sap, { struct ircomm_cb *self = (struct ircomm_cb *) instance; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); IRDA_ASSERT(skb != NULL, return -1;); @@ -252,8 +242,6 @@ static void ircomm_lmp_connect_confirm(void *instance, void *sap, struct ircomm_cb *self = (struct ircomm_cb *) instance; struct ircomm_info info; - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); @@ -285,8 +273,6 @@ static void ircomm_lmp_connect_indication(void *instance, void *sap, struct ircomm_cb *self = (struct ircomm_cb *)instance; struct ircomm_info info; - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); @@ -315,8 +301,6 @@ static void ircomm_lmp_disconnect_indication(void *instance, void *sap, struct ircomm_cb *self = (struct ircomm_cb *) instance; struct ircomm_info info; - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); @@ -338,8 +322,6 @@ int ircomm_open_lsap(struct ircomm_cb *self) { notify_t notify; - IRDA_DEBUG(0, "%s()\n", __func__ ); - /* Register callbacks */ irda_notify_init(&notify); notify.data_indication =
ircomm_lmp_data_indication; @@ -351,7 +333,7 @@ int ircomm_open_lsap(struct ircomm_cb *self) self->lsap = irlmp_open_lsap(LSAP_ANY, &notify, 0); if (!self->lsap) { - IRDA_DEBUG(0,"%sfailed to allocate tsap\n", __func__ ); + pr_debug("%s(), failed to allocate lsap\n", __func__); return -1; } self->slsap_sel = self->lsap->slsap_sel; diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c index f80b1a6a244bcadc63124d4a303adc8163b94667..3c4caa60c9265dd95f62d7a3aba84d40cdba0492 100644 --- a/net/irda/ircomm/ircomm_param.c +++ b/net/irda/ircomm/ircomm_param.c @@ -61,12 +61,12 @@ static int ircomm_param_dte(void *instance, irda_param_t *param, int get); static int ircomm_param_dce(void *instance, irda_param_t *param, int get); static int ircomm_param_poll(void *instance, irda_param_t *param, int get); -static pi_minor_info_t pi_minor_call_table_common[] = { +static const pi_minor_info_t pi_minor_call_table_common[] = { { ircomm_param_service_type, PV_INT_8_BITS }, { ircomm_param_port_type, PV_INT_8_BITS }, { ircomm_param_port_name, PV_STRING } }; -static pi_minor_info_t pi_minor_call_table_non_raw[] = { +static const pi_minor_info_t pi_minor_call_table_non_raw[] = { { ircomm_param_data_rate, PV_INT_32_BITS | PV_BIG_ENDIAN }, { ircomm_param_data_format, PV_INT_8_BITS }, { ircomm_param_flow_control, PV_INT_8_BITS }, @@ -74,13 +74,13 @@ static pi_minor_info_t pi_minor_call_table_non_raw[] = { { ircomm_param_enq_ack, PV_INT_16_BITS }, { ircomm_param_line_status, PV_INT_8_BITS } }; -static pi_minor_info_t pi_minor_call_table_9_wire[] = { +static const pi_minor_info_t pi_minor_call_table_9_wire[] = { { ircomm_param_dte, PV_INT_8_BITS }, { ircomm_param_dce, PV_INT_8_BITS }, { ircomm_param_poll, PV_NO_VALUE }, }; -static pi_major_info_t pi_major_call_table[] = { +static const pi_major_info_t pi_major_call_table[] = { { pi_minor_call_table_common, 3 }, { pi_minor_call_table_non_raw, 6 }, { pi_minor_call_table_9_wire, 3 } @@ -101,8 +101,6 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush) struct sk_buff *skb; int count; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); @@ -130,7 +128,8 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush) count = irda_param_insert(self, pi, skb_tail_pointer(skb), skb_tailroom(skb), &ircomm_param_info); if (count < 0) { - IRDA_WARNING("%s(), no room for parameter!\n", __func__); + net_warn_ratelimited("%s(), no room for parameter!\n", + __func__); spin_unlock_irqrestore(&self->spinlock, flags); return -1; } @@ -138,7 +137,7 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush) spin_unlock_irqrestore(&self->spinlock, flags); - IRDA_DEBUG(2, "%s(), skb->len=%d\n", __func__ , skb->len); + pr_debug("%s(), skb->len=%d\n", __func__ , skb->len); if (flush) { /* ircomm_tty_do_softint will take care of the rest */ @@ -172,12 +171,11 @@ static int ircomm_param_service_type(void *instance, irda_param_t *param, /* Find all common service types */ service_type &= self->service_type; if (!service_type) { - IRDA_DEBUG(2, - "%s(), No common service type to use!\n", __func__ ); + pr_debug("%s(), No common service type to use!\n", __func__); return -1; } - IRDA_DEBUG(0, "%s(), services in common=%02x\n", __func__ , - service_type); + pr_debug("%s(), services in common=%02x\n", __func__ , + service_type); /* * Now choose a preferred service type of those available @@ -191,8 +189,8 @@ static int ircomm_param_service_type(void
*instance, irda_param_t *param, else if (service_type & IRCOMM_3_WIRE_RAW) self->settings.service_type = IRCOMM_3_WIRE_RAW; - IRDA_DEBUG(0, "%s(), resulting service type=0x%02x\n", __func__ , - self->settings.service_type); + pr_debug("%s(), resulting service type=0x%02x\n", __func__ , + self->settings.service_type); /* * Now the line is ready for some communication. Check if we are a @@ -234,8 +232,8 @@ static int ircomm_param_port_type(void *instance, irda_param_t *param, int get) else { self->settings.port_type = (__u8) param->pv.i; - IRDA_DEBUG(0, "%s(), port type=%d\n", __func__ , - self->settings.port_type); + pr_debug("%s(), port type=%d\n", __func__ , + self->settings.port_type); } return 0; } @@ -254,9 +252,9 @@ static int ircomm_param_port_name(void *instance, irda_param_t *param, int get) IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); if (get) { - IRDA_DEBUG(0, "%s(), not imp!\n", __func__ ); + pr_debug("%s(), not imp!\n", __func__); } else { - IRDA_DEBUG(0, "%s(), port-name=%s\n", __func__ , param->pv.c); + pr_debug("%s(), port-name=%s\n", __func__ , param->pv.c); strncpy(self->settings.port_name, param->pv.c, 32); } @@ -281,7 +279,7 @@ static int ircomm_param_data_rate(void *instance, irda_param_t *param, int get) else self->settings.data_rate = param->pv.i; - IRDA_DEBUG(2, "%s(), data rate = %d\n", __func__ , param->pv.i); + pr_debug("%s(), data rate = %d\n", __func__ , param->pv.i); return 0; } @@ -327,7 +325,7 @@ static int ircomm_param_flow_control(void *instance, irda_param_t *param, else self->settings.flow_control = (__u8) param->pv.i; - IRDA_DEBUG(1, "%s(), flow control = 0x%02x\n", __func__ , (__u8) param->pv.i); + pr_debug("%s(), flow control = 0x%02x\n", __func__ , (__u8)param->pv.i); return 0; } @@ -353,8 +351,8 @@ static int ircomm_param_xon_xoff(void *instance, irda_param_t *param, int get) self->settings.xonxoff[1] = (__u16) param->pv.i >> 8; } - IRDA_DEBUG(0, "%s(), XON/XOFF = 0x%02x,0x%02x\n", __func__ , - param->pv.i & 0xff, param->pv.i >> 8); + pr_debug("%s(), XON/XOFF = 0x%02x,0x%02x\n", __func__ , + param->pv.i & 0xff, param->pv.i >> 8); return 0; } @@ -380,8 +378,8 @@ static int ircomm_param_enq_ack(void *instance, irda_param_t *param, int get) self->settings.enqack[1] = (__u16) param->pv.i >> 8; } - IRDA_DEBUG(0, "%s(), ENQ/ACK = 0x%02x,0x%02x\n", __func__ , - param->pv.i & 0xff, param->pv.i >> 8); + pr_debug("%s(), ENQ/ACK = 0x%02x,0x%02x\n", __func__ , + param->pv.i & 0xff, param->pv.i >> 8); return 0; } @@ -395,7 +393,7 @@ static int ircomm_param_enq_ack(void *instance, irda_param_t *param, int get) static int ircomm_param_line_status(void *instance, irda_param_t *param, int get) { - IRDA_DEBUG(2, "%s(), not impl.\n", __func__ ); + pr_debug("%s(), not impl.\n", __func__); return 0; } @@ -456,7 +454,7 @@ static int ircomm_param_dce(void *instance, irda_param_t *param, int get) struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; __u8 dce; - IRDA_DEBUG(1, "%s(), dce = 0x%02x\n", __func__ , (__u8) param->pv.i); + pr_debug("%s(), dce = 0x%02x\n", __func__ , (__u8)param->pv.i); dce = (__u8) param->pv.i; @@ -468,7 +466,7 @@ static int ircomm_param_dce(void *instance, irda_param_t *param, int get) /* Check if any of the settings have changed */ if (dce & 0x0f) { if (dce & IRCOMM_DELTA_CTS) { - IRDA_DEBUG(2, "%s(), CTS\n", __func__ ); + pr_debug("%s(), CTS\n", __func__); } } diff --git a/net/irda/ircomm/ircomm_ttp.c b/net/irda/ircomm/ircomm_ttp.c index d362d711b79c8cb36283667387e142538b1e515a..4b81e0934770b3283b4bf36b8677fa943b19fc46 
100644 --- a/net/irda/ircomm/ircomm_ttp.c +++ b/net/irda/ircomm/ircomm_ttp.c @@ -76,8 +76,6 @@ int ircomm_open_tsap(struct ircomm_cb *self) { notify_t notify; - IRDA_DEBUG(4, "%s()\n", __func__ ); - /* Register callbacks */ irda_notify_init(&notify); notify.data_indication = ircomm_ttp_data_indication; @@ -91,7 +89,7 @@ int ircomm_open_tsap(struct ircomm_cb *self) self->tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify); if (!self->tsap) { - IRDA_DEBUG(0, "%sfailed to allocate tsap\n", __func__ ); + pr_debug("%s(), failed to allocate tsap\n", __func__); return -1; } self->slsap_sel = self->tsap->stsap_sel; @@ -119,8 +117,6 @@ static int ircomm_ttp_connect_request(struct ircomm_cb *self, { int ret = 0; - IRDA_DEBUG(4, "%s()\n", __func__ ); - /* Don't forget to refcount it - should be NULL anyway */ if(userdata) skb_get(userdata); @@ -143,8 +139,6 @@ static int ircomm_ttp_connect_response(struct ircomm_cb *self, { int ret; - IRDA_DEBUG(4, "%s()\n", __func__ ); - /* Don't forget to refcount it - should be NULL anyway */ if(userdata) skb_get(userdata); @@ -171,7 +165,7 @@ static int ircomm_ttp_data_request(struct ircomm_cb *self, IRDA_ASSERT(skb != NULL, return -1;); - IRDA_DEBUG(2, "%s(), clen=%d\n", __func__ , clen); + pr_debug("%s(), clen=%d\n", __func__ , clen); /* * Insert clen field, currently we either send data only, or control @@ -188,7 +182,7 @@ static int ircomm_ttp_data_request(struct ircomm_cb *self, ret = irttp_data_request(self->tsap, skb); if (ret) { - IRDA_ERROR("%s(), failed\n", __func__); + net_err_ratelimited("%s(), failed\n", __func__); /* irttp_data_request already free the packet */ } @@ -206,8 +200,6 @@ static int ircomm_ttp_data_indication(void *instance, void *sap, { struct ircomm_cb *self = (struct ircomm_cb *) instance; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); IRDA_ASSERT(skb != NULL, return -1;); @@ -229,16 +221,14 @@ static void ircomm_ttp_connect_confirm(void *instance, void *sap, struct ircomm_cb *self = (struct ircomm_cb *) instance; struct ircomm_info info; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); IRDA_ASSERT(qos != NULL, goto out;); if (max_sdu_size != TTP_SAR_DISABLE) { - IRDA_ERROR("%s(), SAR not allowed for IrCOMM!\n", - __func__); + net_err_ratelimited("%s(), SAR not allowed for IrCOMM!\n", + __func__); goto out; } @@ -270,16 +260,14 @@ static void ircomm_ttp_connect_indication(void *instance, void *sap, struct ircomm_cb *self = (struct ircomm_cb *)instance; struct ircomm_info info; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); IRDA_ASSERT(qos != NULL, goto out;); if (max_sdu_size != TTP_SAR_DISABLE) { - IRDA_ERROR("%s(), SAR not allowed for IrCOMM!\n", - __func__); + net_err_ratelimited("%s(), SAR not allowed for IrCOMM!\n", + __func__); goto out; } @@ -329,8 +317,6 @@ static void ircomm_ttp_disconnect_indication(void *instance, void *sap, struct ircomm_cb *self = (struct ircomm_cb *) instance; struct ircomm_info info; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); @@ -354,8 +340,6 @@ static void ircomm_ttp_flow_indication(void *instance, void *sap, { struct ircomm_cb *self = (struct ircomm_cb *) instance; - IRDA_DEBUG(4, "%s()\n", __func__ ); -
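The ircomm_ttp.c hunks above show the pattern this whole series applies: bare "%s()" entry traces are deleted outright, remaining IRDA_DEBUG() calls become pr_debug(), and IRDA_ERROR()/IRDA_WARNING() become net_err_ratelimited()/net_warn_ratelimited(). A minimal sketch of how the two resulting macro families behave, assuming a kernel-module context; the module name and functions are illustrative, not part of the patch:

#include <linux/module.h>
#include <linux/net.h>		/* net_warn_ratelimited() and friends */
#include <linux/printk.h>	/* pr_debug() */

static int __init logdemo_init(void)
{
	/*
	 * Compiled out unless DEBUG is defined, or enabled at run time
	 * under CONFIG_DYNAMIC_DEBUG, e.g.:
	 *   echo 'module logdemo +p' > /sys/kernel/debug/dynamic_debug/control
	 */
	pr_debug("%s(), loading\n", __func__);

	/*
	 * Always logged, but throttled through net_ratelimit() so a
	 * misbehaving peer cannot flood the kernel log.
	 */
	net_warn_ratelimited("%s(), rate-limited warning\n", __func__);
	return 0;
}

static void __exit logdemo_exit(void)
{
}

module_init(logdemo_init);
module_exit(logdemo_exit);
MODULE_LICENSE("GPL");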
IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index 61ceb4cdb4a262d56475f7a0ddb16d0e42672fc8..40695b9751c10b41e7fd310fa5d15bcef4629549 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c @@ -147,7 +147,8 @@ static int __init ircomm_tty_init(void) return -ENOMEM; ircomm_tty = hashbin_new(HB_LOCK); if (ircomm_tty == NULL) { - IRDA_ERROR("%s(), can't allocate hashbin!\n", __func__); + net_err_ratelimited("%s(), can't allocate hashbin!\n", + __func__); put_tty_driver(driver); return -ENOMEM; } @@ -163,8 +164,8 @@ static int __init ircomm_tty_init(void) driver->flags = TTY_DRIVER_REAL_RAW; tty_set_operations(driver, &ops); if (tty_register_driver(driver)) { - IRDA_ERROR("%s(): Couldn't register serial driver\n", - __func__); + net_err_ratelimited("%s(): Couldn't register serial driver\n", + __func__); put_tty_driver(driver); return -1; } @@ -173,8 +174,6 @@ static int __init ircomm_tty_init(void) static void __exit __ircomm_tty_cleanup(struct ircomm_tty_cb *self) { - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -195,12 +194,10 @@ static void __exit ircomm_tty_cleanup(void) { int ret; - IRDA_DEBUG(4, "%s()\n", __func__ ); - ret = tty_unregister_driver(driver); if (ret) { - IRDA_ERROR("%s(), failed to unregister driver\n", - __func__); + net_err_ratelimited("%s(), failed to unregister driver\n", + __func__); return; } @@ -219,14 +216,12 @@ static int ircomm_tty_startup(struct ircomm_tty_cb *self) notify_t notify; int ret = -ENODEV; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); /* Check if already open */ if (test_and_set_bit(ASYNCB_INITIALIZED, &self->port.flags)) { - IRDA_DEBUG(2, "%s(), already open so break out!\n", __func__ ); + pr_debug("%s(), already open so break out!\n", __func__); return 0; } @@ -256,7 +251,7 @@ static int ircomm_tty_startup(struct ircomm_tty_cb *self) /* Connect IrCOMM link with remote device */ ret = ircomm_tty_attach_cable(self); if (ret < 0) { - IRDA_ERROR("%s(), error attaching cable!\n", __func__); + net_err_ratelimited("%s(), error attaching cable!\n", __func__); goto err; } @@ -281,8 +276,6 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, int do_clocal = 0; unsigned long flags; - IRDA_DEBUG(2, "%s()\n", __func__ ); - /* * If non-blocking mode is set, or the port is not enabled, * then make the check up front and then exit. 
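Note that the ircomm_tty_install() hunk above drops its "kmalloc failed!" message instead of converting it: the kmalloc-family allocators already log a diagnostic and a stack dump on failure (unless __GFP_NOWARN is passed), so per-callsite OOM messages are redundant and checkpatch flags them. A sketch of the resulting idiom, with hypothetical names:

#include <linux/slab.h>

struct demo_cb {
	int line;
};

static struct demo_cb *demo_cb_alloc(int line)
{
	struct demo_cb *self;

	/* No private "out of memory" printk here: the allocator already
	 * warns on failure, so just propagate NULL to the caller. */
	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self)
		return NULL;

	self->line = line;
	return self;
}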
@@ -297,12 +290,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, if (tty->termios.c_cflag & CBAUD) tty_port_raise_dtr_rts(port); port->flags |= ASYNC_NORMAL_ACTIVE; - IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ ); + pr_debug("%s(), O_NONBLOCK requested!\n", __func__); return 0; } if (tty->termios.c_cflag & CLOCAL) { - IRDA_DEBUG(1, "%s(), doing CLOCAL!\n", __func__ ); + pr_debug("%s(), doing CLOCAL!\n", __func__); do_clocal = 1; } @@ -316,8 +309,8 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, retval = 0; add_wait_queue(&port->open_wait, &wait); - IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n", - __FILE__, __LINE__, tty->driver->name, port->count); + pr_debug("%s(%d):block_til_ready before block on %s open_count=%d\n", + __FILE__, __LINE__, tty->driver->name, port->count); spin_lock_irqsave(&port->lock, flags); port->count--; @@ -354,8 +347,8 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, break; } - IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n", - __FILE__, __LINE__, tty->driver->name, port->count); + pr_debug("%s(%d):block_til_ready blocking on %s open_count=%d\n", + __FILE__, __LINE__, tty->driver->name, port->count); schedule(); } @@ -369,8 +362,8 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, port->blocked_open--; spin_unlock_irqrestore(&port->lock, flags); - IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n", - __FILE__, __LINE__, tty->driver->name, port->count); + pr_debug("%s(%d):block_til_ready after blocking on %s open_count=%d\n", + __FILE__, __LINE__, tty->driver->name, port->count); if (!retval) port->flags |= ASYNC_NORMAL_ACTIVE; @@ -389,10 +382,8 @@ static int ircomm_tty_install(struct tty_driver *driver, struct tty_struct *tty) if (!self) { /* No, so make new instance */ self = kzalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL); - if (self == NULL) { - IRDA_ERROR("%s(), kmalloc failed!\n", __func__); + if (self == NULL) return -ENOMEM; - } tty_port_init(&self->port); self->port.ops = &ircomm_port_ops; @@ -440,16 +431,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) unsigned long flags; int ret; - IRDA_DEBUG(2, "%s()\n", __func__ ); - /* ++ is not atomic, so this should be protected - Jean II */ spin_lock_irqsave(&self->port.lock, flags); self->port.count++; spin_unlock_irqrestore(&self->port.lock, flags); tty_port_tty_set(&self->port, tty); - IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name, - self->line, self->port.count); + pr_debug("%s(), %s%d, count = %d\n", __func__ , tty->driver->name, + self->line, self->port.count); /* Not really used by us, but lets do it anyway */ self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 
1 : 0; @@ -469,8 +458,8 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) if (wait_event_interruptible(self->port.close_wait, !test_bit(ASYNCB_CLOSING, &self->port.flags))) { - IRDA_WARNING("%s - got signal while blocking on ASYNC_CLOSING!\n", - __func__); + net_warn_ratelimited("%s - got signal while blocking on ASYNC_CLOSING!\n", + __func__); return -ERESTARTSYS; } @@ -488,9 +477,9 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) self->settings.service_type = IRCOMM_9_WIRE; /* 9 wire as default */ /* Jan Kiszka -> add DSR/RI -> Conform to IrCOMM spec */ self->settings.dce = IRCOMM_CTS | IRCOMM_CD | IRCOMM_DSR | IRCOMM_RI; /* Default line settings */ - IRDA_DEBUG(2, "%s(), IrCOMM device\n", __func__ ); + pr_debug("%s(), IrCOMM device\n", __func__); } else { - IRDA_DEBUG(2, "%s(), IrLPT device\n", __func__ ); + pr_debug("%s(), IrLPT device\n", __func__); self->service_type = IRCOMM_3_WIRE_RAW; self->settings.service_type = IRCOMM_3_WIRE_RAW; /* Default */ } @@ -501,9 +490,8 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) ret = ircomm_tty_block_til_ready(self, tty, filp); if (ret) { - IRDA_DEBUG(2, - "%s(), returning after block_til_ready with %d\n", __func__ , - ret); + pr_debug("%s(), returning after block_til_ready with %d\n", + __func__, ret); return ret; } @@ -521,8 +509,6 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp) struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; struct tty_port *port = &self->port; - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -572,8 +558,6 @@ static void ircomm_tty_do_softint(struct work_struct *work) unsigned long flags; struct sk_buff *skb, *ctrl_skb; - IRDA_DEBUG(2, "%s()\n", __func__ ); - if (!self || self->magic != IRCOMM_TTY_MAGIC) return; @@ -639,8 +623,8 @@ static int ircomm_tty_write(struct tty_struct *tty, int len = 0; int size; - IRDA_DEBUG(2, "%s(), count=%d, hw_stopped=%d\n", __func__ , count, - tty->hw_stopped); + pr_debug("%s(), count=%d, hw_stopped=%d\n", __func__ , count, + tty->hw_stopped); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); @@ -662,7 +646,7 @@ static int ircomm_tty_write(struct tty_struct *tty, * we don't mess up the original "safe skb" (see tx_data_size). 
* Jean II */ if (self->max_header_size == IRCOMM_TTY_HDR_UNINITIALISED) { - IRDA_DEBUG(1, "%s() : not initialised\n", __func__); + pr_debug("%s() : not initialised\n", __func__); #ifdef IRCOMM_NO_TX_BEFORE_INIT /* We didn't consume anything, TTY will retry */ return 0; @@ -791,7 +775,7 @@ static int ircomm_tty_write_room(struct tty_struct *tty) ret = self->max_data_size; spin_unlock_irqrestore(&self->spinlock, flags); } - IRDA_DEBUG(2, "%s(), ret=%d\n", __func__ , ret); + pr_debug("%s(), ret=%d\n", __func__ , ret); return ret; } @@ -808,8 +792,6 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout) unsigned long orig_jiffies, poll_time; unsigned long flags; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -843,8 +825,6 @@ static void ircomm_tty_throttle(struct tty_struct *tty) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -874,8 +854,6 @@ static void ircomm_tty_unthrottle(struct tty_struct *tty) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -889,7 +867,7 @@ static void ircomm_tty_unthrottle(struct tty_struct *tty) self->settings.dte |= (IRCOMM_RTS|IRCOMM_DELTA_RTS); ircomm_param_request(self, IRCOMM_DTE, TRUE); - IRDA_DEBUG(1, "%s(), FLOW_START\n", __func__ ); + pr_debug("%s(), FLOW_START\n", __func__); } ircomm_flow_request(self->ircomm, FLOW_START); } @@ -926,8 +904,6 @@ static void ircomm_tty_shutdown(struct ircomm_tty_cb *self) IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); - IRDA_DEBUG(0, "%s()\n", __func__ ); - if (!test_and_clear_bit(ASYNCB_INITIALIZED, &self->port.flags)) return; @@ -970,8 +946,6 @@ static void ircomm_tty_hangup(struct tty_struct *tty) struct tty_port *port = &self->port; unsigned long flags; - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -999,7 +973,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty) */ static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch) { - IRDA_DEBUG(0, "%s(), not impl\n", __func__ ); + pr_debug("%s(), not impl\n", __func__); } /* @@ -1043,8 +1017,6 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self) struct tty_struct *tty; int status; - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -1056,15 +1028,13 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self) /*wake_up_interruptible(&self->delta_msr_wait);*/ } if ((self->port.flags & ASYNC_CHECK_CD) && (status & IRCOMM_DELTA_CD)) { - IRDA_DEBUG(2, - "%s(), ircomm%d CD now %s...\n", __func__ , self->line, - (status & IRCOMM_CD) ? "on" : "off"); + pr_debug("%s(), ircomm%d CD now %s...\n", __func__ , self->line, + (status & IRCOMM_CD) ? 
"on" : "off"); if (status & IRCOMM_CD) { wake_up_interruptible(&self->port.open_wait); } else { - IRDA_DEBUG(2, - "%s(), Doing serial hangup..\n", __func__ ); + pr_debug("%s(), Doing serial hangup..\n", __func__); if (tty) tty_hangup(tty); @@ -1075,8 +1045,7 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self) if (tty && tty_port_cts_enabled(&self->port)) { if (tty->hw_stopped) { if (status & IRCOMM_CTS) { - IRDA_DEBUG(2, - "%s(), CTS tx start...\n", __func__ ); + pr_debug("%s(), CTS tx start...\n", __func__); tty->hw_stopped = 0; /* Wake up processes blocked on open */ @@ -1087,8 +1056,7 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self) } } else { if (!(status & IRCOMM_CTS)) { - IRDA_DEBUG(2, - "%s(), CTS tx stop...\n", __func__ ); + pr_debug("%s(), CTS tx stop...\n", __func__); tty->hw_stopped = 1; } } @@ -1109,15 +1077,13 @@ static int ircomm_tty_data_indication(void *instance, void *sap, struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; struct tty_struct *tty; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); IRDA_ASSERT(skb != NULL, return -1;); tty = tty_port_tty_get(&self->port); if (!tty) { - IRDA_DEBUG(0, "%s(), no tty!\n", __func__ ); + pr_debug("%s(), no tty!\n", __func__); return 0; } @@ -1128,7 +1094,7 @@ static int ircomm_tty_data_indication(void *instance, void *sap, * params, we can just as well declare the hardware for running. */ if (tty->hw_stopped && (self->flow == FLOW_START)) { - IRDA_DEBUG(0, "%s(), polling for line settings!\n", __func__ ); + pr_debug("%s(), polling for line settings!\n", __func__); ircomm_param_request(self, IRCOMM_POLL, TRUE); /* We can just as well declare the hardware for running */ @@ -1161,8 +1127,6 @@ static int ircomm_tty_control_indication(void *instance, void *sap, struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; int clen; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); IRDA_ASSERT(skb != NULL, return -1;); @@ -1197,7 +1161,7 @@ static void ircomm_tty_flow_indication(void *instance, void *sap, switch (cmd) { case FLOW_START: - IRDA_DEBUG(2, "%s(), hw start!\n", __func__ ); + pr_debug("%s(), hw start!\n", __func__); if (tty) tty->hw_stopped = 0; @@ -1206,7 +1170,7 @@ static void ircomm_tty_flow_indication(void *instance, void *sap, break; default: /* If we get here, something is very wrong, better stop */ case FLOW_STOP: - IRDA_DEBUG(2, "%s(), hw stopped!\n", __func__ ); + pr_debug("%s(), hw stopped!\n", __func__); if (tty) tty->hw_stopped = 1; break; diff --git a/net/irda/ircomm/ircomm_tty_attach.c b/net/irda/ircomm/ircomm_tty_attach.c index 2ee87bf387cc191147ce9bb3809b6fd1e3096cc4..61137f8b5293617c789172cf5c1840417cb7efa6 100644 --- a/net/irda/ircomm/ircomm_tty_attach.c +++ b/net/irda/ircomm/ircomm_tty_attach.c @@ -89,8 +89,7 @@ const char *const ircomm_tty_state[] = { "*** ERROR *** ", }; -#ifdef CONFIG_IRDA_DEBUG -static const char *const ircomm_tty_event[] = { +static const char *const ircomm_tty_event[] __maybe_unused = { "IRCOMM_TTY_ATTACH_CABLE", "IRCOMM_TTY_DETACH_CABLE", "IRCOMM_TTY_DATA_REQUEST", @@ -106,7 +105,6 @@ static const char *const ircomm_tty_event[] = { "IRCOMM_TTY_GOT_LSAPSEL", "*** ERROR ****", }; -#endif /* CONFIG_IRDA_DEBUG */ static int (*state[])(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) = @@ -130,14 +128,12 @@ int 
ircomm_tty_attach_cable(struct ircomm_tty_cb *self) { struct tty_struct *tty; - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); /* Check if somebody has already connected to us */ if (ircomm_is_connected(self->ircomm)) { - IRDA_DEBUG(0, "%s(), already connected!\n", __func__ ); + pr_debug("%s(), already connected!\n", __func__); return 0; } @@ -163,8 +159,6 @@ int ircomm_tty_attach_cable(struct ircomm_tty_cb *self) */ void ircomm_tty_detach_cable(struct ircomm_tty_cb *self) { - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -212,8 +206,6 @@ static void ircomm_tty_ias_register(struct ircomm_tty_cb *self) __u8 oct_seq[6]; __u16 hints; - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -313,17 +305,17 @@ int ircomm_tty_send_initial_parameters(struct ircomm_tty_cb *self) * Set default values, but only if the application for some reason * haven't set them already */ - IRDA_DEBUG(2, "%s(), data-rate = %d\n", __func__ , - self->settings.data_rate); + pr_debug("%s(), data-rate = %d\n", __func__ , + self->settings.data_rate); if (!self->settings.data_rate) self->settings.data_rate = 9600; - IRDA_DEBUG(2, "%s(), data-format = %d\n", __func__ , - self->settings.data_format); + pr_debug("%s(), data-format = %d\n", __func__ , + self->settings.data_format); if (!self->settings.data_format) self->settings.data_format = IRCOMM_WSIZE_8; /* 8N1 */ - IRDA_DEBUG(2, "%s(), flow-control = %d\n", __func__ , - self->settings.flow_control); + pr_debug("%s(), flow-control = %d\n", __func__ , + self->settings.flow_control); /*self->settings.flow_control = IRCOMM_RTS_CTS_IN|IRCOMM_RTS_CTS_OUT;*/ /* Do not set delta values for the initial parameters */ @@ -367,8 +359,6 @@ static void ircomm_tty_discovery_indication(discinfo_t *discovery, struct ircomm_tty_cb *self; struct ircomm_tty_info info; - IRDA_DEBUG(2, "%s()\n", __func__ ); - /* Important note : * We need to drop all passive discoveries. 
* The LSAP management of IrComm is deficient and doesn't deal @@ -404,8 +394,6 @@ void ircomm_tty_disconnect_indication(void *instance, void *sap, struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; struct tty_struct *tty; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -436,8 +424,6 @@ static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id, { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) priv; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -447,13 +433,13 @@ static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id, /* Check if request succeeded */ if (result != IAS_SUCCESS) { - IRDA_DEBUG(4, "%s(), got NULL value!\n", __func__ ); + pr_debug("%s(), got NULL value!\n", __func__); return; } switch (value->type) { case IAS_OCT_SEQ: - IRDA_DEBUG(2, "%s(), got octet sequence\n", __func__ ); + pr_debug("%s(), got octet sequence\n", __func__); irda_param_extract_all(self, value->t.oct_seq, value->len, &ircomm_param_info); @@ -463,21 +449,21 @@ static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id, break; case IAS_INTEGER: /* Got LSAP selector */ - IRDA_DEBUG(2, "%s(), got lsapsel = %d\n", __func__ , - value->t.integer); + pr_debug("%s(), got lsapsel = %d\n", __func__ , + value->t.integer); if (value->t.integer == -1) { - IRDA_DEBUG(0, "%s(), invalid value!\n", __func__ ); + pr_debug("%s(), invalid value!\n", __func__); } else self->dlsap_sel = value->t.integer; ircomm_tty_do_event(self, IRCOMM_TTY_GOT_LSAPSEL, NULL, NULL); break; case IAS_MISSING: - IRDA_DEBUG(0, "%s(), got IAS_MISSING\n", __func__ ); + pr_debug("%s(), got IAS_MISSING\n", __func__); break; default: - IRDA_DEBUG(0, "%s(), got unknown type!\n", __func__ ); + pr_debug("%s(), got unknown type!\n", __func__); break; } irias_delete_value(value); @@ -497,8 +483,6 @@ void ircomm_tty_connect_confirm(void *instance, void *sap, { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -528,8 +512,6 @@ void ircomm_tty_connect_indication(void *instance, void *sap, struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; int clen; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -559,8 +541,6 @@ void ircomm_tty_link_established(struct ircomm_tty_cb *self) { struct tty_struct *tty; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -578,10 +558,10 @@ void ircomm_tty_link_established(struct ircomm_tty_cb *self) */ if (tty_port_cts_enabled(&self->port) && ((self->settings.dce & IRCOMM_CTS) == 0)) { - IRDA_DEBUG(0, "%s(), waiting for CTS ...\n", __func__ ); + pr_debug("%s(), waiting for CTS ...\n", __func__); goto put; } else { - IRDA_DEBUG(1, "%s(), starting hardware!\n", __func__ ); + pr_debug("%s(), starting hardware!\n", __func__); tty->hw_stopped = 0; @@ -621,8 +601,6 @@ static void ircomm_tty_watchdog_timer_expired(void *data) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) data; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -642,8 +620,8 @@ int ircomm_tty_do_event(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, 
IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); - IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , - ircomm_tty_state[self->state], ircomm_tty_event[event]); + pr_debug("%s: state=%s, event=%s\n", __func__ , + ircomm_tty_state[self->state], ircomm_tty_event[event]); return (*state[self->state])(self, event, skb, info); } @@ -660,8 +638,8 @@ static inline void ircomm_tty_next_state(struct ircomm_tty_cb *self, IRCOMM_TTY_ IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); - IRDA_DEBUG(2, "%s: next state=%s, service type=%d\n", __func__ , - ircomm_tty_state[self->state], self->service_type); + pr_debug("%s: next state=%s, service type=%d\n", __func__ , + ircomm_tty_state[self->state], self->service_type); */ self->state = state; } @@ -679,8 +657,8 @@ static int ircomm_tty_state_idle(struct ircomm_tty_cb *self, { int ret = 0; - IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , - ircomm_tty_state[self->state], ircomm_tty_event[event]); + pr_debug("%s: state=%s, event=%s\n", __func__ , + ircomm_tty_state[self->state], ircomm_tty_event[event]); switch (event) { case IRCOMM_TTY_ATTACH_CABLE: /* Try to discover any remote devices */ @@ -694,8 +672,8 @@ static int ircomm_tty_state_idle(struct ircomm_tty_cb *self, self->saddr = info->saddr; if (self->iriap) { - IRDA_WARNING("%s(), busy with a previous query\n", - __func__); + net_warn_ratelimited("%s(), busy with a previous query\n", + __func__); return -EBUSY; } @@ -723,8 +701,8 @@ static int ircomm_tty_state_idle(struct ircomm_tty_cb *self, ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); break; default: - IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , - ircomm_tty_event[event]); + pr_debug("%s(), unknown event: %s\n", __func__ , + ircomm_tty_event[event]); ret = -EINVAL; } return ret; @@ -743,8 +721,8 @@ static int ircomm_tty_state_search(struct ircomm_tty_cb *self, { int ret = 0; - IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , - ircomm_tty_state[self->state], ircomm_tty_event[event]); + pr_debug("%s: state=%s, event=%s\n", __func__ , + ircomm_tty_state[self->state], ircomm_tty_event[event]); switch (event) { case IRCOMM_TTY_DISCOVERY_INDICATION: @@ -752,8 +730,8 @@ static int ircomm_tty_state_search(struct ircomm_tty_cb *self, self->saddr = info->saddr; if (self->iriap) { - IRDA_WARNING("%s(), busy with a previous query\n", - __func__); + net_warn_ratelimited("%s(), busy with a previous query\n", + __func__); return -EBUSY; } @@ -796,8 +774,8 @@ static int ircomm_tty_state_search(struct ircomm_tty_cb *self, ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); break; default: - IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , - ircomm_tty_event[event]); + pr_debug("%s(), unknown event: %s\n", __func__ , + ircomm_tty_event[event]); ret = -EINVAL; } return ret; @@ -816,14 +794,14 @@ static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self, { int ret = 0; - IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , - ircomm_tty_state[self->state], ircomm_tty_event[event]); + pr_debug("%s: state=%s, event=%s\n", __func__ , + ircomm_tty_state[self->state], ircomm_tty_event[event]); switch (event) { case IRCOMM_TTY_GOT_PARAMETERS: if (self->iriap) { - IRDA_WARNING("%s(), busy with a previous query\n", - __func__); + net_warn_ratelimited("%s(), busy with a previous query\n", + __func__); return -EBUSY; } @@ -854,8 +832,8 @@ static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self, ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); 
break; default: - IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , - ircomm_tty_event[event]); + pr_debug("%s(), unknown event: %s\n", __func__ , + ircomm_tty_event[event]); ret = -EINVAL; } return ret; @@ -874,8 +852,8 @@ static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self, { int ret = 0; - IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , - ircomm_tty_state[self->state], ircomm_tty_event[event]); + pr_debug("%s: state=%s, event=%s\n", __func__ , + ircomm_tty_state[self->state], ircomm_tty_event[event]); switch (event) { case IRCOMM_TTY_GOT_LSAPSEL: @@ -903,8 +881,8 @@ static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self, ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); break; default: - IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , - ircomm_tty_event[event]); + pr_debug("%s(), unknown event: %s\n", __func__ , + ircomm_tty_event[event]); ret = -EINVAL; } return ret; @@ -923,8 +901,8 @@ static int ircomm_tty_state_setup(struct ircomm_tty_cb *self, { int ret = 0; - IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , - ircomm_tty_state[self->state], ircomm_tty_event[event]); + pr_debug("%s: state=%s, event=%s\n", __func__ , + ircomm_tty_state[self->state], ircomm_tty_event[event]); switch (event) { case IRCOMM_TTY_CONNECT_CONFIRM: @@ -957,8 +935,8 @@ static int ircomm_tty_state_setup(struct ircomm_tty_cb *self, ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); break; default: - IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , - ircomm_tty_event[event]); + pr_debug("%s(), unknown event: %s\n", __func__ , + ircomm_tty_event[event]); ret = -EINVAL; } return ret; @@ -995,13 +973,13 @@ static int ircomm_tty_state_ready(struct ircomm_tty_cb *self, self->settings.dce = IRCOMM_DELTA_CD; ircomm_tty_check_modem_status(self); } else { - IRDA_DEBUG(0, "%s(), hanging up!\n", __func__ ); + pr_debug("%s(), hanging up!\n", __func__); tty_port_tty_hangup(&self->port, false); } break; default: - IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , - ircomm_tty_event[event]); + pr_debug("%s(), unknown event: %s\n", __func__ , + ircomm_tty_event[event]); ret = -EINVAL; } return ret; diff --git a/net/irda/ircomm/ircomm_tty_ioctl.c b/net/irda/ircomm/ircomm_tty_ioctl.c index ce943853c38d6a913e6d5a113f1a71857537fb69..75ccdbd0728e18a9f4892504f2814e8e918b8097 100644 --- a/net/irda/ircomm/ircomm_tty_ioctl.c +++ b/net/irda/ircomm/ircomm_tty_ioctl.c @@ -56,8 +56,6 @@ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self, unsigned int cflag, cval; int baud; - IRDA_DEBUG(2, "%s()\n", __func__ ); - if (!self->ircomm) return; @@ -93,7 +91,8 @@ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self, self->settings.flow_control |= IRCOMM_RTS_CTS_IN; /* This got me. Bummer. 
Jean II */ if (self->service_type == IRCOMM_3_WIRE_RAW) - IRDA_WARNING("%s(), enabling RTS/CTS on link that doesn't support it (3-wire-raw)\n", __func__); + net_warn_ratelimited("%s(), enabling RTS/CTS on link that doesn't support it (3-wire-raw)\n", + __func__); } else { self->port.flags &= ~ASYNC_CTS_FLOW; self->settings.flow_control &= ~IRCOMM_RTS_CTS_IN; @@ -149,8 +148,6 @@ void ircomm_tty_set_termios(struct tty_struct *tty, struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; unsigned int cflag = tty->termios.c_cflag; - IRDA_DEBUG(2, "%s()\n", __func__ ); - if ((cflag == old_termios->c_cflag) && (RELEVANT_IFLAG(tty->termios.c_iflag) == RELEVANT_IFLAG(old_termios->c_iflag))) @@ -198,8 +195,6 @@ int ircomm_tty_tiocmget(struct tty_struct *tty) struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; unsigned int result; - IRDA_DEBUG(2, "%s()\n", __func__ ); - if (tty->flags & (1 << TTY_IO_ERROR)) return -EIO; @@ -223,8 +218,6 @@ int ircomm_tty_tiocmset(struct tty_struct *tty, { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; - IRDA_DEBUG(2, "%s()\n", __func__ ); - if (tty->flags & (1 << TTY_IO_ERROR)) return -EIO; @@ -265,8 +258,6 @@ static int ircomm_tty_get_serial_info(struct ircomm_tty_cb *self, if (!retinfo) return -EFAULT; - IRDA_DEBUG(2, "%s()\n", __func__ ); - memset(&info, 0, sizeof(info)); info.line = self->line; info.flags = self->port.flags; @@ -301,8 +292,6 @@ static int ircomm_tty_set_serial_info(struct ircomm_tty_cb *self, struct serial_struct new_serial; struct ircomm_tty_cb old_state, *state; - IRDA_DEBUG(0, "%s()\n", __func__ ); - if (copy_from_user(&new_serial,new_info,sizeof(new_serial))) return -EFAULT; @@ -375,8 +364,6 @@ int ircomm_tty_ioctl(struct tty_struct *tty, struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; int ret = 0; - IRDA_DEBUG(2, "%s()\n", __func__ ); - if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) && (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) { @@ -392,11 +379,11 @@ int ircomm_tty_ioctl(struct tty_struct *tty, ret = ircomm_tty_set_serial_info(self, (struct serial_struct __user *) arg); break; case TIOCMIWAIT: - IRDA_DEBUG(0, "(), TIOCMIWAIT, not impl!\n"); + pr_debug("(), TIOCMIWAIT, not impl!\n"); break; case TIOCGICOUNT: - IRDA_DEBUG(0, "%s(), TIOCGICOUNT not impl!\n", __func__ ); + pr_debug("%s(), TIOCGICOUNT not impl!\n", __func__); #if 0 save_flags(flags); cli(); cnow = driver->icount; diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c index 9e0d909390fd0569a50a2e407ee33e2610ae9cae..856736656a304d57e1cb1f1976c8046f698ff519 100644 --- a/net/irda/irda_device.c +++ b/net/irda/irda_device.c @@ -63,14 +63,14 @@ int __init irda_device_init( void) { dongles = hashbin_new(HB_NOLOCK); if (dongles == NULL) { - IRDA_WARNING("IrDA: Can't allocate dongles hashbin!\n"); + net_warn_ratelimited("IrDA: Can't allocate dongles hashbin!\n"); return -ENOMEM; } spin_lock_init(&dongles->hb_spinlock); tasks = hashbin_new(HB_LOCK); if (tasks == NULL) { - IRDA_WARNING("IrDA: Can't allocate tasks hashbin!\n"); + net_warn_ratelimited("IrDA: Can't allocate tasks hashbin!\n"); hashbin_delete(dongles, NULL); return -ENOMEM; } @@ -84,14 +84,12 @@ int __init irda_device_init( void) static void leftover_dongle(void *arg) { struct dongle_reg *reg = arg; - IRDA_WARNING("IrDA: Dongle type %x not unregistered\n", - reg->type); + net_warn_ratelimited("IrDA: Dongle type %x not unregistered\n", + reg->type); } void irda_device_cleanup(void) { 
- IRDA_DEBUG(4, "%s()\n", __func__); - hashbin_delete(tasks, (FREE_FUNC) __irda_task_delete); hashbin_delete(dongles, leftover_dongle); @@ -107,7 +105,7 @@ void irda_device_set_media_busy(struct net_device *dev, int status) { struct irlap_cb *self; - IRDA_DEBUG(4, "%s(%s)\n", __func__, status ? "TRUE" : "FALSE"); + pr_debug("%s(%s)\n", __func__, status ? "TRUE" : "FALSE"); self = (struct irlap_cb *) dev->atalk_ptr; @@ -127,7 +125,7 @@ void irda_device_set_media_busy(struct net_device *dev, int status) irlap_start_mbusy_timer(self, SMALLBUSY_TIMEOUT); else irlap_start_mbusy_timer(self, MEDIABUSY_TIMEOUT); - IRDA_DEBUG( 4, "Media busy!\n"); + pr_debug("Media busy!\n"); } else { self->media_busy = FALSE; irlap_stop_mbusy_timer(self); @@ -147,11 +145,9 @@ int irda_device_is_receiving(struct net_device *dev) struct if_irda_req req; int ret; - IRDA_DEBUG(2, "%s()\n", __func__); - if (!dev->netdev_ops->ndo_do_ioctl) { - IRDA_ERROR("%s: do_ioctl not impl. by device driver\n", - __func__); + net_err_ratelimited("%s: do_ioctl not impl. by device driver\n", + __func__); return -1; } @@ -192,8 +188,6 @@ static int irda_task_kick(struct irda_task *task) int count = 0; int timeout; - IRDA_DEBUG(2, "%s()\n", __func__); - IRDA_ASSERT(task != NULL, return -1;); IRDA_ASSERT(task->magic == IRDA_TASK_MAGIC, return -1;); @@ -201,15 +195,15 @@ static int irda_task_kick(struct irda_task *task) do { timeout = task->function(task); if (count++ > 100) { - IRDA_ERROR("%s: error in task handler!\n", - __func__); + net_err_ratelimited("%s: error in task handler!\n", + __func__); irda_task_delete(task); return TRUE; } } while ((timeout == 0) && (task->state != IRDA_TASK_DONE)); if (timeout < 0) { - IRDA_ERROR("%s: Error executing task!\n", __func__); + net_err_ratelimited("%s: Error executing task!\n", __func__); irda_task_delete(task); return TRUE; } @@ -241,8 +235,8 @@ static int irda_task_kick(struct irda_task *task) irda_task_timer_expired); finished = FALSE; } else { - IRDA_DEBUG(0, "%s(), not finished, and no timeout!\n", - __func__); + pr_debug("%s(), not finished, and no timeout!\n", + __func__); finished = FALSE; } @@ -259,8 +253,6 @@ static void irda_task_timer_expired(void *data) { struct irda_task *task; - IRDA_DEBUG(2, "%s()\n", __func__); - task = data; irda_task_kick(task); diff --git a/net/irda/iriap.c b/net/irda/iriap.c index e1b37f5a2691f7a79052f9b840aa0f9910182698..4a7ae32afa09b90fab1d57bcb807ca2b8a1200c6 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c @@ -43,9 +43,8 @@ #include #include -#ifdef CONFIG_IRDA_DEBUG /* FIXME: This one should go in irlmp.c */ -static const char *const ias_charset_types[] = { +static const char *const ias_charset_types[] __maybe_unused = { "CS_ASCII", "CS_ISO_8859_1", "CS_ISO_8859_2", @@ -58,7 +57,6 @@ static const char *const ias_charset_types[] = { "CS_ISO_8859_9", "CS_UNICODE" }; -#endif /* CONFIG_IRDA_DEBUG */ static hashbin_t *iriap = NULL; static void *service_handle; @@ -110,8 +108,8 @@ int __init iriap_init(void) /* Object repository - defined in irias_object.c */ irias_objects = hashbin_new(HB_LOCK); if (!irias_objects) { - IRDA_WARNING("%s: Can't allocate irias_objects hashbin!\n", - __func__); + net_warn_ratelimited("%s: Can't allocate irias_objects hashbin!\n", + __func__); hashbin_delete(iriap, NULL); return -ENOMEM; } @@ -145,7 +143,7 @@ int __init iriap_init(void) */ server = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL); if (!server) { - IRDA_DEBUG(0, "%s(), unable to open server\n", __func__); + pr_debug("%s(), unable to open server\n", __func__); 
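The series also replaces the #ifdef CONFIG_IRDA_DEBUG guards around the event/charset name tables (ircomm_event[], ircomm_tty_event[], and ias_charset_types[] above) with __maybe_unused from <linux/compiler.h>: the tables are now referenced only by pr_debug() calls that can compile away depending on configuration, and the annotation keeps unused-variable warnings quiet in such builds without conditional compilation. A minimal sketch of the idiom, with hypothetical names:

#include <linux/compiler.h>	/* __maybe_unused */
#include <linux/printk.h>

static const char *const demo_event_names[] __maybe_unused = {
	"EV_CONNECT",
	"EV_DISCONNECT",
};

static void demo_handle(int event)
{
	/* The only consumer of demo_event_names is a debug statement
	 * that may be compiled out; __maybe_unused replaces the old
	 * #ifdef guard around the whole table. */
	pr_debug("event=%s\n", demo_event_names[event]);
}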
return -1; } iriap_register_lsap(server, LSAP_IAS, IAS_SERVER); @@ -177,13 +175,9 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv, { struct iriap_cb *self; - IRDA_DEBUG(2, "%s()\n", __func__); - self = kzalloc(sizeof(*self), GFP_ATOMIC); - if (!self) { - IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); + if (!self) return NULL; - } /* * Initialize instance @@ -223,8 +217,6 @@ EXPORT_SYMBOL(iriap_open); */ static void __iriap_close(struct iriap_cb *self) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IAS_MAGIC, return;); @@ -247,8 +239,6 @@ void iriap_close(struct iriap_cb *self) { struct iriap_cb *entry; - IRDA_DEBUG(2, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IAS_MAGIC, return;); @@ -268,8 +258,6 @@ static int iriap_register_lsap(struct iriap_cb *self, __u8 slsap_sel, int mode) { notify_t notify; - IRDA_DEBUG(2, "%s()\n", __func__); - irda_notify_init(&notify); notify.connect_confirm = iriap_connect_confirm; notify.connect_indication = iriap_connect_indication; @@ -283,7 +271,8 @@ static int iriap_register_lsap(struct iriap_cb *self, __u8 slsap_sel, int mode) self->lsap = irlmp_open_lsap(slsap_sel, &notify, 0); if (self->lsap == NULL) { - IRDA_ERROR("%s: Unable to allocated LSAP!\n", __func__); + net_err_ratelimited("%s: Unable to allocate LSAP!\n", + __func__); return -1; } self->slsap_sel = self->lsap->slsap_sel; @@ -303,8 +292,8 @@ static void iriap_disconnect_indication(void *instance, void *sap, { struct iriap_cb *self; - IRDA_DEBUG(4, "%s(), reason=%s [%d]\n", __func__, - irlmp_reason_str(reason), reason); + pr_debug("%s(), reason=%s [%d]\n", __func__, + irlmp_reason_str(reason), reason); self = instance; @@ -320,7 +309,7 @@ static void iriap_disconnect_indication(void *instance, void *sap, dev_kfree_skb(skb); if (self->mode == IAS_CLIENT) { - IRDA_DEBUG(4, "%s(), disconnect as client\n", __func__); + pr_debug("%s(), disconnect as client\n", __func__); iriap_do_client_event(self, IAP_LM_DISCONNECT_INDICATION, @@ -333,7 +322,7 @@ static void iriap_disconnect_indication(void *instance, void *sap, if (self->confirm) self->confirm(IAS_DISCONNECT, 0, NULL, self->priv); } else { - IRDA_DEBUG(4, "%s(), disconnect as server\n", __func__); + pr_debug("%s(), disconnect as server\n", __func__); iriap_do_server_event(self, IAP_LM_DISCONNECT_INDICATION, NULL); iriap_close(self); @@ -347,16 +336,13 @@ static void iriap_disconnect_request(struct iriap_cb *self) { struct sk_buff *tx_skb; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IAS_MAGIC, return;); tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC); if (tx_skb == NULL) { - IRDA_DEBUG(0, - "%s(), Could not allocate an sk_buff of length %d\n", - __func__, LMP_MAX_HEADER); + pr_debug("%s(), Could not allocate an sk_buff of length %d\n", + __func__, LMP_MAX_HEADER); return; } @@ -461,14 +447,14 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self, len = get_unaligned_be16(fp + n); n += 2; - IRDA_DEBUG(4, "%s(), len=%d\n", __func__, len); + pr_debug("%s(), len=%d\n", __func__, len); /* Get object ID, MSB first */ obj_id = get_unaligned_be16(fp + n); n += 2; type = fp[n++]; - IRDA_DEBUG(4, "%s(), Value type = %d\n", __func__, type); + pr_debug("%s(), Value type = %d\n", __func__, type); switch (type) { case IAS_INTEGER: @@ -477,7 +463,7 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self, value = irias_new_integer_value(tmp_cpu32); /* Legal values
restricted to 0x01-0x6f, page 15 irttp */ - IRDA_DEBUG(4, "%s(), lsap=%d\n", __func__, value->t.integer); + pr_debug("%s(), lsap=%d\n", __func__, value->t.integer); break; case IAS_STRING: charset = fp[n++]; @@ -496,11 +482,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self, /* case CS_ISO_8859_9: */ /* case CS_UNICODE: */ default: - IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n", - __func__, charset, - charset < ARRAY_SIZE(ias_charset_types) ? - ias_charset_types[charset] : - "(unknown)"); + pr_debug("%s(), charset [%d] %s, not supported\n", + __func__, charset, + charset < ARRAY_SIZE(ias_charset_types) ? + ias_charset_types[charset] : + "(unknown)"); /* Aborting, close connection! */ iriap_disconnect_request(self); @@ -508,12 +494,12 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self, /* break; */ } value_len = fp[n++]; - IRDA_DEBUG(4, "%s(), strlen=%d\n", __func__, value_len); + pr_debug("%s(), strlen=%d\n", __func__, value_len); /* Make sure the string is null-terminated */ if (n + value_len < skb->len) fp[n + value_len] = 0x00; - IRDA_DEBUG(4, "Got string %s\n", fp+n); + pr_debug("Got string %s\n", fp+n); /* Will truncate to IAS_MAX_STRING bytes */ value = irias_new_string_value(fp+n); @@ -539,7 +525,7 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self, if (self->confirm) self->confirm(IAS_SUCCESS, obj_id, value, self->priv); else { - IRDA_DEBUG(0, "%s(), missing handler!\n", __func__); + pr_debug("%s(), missing handler!\n", __func__); irias_delete_value(value); } } @@ -561,8 +547,6 @@ static void iriap_getvaluebyclass_response(struct iriap_cb *self, __be16 tmp_be16; __u8 *fp; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IAS_MAGIC, return;); IRDA_ASSERT(value != NULL, return;); @@ -623,12 +607,12 @@ static void iriap_getvaluebyclass_response(struct iriap_cb *self, memcpy(fp+n, value->t.oct_seq, value->len); n+=value->len; break; case IAS_MISSING: - IRDA_DEBUG( 3, "%s: sending IAS_MISSING\n", __func__); + pr_debug("%s: sending IAS_MISSING\n", __func__); skb_put(tx_skb, 1); fp[n++] = value->type; break; default: - IRDA_DEBUG(0, "%s(), type not implemented!\n", __func__); + pr_debug("%s(), type not implemented!\n", __func__); break; } iriap_do_r_connect_event(self, IAP_CALL_RESPONSE, tx_skb); @@ -655,8 +639,6 @@ static void iriap_getvaluebyclass_indication(struct iriap_cb *self, __u8 *fp; int n; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IAS_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); @@ -678,20 +660,20 @@ static void iriap_getvaluebyclass_indication(struct iriap_cb *self, memcpy(attr, fp+n, attr_len); n+=attr_len; attr[attr_len] = '\0'; - IRDA_DEBUG(4, "LM-IAS: Looking up %s: %s\n", name, attr); + pr_debug("LM-IAS: Looking up %s: %s\n", name, attr); obj = irias_find_object(name); if (obj == NULL) { - IRDA_DEBUG(2, "LM-IAS: Object %s not found\n", name); + pr_debug("LM-IAS: Object %s not found\n", name); iriap_getvaluebyclass_response(self, 0x1235, IAS_CLASS_UNKNOWN, &irias_missing); return; } - IRDA_DEBUG(4, "LM-IAS: found %s, id=%d\n", obj->name, obj->id); + pr_debug("LM-IAS: found %s, id=%d\n", obj->name, obj->id); attrib = irias_find_attrib(obj, attr); if (attrib == NULL) { - IRDA_DEBUG(2, "LM-IAS: Attribute %s not found\n", attr); + pr_debug("LM-IAS: Attribute %s not found\n", attr); iriap_getvaluebyclass_response(self, obj->id, IAS_ATTRIB_UNKNOWN, &irias_missing); @@ -714,8 +696,6 @@ void 
iriap_send_ack(struct iriap_cb *self) struct sk_buff *tx_skb; __u8 *frame; - IRDA_DEBUG(2, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IAS_MAGIC, return;); @@ -745,7 +725,7 @@ void iriap_connect_request(struct iriap_cb *self) self->saddr, self->daddr, NULL, NULL); if (ret < 0) { - IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); + pr_debug("%s(), connect failed!\n", __func__); self->confirm(IAS_DISCONNECT, 0, NULL, self->priv); } } @@ -793,8 +773,6 @@ static void iriap_connect_indication(void *instance, void *sap, { struct iriap_cb *self, *new; - IRDA_DEBUG(1, "%s()\n", __func__); - self = instance; IRDA_ASSERT(skb != NULL, return;); @@ -804,14 +782,14 @@ static void iriap_connect_indication(void *instance, void *sap, /* Start new server */ new = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL); if (!new) { - IRDA_DEBUG(0, "%s(), open failed\n", __func__); + pr_debug("%s(), open failed\n", __func__); goto out; } /* Now attach up the new "socket" */ new->lsap = irlmp_dup(self->lsap, new); if (!new->lsap) { - IRDA_DEBUG(0, "%s(), dup failed!\n", __func__); + pr_debug("%s(), dup failed!\n", __func__); goto out; } @@ -841,8 +819,6 @@ static int iriap_data_indication(void *instance, void *sap, __u8 *frame; __u8 opcode; - IRDA_DEBUG(3, "%s()\n", __func__); - self = instance; IRDA_ASSERT(skb != NULL, return 0;); @@ -853,21 +829,20 @@ static int iriap_data_indication(void *instance, void *sap, if (self->mode == IAS_SERVER) { /* Call server */ - IRDA_DEBUG(4, "%s(), Calling server!\n", __func__); + pr_debug("%s(), Calling server!\n", __func__); iriap_do_r_connect_event(self, IAP_RECV_F_LST, skb); goto out; } opcode = frame[0]; if (~opcode & IAP_LST) { - IRDA_WARNING("%s:, IrIAS multiframe commands or " - "results is not implemented yet!\n", - __func__); + net_warn_ratelimited("%s: IrIAS multiframe commands or results are not implemented yet!\n", + __func__); goto out; } /* Check for ack frames since they don't contain any data */ if (opcode & IAP_ACK) { - IRDA_DEBUG(0, "%s() Got ack frame!\n", __func__); + pr_debug("%s() Got ack frame!\n", __func__); goto out; } @@ -875,7 +850,7 @@ static int iriap_data_indication(void *instance, void *sap, switch (opcode) { case GET_INFO_BASE: - IRDA_DEBUG(0, "IrLMP GetInfoBaseDetails not implemented!\n"); + pr_debug("IrLMP GetInfoBaseDetails not implemented!\n"); break; case GET_VALUE_BY_CLASS: iriap_do_call_event(self, IAP_RECV_F_LST, NULL); @@ -885,7 +860,7 @@ iriap_getvaluebyclass_confirm(self, skb); break; case IAS_CLASS_UNKNOWN: - IRDA_DEBUG(1, "%s(), No such class!\n", __func__); + pr_debug("%s(), No such class!\n", __func__); /* Finished, close connection! */ iriap_disconnect_request(self); @@ -898,7 +873,7 @@ self->priv); break; case IAS_ATTRIB_UNKNOWN: - IRDA_DEBUG(1, "%s(), No such attribute!\n", __func__); + pr_debug("%s(), No such attribute!\n", __func__); /* Finished, close connection!
*/ iriap_disconnect_request(self); @@ -913,8 +888,8 @@ static int iriap_data_indication(void *instance, void *sap, } break; default: - IRDA_DEBUG(0, "%s(), Unknown op-code: %02x\n", __func__, - opcode); + pr_debug("%s(), Unknown op-code: %02x\n", __func__, + opcode); break; } @@ -935,8 +910,6 @@ void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb) __u8 *fp; __u8 opcode; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IAS_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); @@ -945,16 +918,16 @@ void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb) opcode = fp[0]; if (~opcode & 0x80) { - IRDA_WARNING("%s: IrIAS multiframe commands or results " - "is not implemented yet!\n", __func__); + net_warn_ratelimited("%s: IrIAS multiframe commands or results are not implemented yet!\n", + __func__); return; } opcode &= 0x7f; /* Mask away LST bit */ switch (opcode) { case GET_INFO_BASE: - IRDA_WARNING("%s: GetInfoBaseDetails not implemented yet!\n", - __func__); + net_warn_ratelimited("%s: GetInfoBaseDetails not implemented yet!\n", + __func__); break; case GET_VALUE_BY_CLASS: iriap_getvaluebyclass_indication(self, skb); diff --git a/net/irda/iriap_event.c b/net/irda/iriap_event.c index 703774e29e322966ec4de6cf990bbc427c3ff221..e6098b2e048a3c1f308b839e7aa2cbc0796cbabc 100644 --- a/net/irda/iriap_event.c +++ b/net/irda/iriap_event.c @@ -187,7 +187,7 @@ static void state_s_disconnect(struct iriap_cb *self, IRIAP_EVENT event, case IAP_LM_DISCONNECT_INDICATION: break; default: - IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event); + pr_debug("%s(), Unknown event %d\n", __func__, event); break; } } @@ -219,7 +219,7 @@ static void state_s_connecting(struct iriap_cb *self, IRIAP_EVENT event, iriap_next_client_state(self, S_DISCONNECT); break; default: - IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event); + pr_debug("%s(), Unknown event %d\n", __func__, event); break; } } @@ -243,7 +243,7 @@ static void state_s_call(struct iriap_cb *self, IRIAP_EVENT event, iriap_next_client_state(self, S_DISCONNECT); break; default: - IRDA_DEBUG(0, "state_s_call: Unknown event %d\n", event); + pr_debug("state_s_call: Unknown event %d\n", event); break; } } @@ -271,7 +271,7 @@ static void state_s_make_call(struct iriap_cb *self, IRIAP_EVENT event, iriap_next_call_state(self, S_OUTSTANDING); break; default: - IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event); + pr_debug("%s(), Unknown event %d\n", __func__, event); break; } } @@ -285,7 +285,7 @@ static void state_s_make_call(struct iriap_cb *self, IRIAP_EVENT event, static void state_s_calling(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(0, "%s(), Not implemented\n", __func__); + pr_debug("%s(), Not implemented\n", __func__); } /* @@ -307,7 +307,7 @@ static void state_s_outstanding(struct iriap_cb *self, IRIAP_EVENT event, iriap_next_call_state(self, S_WAIT_FOR_CALL); break; default: - IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event); + pr_debug("%s(), Unknown event %d\n", __func__, event); break; } } @@ -320,7 +320,7 @@ static void state_s_outstanding(struct iriap_cb *self, IRIAP_EVENT event, static void state_s_replying(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(0, "%s(), Not implemented\n", __func__); + pr_debug("%s(), Not implemented\n", __func__); } /* @@ -332,7 +332,7 @@ static void state_s_replying(struct iriap_cb *self, IRIAP_EVENT event, static void state_s_wait_for_call(struct
iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(0, "%s(), Not implemented\n", __func__); + pr_debug("%s(), Not implemented\n", __func__); } @@ -345,7 +345,7 @@ static void state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event, static void state_s_wait_active(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(0, "%s(), Not implemented\n", __func__); + pr_debug("%s(), Not implemented\n", __func__); } /************************************************************************** @@ -368,10 +368,8 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event, switch (event) { case IAP_LM_CONNECT_INDICATION: tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC); - if (tx_skb == NULL) { - IRDA_WARNING("%s: unable to malloc!\n", __func__); + if (tx_skb == NULL) return; - } /* Reserve space for MUX_CONTROL and LAP header */ skb_reserve(tx_skb, LMP_MAX_HEADER); @@ -388,7 +386,7 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event, iriap_next_r_connect_state(self, R_RECEIVING); break; default: - IRDA_DEBUG(0, "%s(), unknown event %d\n", __func__, event); + pr_debug("%s(), unknown event %d\n", __func__, event); break; } } @@ -399,8 +397,6 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event, static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__); - switch (event) { case IAP_LM_DISCONNECT_INDICATION: /* Abort call */ @@ -408,7 +404,7 @@ static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event, iriap_next_r_connect_state(self, R_WAITING); break; default: - IRDA_DEBUG(0, "%s(), unknown event!\n", __func__); + pr_debug("%s(), unknown event!\n", __func__); break; } } @@ -423,13 +419,13 @@ static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event, static void state_r_waiting(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(0, "%s(), Not implemented\n", __func__); + pr_debug("%s(), Not implemented\n", __func__); } static void state_r_wait_active(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(0, "%s(), Not implemented\n", __func__); + pr_debug("%s(), Not implemented\n", __func__); } /* @@ -441,8 +437,6 @@ static void state_r_wait_active(struct iriap_cb *self, IRIAP_EVENT event, static void state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__); - switch (event) { case IAP_RECV_F_LST: iriap_next_r_connect_state(self, R_EXECUTE); @@ -450,7 +444,7 @@ static void state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event, iriap_call_indication(self, skb); break; default: - IRDA_DEBUG(0, "%s(), unknown event!\n", __func__); + pr_debug("%s(), unknown event!\n", __func__); break; } } @@ -464,8 +458,6 @@ static void state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event, static void state_r_execute(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(skb != NULL, return;); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IAS_MAGIC, return;); @@ -485,7 +477,7 @@ static void state_r_execute(struct iriap_cb *self, IRIAP_EVENT event, irlmp_data_request(self->lsap, skb); break; default: - IRDA_DEBUG(0, "%s(), unknown event!\n", __func__); + pr_debug("%s(), unknown event!\n", __func__); break; } } @@ -493,7 +485,7 @@ static void state_r_execute(struct iriap_cb *self, IRIAP_EVENT event, static void state_r_returning(struct iriap_cb 
*self, IRIAP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(0, "%s(), event=%d\n", __func__, event); + pr_debug("%s(), event=%d\n", __func__, event); switch (event) { case IAP_RECV_F_LST: diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c index f07ed9fd5792fabe7e1cffc1016330ee01a0fab8..53b86d0e1630b79de915bb8f05aef1f9e2162cb7 100644 --- a/net/irda/irias_object.c +++ b/net/irda/irias_object.c @@ -48,20 +48,18 @@ struct ias_object *irias_new_object( char *name, int id) { struct ias_object *obj; - IRDA_DEBUG( 4, "%s()\n", __func__); - obj = kzalloc(sizeof(struct ias_object), GFP_ATOMIC); if (obj == NULL) { - IRDA_WARNING("%s(), Unable to allocate object!\n", - __func__); + net_warn_ratelimited("%s(), Unable to allocate object!\n", + __func__); return NULL; } obj->magic = IAS_OBJECT_MAGIC; obj->name = kstrndup(name, IAS_MAX_CLASSNAME, GFP_ATOMIC); if (!obj->name) { - IRDA_WARNING("%s(), Unable to allocate name!\n", - __func__); + net_warn_ratelimited("%s(), Unable to allocate name!\n", + __func__); kfree(obj); return NULL; } @@ -73,8 +71,8 @@ struct ias_object *irias_new_object( char *name, int id) obj->attribs = hashbin_new(HB_LOCK); if (obj->attribs == NULL) { - IRDA_WARNING("%s(), Unable to allocate attribs!\n", - __func__); + net_warn_ratelimited("%s(), Unable to allocate attribs!\n", + __func__); kfree(obj->name); kfree(obj); return NULL; @@ -134,8 +132,8 @@ int irias_delete_object(struct ias_object *obj) /* Remove from list */ node = hashbin_remove_this(irias_objects, (irda_queue_t *) obj); if (!node) - IRDA_DEBUG( 0, "%s(), object already removed!\n", - __func__); + pr_debug("%s(), object already removed!\n", + __func__); /* Destroy */ __irias_delete_object(obj); @@ -269,8 +267,8 @@ int irias_object_change_attribute(char *obj_name, char *attrib_name, /* Find object */ obj = hashbin_lock_find(irias_objects, 0, obj_name); if (obj == NULL) { - IRDA_WARNING("%s: Unable to find object: %s\n", __func__, - obj_name); + net_warn_ratelimited("%s: Unable to find object: %s\n", + __func__, obj_name); return -1; } @@ -280,15 +278,15 @@ int irias_object_change_attribute(char *obj_name, char *attrib_name, /* Find attribute */ attrib = hashbin_find(obj->attribs, 0, attrib_name); if (attrib == NULL) { - IRDA_WARNING("%s: Unable to find attribute: %s\n", - __func__, attrib_name); + net_warn_ratelimited("%s: Unable to find attribute: %s\n", + __func__, attrib_name); spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags); return -1; } if ( attrib->value->type != new_value->type) { - IRDA_DEBUG( 0, "%s(), changing value type not allowed!\n", - __func__); + pr_debug("%s(), changing value type not allowed!\n", + __func__); spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags); return -1; } @@ -322,8 +320,8 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value, attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC); if (attrib == NULL) { - IRDA_WARNING("%s: Unable to allocate attribute!\n", - __func__); + net_warn_ratelimited("%s: Unable to allocate attribute!\n", + __func__); return; } @@ -333,8 +331,8 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value, /* Insert value */ attrib->value = irias_new_integer_value(value); if (!attrib->name || !attrib->value) { - IRDA_WARNING("%s: Unable to allocate attribute!\n", - __func__); + net_warn_ratelimited("%s: Unable to allocate attribute!\n", + __func__); if (attrib->value) irias_delete_value(attrib->value); kfree(attrib->name); @@ -366,8 +364,8 @@ void irias_add_octseq_attrib(struct 
ias_object *obj, char *name, __u8 *octets, attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC); if (attrib == NULL) { - IRDA_WARNING("%s: Unable to allocate attribute!\n", - __func__); + net_warn_ratelimited("%s: Unable to allocate attribute!\n", + __func__); return; } @@ -376,8 +374,8 @@ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets, attrib->value = irias_new_octseq_value( octets, len); if (!attrib->name || !attrib->value) { - IRDA_WARNING("%s: Unable to allocate attribute!\n", - __func__); + net_warn_ratelimited("%s: Unable to allocate attribute!\n", + __func__); if (attrib->value) irias_delete_value(attrib->value); kfree(attrib->name); @@ -408,8 +406,8 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value, attrib = kzalloc(sizeof( struct ias_attrib), GFP_ATOMIC); if (attrib == NULL) { - IRDA_WARNING("%s: Unable to allocate attribute!\n", - __func__); + net_warn_ratelimited("%s: Unable to allocate attribute!\n", + __func__); return; } @@ -418,8 +416,8 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value, attrib->value = irias_new_string_value(value); if (!attrib->name || !attrib->value) { - IRDA_WARNING("%s: Unable to allocate attribute!\n", - __func__); + net_warn_ratelimited("%s: Unable to allocate attribute!\n", + __func__); if (attrib->value) irias_delete_value(attrib->value); kfree(attrib->name); @@ -442,10 +440,8 @@ struct ias_value *irias_new_integer_value(int integer) struct ias_value *value; value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); - if (value == NULL) { - IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); + if (value == NULL) return NULL; - } value->type = IAS_INTEGER; value->len = 4; @@ -467,16 +463,14 @@ struct ias_value *irias_new_string_value(char *string) struct ias_value *value; value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); - if (value == NULL) { - IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); + if (value == NULL) return NULL; - } value->type = IAS_STRING; value->charset = CS_ASCII; value->t.string = kstrndup(string, IAS_MAX_STRING, GFP_ATOMIC); if (!value->t.string) { - IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); + net_warn_ratelimited("%s: Unable to kmalloc!\n", __func__); kfree(value); return NULL; } @@ -498,10 +492,8 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len) struct ias_value *value; value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); - if (value == NULL) { - IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); + if (value == NULL) return NULL; - } value->type = IAS_OCT_SEQ; /* Check length */ @@ -511,7 +503,7 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len) value->t.oct_seq = kmemdup(octseq, len, GFP_ATOMIC); if (value->t.oct_seq == NULL){ - IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); + net_warn_ratelimited("%s: Unable to kmalloc!\n", __func__); kfree(value); return NULL; } @@ -523,10 +515,8 @@ struct ias_value *irias_new_missing_value(void) struct ias_value *value; value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); - if (value == NULL) { - IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); + if (value == NULL) return NULL; - } value->type = IAS_MISSING; @@ -541,8 +531,6 @@ struct ias_value *irias_new_missing_value(void) */ void irias_delete_value(struct ias_value *value) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(value != NULL, return;); switch (value->type) { @@ -559,7 +547,7 @@ void irias_delete_value(struct ias_value *value) kfree(value->t.oct_seq); break; default: - 
IRDA_DEBUG(0, "%s(), Unknown value type!\n", __func__); + pr_debug("%s(), Unknown value type!\n", __func__); break; } kfree(value); diff --git a/net/irda/irlan/irlan_client.c b/net/irda/irlan/irlan_client.c index 42cf1390ce9c89736e18e6a6b61c14227c4c3e7b..c5837a40c78e3e728a6df2a113f502737216013e 100644 --- a/net/irda/irlan/irlan_client.c +++ b/net/irda/irlan/irlan_client.c @@ -72,8 +72,6 @@ static void irlan_client_kick_timer_expired(void *data) { struct irlan_cb *self = (struct irlan_cb *) data; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -91,8 +89,6 @@ static void irlan_client_kick_timer_expired(void *data) static void irlan_client_start_kick_timer(struct irlan_cb *self, int timeout) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - irda_start_timer(&self->client.kick_timer, timeout, (void *) self, irlan_client_kick_timer_expired); } @@ -105,8 +101,6 @@ static void irlan_client_start_kick_timer(struct irlan_cb *self, int timeout) */ void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr) { - IRDA_DEBUG(1, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -117,7 +111,7 @@ void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr) if ((self->client.state != IRLAN_IDLE) || (self->provider.access_type == ACCESS_DIRECT)) { - IRDA_DEBUG(0, "%s(), already awake!\n", __func__ ); + pr_debug("%s(), already awake!\n", __func__); return; } @@ -126,7 +120,7 @@ void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr) self->daddr = daddr; if (self->disconnect_reason == LM_USER_REQUEST) { - IRDA_DEBUG(0, "%s(), still stopped by user\n", __func__ ); + pr_debug("%s(), still stopped by user\n", __func__); return; } @@ -153,8 +147,6 @@ void irlan_client_discovery_indication(discinfo_t *discovery, struct irlan_cb *self; __u32 saddr, daddr; - IRDA_DEBUG(1, "%s()\n", __func__ ); - IRDA_ASSERT(discovery != NULL, return;); /* @@ -175,8 +167,8 @@ void irlan_client_discovery_indication(discinfo_t *discovery, if (self) { IRDA_ASSERT(self->magic == IRLAN_MAGIC, goto out;); - IRDA_DEBUG(1, "%s(), Found instance (%08x)!\n", __func__ , - daddr); + pr_debug("%s(), Found instance (%08x)!\n", __func__ , + daddr); irlan_client_wakeup(self, saddr, daddr); } @@ -195,8 +187,6 @@ static int irlan_client_ctrl_data_indication(void *instance, void *sap, { struct irlan_cb *self; - IRDA_DEBUG(2, "%s()\n", __func__ ); - self = instance; IRDA_ASSERT(self != NULL, return -1;); @@ -206,7 +196,7 @@ static int irlan_client_ctrl_data_indication(void *instance, void *sap, irlan_do_client_event(self, IRLAN_DATA_INDICATION, skb); /* Ready for a new command */ - IRDA_DEBUG(2, "%s(), clearing tx_busy\n", __func__ ); + pr_debug("%s(), clearing tx_busy\n", __func__); self->client.tx_busy = FALSE; /* Check if we have some queued commands waiting to be sent */ @@ -223,7 +213,7 @@ static void irlan_client_ctrl_disconnect_indication(void *instance, void *sap, struct tsap_cb *tsap; struct sk_buff *skb; - IRDA_DEBUG(4, "%s(), reason=%d\n", __func__ , reason); + pr_debug("%s(), reason=%d\n", __func__ , reason); self = instance; tsap = sap; @@ -255,8 +245,6 @@ static void irlan_client_open_ctrl_tsap(struct irlan_cb *self) struct tsap_cb *tsap; notify_t notify; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -275,7 +263,7 @@ static void irlan_client_open_ctrl_tsap(struct irlan_cb *self) 
tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify); if (!tsap) { - IRDA_DEBUG(2, "%s(), Got no tsap!\n", __func__ ); + pr_debug("%s(), Got no tsap!\n", __func__); return; } self->client.tsap_ctrl = tsap; @@ -295,8 +283,6 @@ static void irlan_client_ctrl_connect_confirm(void *instance, void *sap, { struct irlan_cb *self; - IRDA_DEBUG(4, "%s()\n", __func__ ); - self = instance; IRDA_ASSERT(self != NULL, return;); @@ -323,34 +309,34 @@ static void print_ret_code(__u8 code) printk(KERN_INFO "Success\n"); break; case 1: - IRDA_WARNING("IrLAN: Insufficient resources\n"); + net_warn_ratelimited("IrLAN: Insufficient resources\n"); break; case 2: - IRDA_WARNING("IrLAN: Invalid command format\n"); + net_warn_ratelimited("IrLAN: Invalid command format\n"); break; case 3: - IRDA_WARNING("IrLAN: Command not supported\n"); + net_warn_ratelimited("IrLAN: Command not supported\n"); break; case 4: - IRDA_WARNING("IrLAN: Parameter not supported\n"); + net_warn_ratelimited("IrLAN: Parameter not supported\n"); break; case 5: - IRDA_WARNING("IrLAN: Value not supported\n"); + net_warn_ratelimited("IrLAN: Value not supported\n"); break; case 6: - IRDA_WARNING("IrLAN: Not open\n"); + net_warn_ratelimited("IrLAN: Not open\n"); break; case 7: - IRDA_WARNING("IrLAN: Authentication required\n"); + net_warn_ratelimited("IrLAN: Authentication required\n"); break; case 8: - IRDA_WARNING("IrLAN: Invalid password\n"); + net_warn_ratelimited("IrLAN: Invalid password\n"); break; case 9: - IRDA_WARNING("IrLAN: Protocol error\n"); + net_warn_ratelimited("IrLAN: Protocol error\n"); break; case 255: - IRDA_WARNING("IrLAN: Asynchronous status\n"); + net_warn_ratelimited("IrLAN: Asynchronous status\n"); break; } } @@ -374,13 +360,13 @@ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb) IRDA_ASSERT(skb != NULL, return;); - IRDA_DEBUG(4, "%s() skb->len=%d\n", __func__ , (int) skb->len); + pr_debug("%s() skb->len=%d\n", __func__ , (int)skb->len); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); if (!skb) { - IRDA_ERROR("%s(), Got NULL skb!\n", __func__); + net_err_ratelimited("%s(), Got NULL skb!\n", __func__); return; } frame = skb->data; @@ -405,7 +391,7 @@ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb) /* How many parameters?
*/ count = frame[1]; - IRDA_DEBUG(4, "%s(), got %d parameters\n", __func__ , count); + pr_debug("%s(), got %d parameters\n", __func__ , count); ptr = frame+2; @@ -413,7 +399,7 @@ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb) for (i=0; imagic == IRLAN_MAGIC, return;); @@ -475,13 +461,13 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param, else if (strcmp(value, "HOSTED") == 0) self->client.access_type = ACCESS_HOSTED; else { - IRDA_DEBUG(2, "%s(), unknown access type!\n", __func__ ); + pr_debug("%s(), unknown access type!\n", __func__); } } /* IRLAN version */ if (strcmp(param, "IRLAN_VER") == 0) { - IRDA_DEBUG(4, "IrLAN version %d.%d\n", (__u8) value[0], - (__u8) value[1]); + pr_debug("IrLAN version %d.%d\n", (__u8)value[0], + (__u8)value[1]); self->version[0] = value[0]; self->version[1] = value[1]; @@ -490,37 +476,37 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param, /* Which remote TSAP to use for data channel */ if (strcmp(param, "DATA_CHAN") == 0) { self->dtsap_sel_data = value[0]; - IRDA_DEBUG(4, "Data TSAP = %02x\n", self->dtsap_sel_data); + pr_debug("Data TSAP = %02x\n", self->dtsap_sel_data); return; } if (strcmp(param, "CON_ARB") == 0) { memcpy(&tmp_cpu, value, 2); /* Align value */ le16_to_cpus(&tmp_cpu); /* Convert to host order */ self->client.recv_arb_val = tmp_cpu; - IRDA_DEBUG(2, "%s(), receive arb val=%d\n", __func__ , - self->client.recv_arb_val); + pr_debug("%s(), receive arb val=%d\n", __func__ , + self->client.recv_arb_val); } if (strcmp(param, "MAX_FRAME") == 0) { memcpy(&tmp_cpu, value, 2); /* Align value */ le16_to_cpus(&tmp_cpu); /* Convert to host order */ self->client.max_frame = tmp_cpu; - IRDA_DEBUG(4, "%s(), max frame=%d\n", __func__ , - self->client.max_frame); + pr_debug("%s(), max frame=%d\n", __func__ , + self->client.max_frame); } /* RECONNECT_KEY, in case the link goes down! */ if (strcmp(param, "RECONNECT_KEY") == 0) { - IRDA_DEBUG(4, "Got reconnect key: "); + pr_debug("Got reconnect key: "); /* for (i = 0; i < val_len; i++) */ /* printk("%02x", value[i]); */ memcpy(self->client.reconnect_key, value, val_len); self->client.key_len = val_len; - IRDA_DEBUG(4, "\n"); + pr_debug("\n"); } /* FILTER_ENTRY, have we got an ethernet address? 
*/ if (strcmp(param, "FILTER_ENTRY") == 0) { bytes = value; - IRDA_DEBUG(4, "Ethernet address = %pM\n", bytes); + pr_debug("Ethernet address = %pM\n", bytes); for (i = 0; i < 6; i++) self->dev->dev_addr[i] = bytes[i]; } @@ -537,8 +523,6 @@ void irlan_client_get_value_confirm(int result, __u16 obj_id, { struct irlan_cb *self; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(priv != NULL, return;); self = priv; @@ -550,7 +534,7 @@ void irlan_client_get_value_confirm(int result, __u16 obj_id, /* Check if request succeeded */ if (result != IAS_SUCCESS) { - IRDA_DEBUG(2, "%s(), got NULL value!\n", __func__ ); + pr_debug("%s(), got NULL value!\n", __func__); irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL, NULL); return; @@ -568,7 +552,7 @@ void irlan_client_get_value_confirm(int result, __u16 obj_id, irias_delete_value(value); break; default: - IRDA_DEBUG(2, "%s(), unknown type!\n", __func__ ); + pr_debug("%s(), unknown type!\n", __func__); break; } irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL, NULL); diff --git a/net/irda/irlan/irlan_client_event.c b/net/irda/irlan/irlan_client_event.c index 8d5a8ebc444fcbd56db51e28241f58a27efc9ed9..cc93fabbbb199ede3758021ef13261958d3cc641 100644 --- a/net/irda/irlan/irlan_client_event.c +++ b/net/irda/irlan/irlan_client_event.c @@ -92,16 +92,14 @@ void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); switch (event) { case IRLAN_DISCOVERY_INDICATION: if (self->client.iriap) { - IRDA_WARNING("%s(), busy with a previous query\n", - __func__); + net_warn_ratelimited("%s(), busy with a previous query\n", + __func__); return -EBUSY; } @@ -114,10 +112,10 @@ static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event, "IrLAN", "IrDA:TinyTP:LsapSel"); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); + pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__); break; default: - IRDA_DEBUG(4, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -136,8 +134,6 @@ static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); @@ -154,7 +150,7 @@ static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_client_state(self, IRLAN_CONN); break; case IRLAN_IAS_PROVIDER_NOT_AVAIL: - IRDA_DEBUG(2, "%s(), IAS_PROVIDER_NOT_AVAIL\n", __func__ ); + pr_debug("%s(), IAS_PROVIDER_NOT_AVAIL\n", __func__); irlan_next_client_state(self, IRLAN_IDLE); /* Give the client a kick! 
*/ @@ -167,10 +163,10 @@ static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); + pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -189,8 +185,6 @@ static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); switch (event) { @@ -204,10 +198,10 @@ static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); + pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -224,8 +218,6 @@ static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); switch (event) { @@ -244,10 +236,10 @@ static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); + pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -266,8 +258,6 @@ static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); switch(event) { @@ -281,10 +271,10 @@ static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); + pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -305,8 +295,6 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event, { struct qos_info qos; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); switch(event) { @@ -344,7 +332,7 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_client_state(self, IRLAN_DATA); break; default: - IRDA_DEBUG(2, "%s(), unknown access type!\n", __func__ ); + pr_debug("%s(), unknown access type!\n", __func__); break; } break; @@ -353,10 +341,10 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); + pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ 
, event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } @@ -376,8 +364,6 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); switch(event) { @@ -390,10 +376,10 @@ static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); + pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -407,8 +393,6 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event, { struct qos_info qos; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); switch(event) { @@ -429,7 +413,7 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event, } else if (self->client.recv_arb_val > self->provider.send_arb_val) { - IRDA_DEBUG(2, "%s(), lost the battle :-(\n", __func__ ); + pr_debug("%s(), lost the battle :-(\n", __func__); } break; case IRLAN_DATA_CONNECT_INDICATION: @@ -440,10 +424,10 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); + pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -462,8 +446,6 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); @@ -476,7 +458,7 @@ static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_client_state(self, IRLAN_IDLE); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -494,8 +476,6 @@ static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(2, "%s()\n", __func__ ); - if (skb) dev_kfree_skb(skb); @@ -511,8 +491,6 @@ static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_client_state_sync(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(2, "%s()\n", __func__ ); - if (skb) dev_kfree_skb(skb); diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c index 5a2d0a69552993ce5e88bf7efa9ea8e3745424c1..481bbc2a4349300d51c66452a5362b4e1b4b4c81 100644 --- a/net/irda/irlan/irlan_common.c +++ b/net/irda/irlan/irlan_common.c @@ -118,8 +118,6 @@ static int __init irlan_init(void) struct irlan_cb *new; __u16 hints; - IRDA_DEBUG(2, "%s()\n", __func__ ); - #ifdef CONFIG_PROC_FS { struct proc_dir_entry *proc; proc = proc_create("irlan", 0, proc_irda, &irlan_fops); @@ -130,7 +128,6 @@ static int __init irlan_init(void) } #endif /* CONFIG_PROC_FS */ - IRDA_DEBUG(4, 
"%s()\n", __func__ ); hints = irlmp_service_to_hint(S_LAN); /* Register with IrLMP as a client */ @@ -173,8 +170,6 @@ static void __exit irlan_cleanup(void) { struct irlan_cb *self, *next; - IRDA_DEBUG(4, "%s()\n", __func__ ); - irlmp_unregister_client(ckey); irlmp_unregister_service(skey); @@ -201,8 +196,6 @@ static struct irlan_cb __init *irlan_open(__u32 saddr, __u32 daddr) struct net_device *dev; struct irlan_cb *self; - IRDA_DEBUG(2, "%s()\n", __func__ ); - /* Create network device with irlan */ dev = alloc_irlandev(eth ? "eth%d" : "irlan%d"); if (!dev) @@ -245,8 +238,8 @@ static struct irlan_cb __init *irlan_open(__u32 saddr, __u32 daddr) irlan_next_provider_state(self, IRLAN_IDLE); if (register_netdev(dev)) { - IRDA_DEBUG(2, "%s(), register_netdev() failed!\n", - __func__ ); + pr_debug("%s(), register_netdev() failed!\n", + __func__); self = NULL; free_netdev(dev); } else { @@ -266,8 +259,6 @@ static struct irlan_cb __init *irlan_open(__u32 saddr, __u32 daddr) */ static void __irlan_close(struct irlan_cb *self) { - IRDA_DEBUG(2, "%s()\n", __func__ ); - ASSERT_RTNL(); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -314,8 +305,6 @@ static void irlan_connect_indication(void *instance, void *sap, struct irlan_cb *self; struct tsap_cb *tsap; - IRDA_DEBUG(2, "%s()\n", __func__ ); - self = instance; tsap = sap; @@ -326,7 +315,7 @@ static void irlan_connect_indication(void *instance, void *sap, self->max_sdu_size = max_sdu_size; self->max_header_size = max_header_size; - IRDA_DEBUG(0, "%s: We are now connected!\n", __func__); + pr_debug("%s: We are now connected!\n", __func__); del_timer(&self->watchdog_timer); @@ -370,7 +359,7 @@ static void irlan_connect_confirm(void *instance, void *sap, /* TODO: we could set the MTU depending on the max_sdu_size */ - IRDA_DEBUG(0, "%s: We are now connected!\n", __func__); + pr_debug("%s: We are now connected!\n", __func__); del_timer(&self->watchdog_timer); /* @@ -403,7 +392,7 @@ static void irlan_disconnect_indication(void *instance, struct irlan_cb *self; struct tsap_cb *tsap; - IRDA_DEBUG(0, "%s(), reason=%d\n", __func__ , reason); + pr_debug("%s(), reason=%d\n", __func__ , reason); self = instance; tsap = sap; @@ -415,29 +404,30 @@ static void irlan_disconnect_indication(void *instance, IRDA_ASSERT(tsap == self->tsap_data, return;); - IRDA_DEBUG(2, "IrLAN, data channel disconnected by peer!\n"); + pr_debug("IrLAN, data channel disconnected by peer!\n"); /* Save reason so we know if we should try to reconnect or not */ self->disconnect_reason = reason; switch (reason) { case LM_USER_REQUEST: /* User request */ - IRDA_DEBUG(2, "%s(), User requested\n", __func__ ); + pr_debug("%s(), User requested\n", __func__); break; case LM_LAP_DISCONNECT: /* Unexpected IrLAP disconnect */ - IRDA_DEBUG(2, "%s(), Unexpected IrLAP disconnect\n", __func__ ); + pr_debug("%s(), Unexpected IrLAP disconnect\n", __func__); break; case LM_CONNECT_FAILURE: /* Failed to establish IrLAP connection */ - IRDA_DEBUG(2, "%s(), IrLAP connect failed\n", __func__ ); + pr_debug("%s(), IrLAP connect failed\n", __func__); break; case LM_LAP_RESET: /* IrLAP reset */ - IRDA_DEBUG(2, "%s(), IrLAP reset\n", __func__ ); + pr_debug("%s(), IrLAP reset\n", __func__); break; case LM_INIT_DISCONNECT: - IRDA_DEBUG(2, "%s(), IrLMP connect failed\n", __func__ ); + pr_debug("%s(), IrLMP connect failed\n", __func__); break; default: - IRDA_ERROR("%s(), Unknown disconnect reason\n", __func__); + net_err_ratelimited("%s(), Unknown disconnect reason\n", + 
__func__); break; } @@ -459,8 +449,6 @@ void irlan_open_data_tsap(struct irlan_cb *self) struct tsap_cb *tsap; notify_t notify; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -481,7 +469,7 @@ void irlan_open_data_tsap(struct irlan_cb *self) tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify); if (!tsap) { - IRDA_DEBUG(2, "%s(), Got no tsap!\n", __func__ ); + pr_debug("%s(), Got no tsap!\n", __func__); return; } self->tsap_data = tsap; @@ -495,8 +483,6 @@ void irlan_close_tsaps(struct irlan_cb *self) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -585,8 +571,6 @@ int irlan_run_ctrl_tx_queue(struct irlan_cb *self) { struct sk_buff *skb; - IRDA_DEBUG(2, "%s()\n", __func__ ); - if (irda_lock(&self->client.tx_busy) == FALSE) return -EBUSY; @@ -604,7 +588,7 @@ int irlan_run_ctrl_tx_queue(struct irlan_cb *self) dev_kfree_skb(skb); return -1; } - IRDA_DEBUG(2, "%s(), sending ...\n", __func__ ); + pr_debug("%s(), sending ...\n", __func__); return irttp_data_request(self->client.tsap_ctrl, skb); } @@ -617,8 +601,6 @@ int irlan_run_ctrl_tx_queue(struct irlan_cb *self) */ static void irlan_ctrl_data_request(struct irlan_cb *self, struct sk_buff *skb) { - IRDA_DEBUG(2, "%s()\n", __func__ ); - /* Queue command */ skb_queue_tail(&self->client.txq, skb); @@ -637,8 +619,6 @@ void irlan_get_provider_info(struct irlan_cb *self) struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -670,8 +650,6 @@ void irlan_open_data_channel(struct irlan_cb *self) struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -705,8 +683,6 @@ void irlan_close_data_channel(struct irlan_cb *self) struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -746,8 +722,6 @@ static void irlan_open_unicast_addr(struct irlan_cb *self) struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -788,8 +762,6 @@ void irlan_set_broadcast_filter(struct irlan_cb *self, int status) struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -832,8 +804,6 @@ void irlan_set_multicast_filter(struct irlan_cb *self, int status) struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -877,8 +847,6 @@ static void irlan_get_unicast_addr(struct irlan_cb *self) struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -917,8 +885,6 @@ void irlan_get_media_char(struct irlan_cb *self) struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -1005,7 +971,7 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type, int n=0; if (skb == NULL) { - IRDA_DEBUG(2, "%s(), Got NULL skb\n",
__func__ ); + pr_debug("%s(), Got NULL skb\n", __func__); return 0; } @@ -1022,7 +988,7 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type, IRDA_ASSERT(value_len > 0, return 0;); break; default: - IRDA_DEBUG(2, "%s(), Unknown parameter type!\n", __func__ ); + pr_debug("%s(), Unknown parameter type!\n", __func__); return 0; } @@ -1031,7 +997,7 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type, /* Make space for data */ if (skb_tailroom(skb) < (param_len+value_len+3)) { - IRDA_DEBUG(2, "%s(), No more space at end of skb\n", __func__ ); + pr_debug("%s(), No more space at end of skb\n", __func__); return 0; } skb_put(skb, param_len+value_len+3); @@ -1078,13 +1044,11 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len) __u16 val_len; int n=0; - IRDA_DEBUG(4, "%s()\n", __func__ ); - /* get length of parameter name (1 byte) */ name_len = buf[n++]; if (name_len > 254) { - IRDA_DEBUG(2, "%s(), name_len > 254\n", __func__ ); + pr_debug("%s(), name_len > 254\n", __func__); return -RSP_INVALID_COMMAND_FORMAT; } @@ -1101,7 +1065,7 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len) le16_to_cpus(&val_len); n+=2; if (val_len >= 1016) { - IRDA_DEBUG(2, "%s(), parameter length to long\n", __func__ ); + pr_debug("%s(), parameter length too long\n", __func__); return -RSP_INVALID_COMMAND_FORMAT; } *len = val_len; @@ -1111,8 +1075,8 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len) value[val_len] = '\0'; n+=val_len; - IRDA_DEBUG(4, "Parameter: %s ", name); - IRDA_DEBUG(4, "Value: %s\n", value); + pr_debug("Parameter: %s ", name); + pr_debug("Value: %s\n", value); return n; } diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c index dc13f1a45f2f7741d98d35d5299e78d4167f2d60..fcfbe579434ae8d4ffb989f0ac82b250512c5637 100644 --- a/net/irda/irlan/irlan_eth.c +++ b/net/irda/irlan/irlan_eth.c @@ -110,8 +110,6 @@ static int irlan_eth_open(struct net_device *dev) { struct irlan_cb *self = netdev_priv(dev); - IRDA_DEBUG(2, "%s()\n", __func__); - /* Ready to play! */ netif_stop_queue(dev); /* Wait until data link is ready */ @@ -137,8 +135,6 @@ static int irlan_eth_close(struct net_device *dev) { struct irlan_cb *self = netdev_priv(dev); - IRDA_DEBUG(2, "%s()\n", __func__); - /* Stop device */ netif_stop_queue(dev); @@ -231,8 +227,8 @@ int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb) return 0; } if (skb->len < ETH_HLEN) { - IRDA_DEBUG(0, "%s() : IrLAN frame too short (%d)\n", - __func__, skb->len); + pr_debug("%s() : IrLAN frame too short (%d)\n", + __func__, skb->len); dev->stats.rx_dropped++; dev_kfree_skb(skb); return 0; } @@ -281,9 +277,9 @@ void irlan_eth_flow_indication(void *instance, void *sap, LOCAL_FLOW flow) IRDA_ASSERT(dev != NULL, return;); - IRDA_DEBUG(0, "%s() : flow %s ; running %d\n", __func__, - flow == FLOW_STOP ? "FLOW_STOP" : "FLOW_START", - netif_running(dev)); + pr_debug("%s() : flow %s ; running %d\n", __func__, + flow == FLOW_STOP ?
"FLOW_STOP" : "FLOW_START", + netif_running(dev)); switch (flow) { case FLOW_STOP: @@ -310,32 +306,30 @@ static void irlan_eth_set_multicast_list(struct net_device *dev) { struct irlan_cb *self = netdev_priv(dev); - IRDA_DEBUG(2, "%s()\n", __func__); - /* Check if data channel has been connected yet */ if (self->client.state != IRLAN_DATA) { - IRDA_DEBUG(1, "%s(), delaying!\n", __func__); + pr_debug("%s(), delaying!\n", __func__); return; } if (dev->flags & IFF_PROMISC) { /* Enable promiscuous mode */ - IRDA_WARNING("Promiscuous mode not implemented by IrLAN!\n"); + net_warn_ratelimited("Promiscuous mode not implemented by IrLAN!\n"); } else if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > HW_MAX_ADDRS) { /* Disable promiscuous mode, use normal mode. */ - IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__); + pr_debug("%s(), Setting multicast filter\n", __func__); /* hardware_set_filter(NULL); */ irlan_set_multicast_filter(self, TRUE); } else if (!netdev_mc_empty(dev)) { - IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__); + pr_debug("%s(), Setting multicast filter\n", __func__); /* Walk the address list, and load the filter */ /* hardware_set_filter(dev->mc_list); */ irlan_set_multicast_filter(self, TRUE); } else { - IRDA_DEBUG(4, "%s(), Clearing multicast filter\n", __func__); + pr_debug("%s(), Clearing multicast filter\n", __func__); irlan_set_multicast_filter(self, FALSE); } diff --git a/net/irda/irlan/irlan_event.c b/net/irda/irlan/irlan_event.c index 43f16040a6fe0f3016ae66136c84573e6d97be8f..9a1cc11c16f6a590d33b44729d5757e52eed8b24 100644 --- a/net/irda/irlan/irlan_event.c +++ b/net/irda/irlan/irlan_event.c @@ -40,7 +40,7 @@ const char * const irlan_state[] = { void irlan_next_client_state(struct irlan_cb *self, IRLAN_STATE state) { - IRDA_DEBUG(2, "%s(), %s\n", __func__ , irlan_state[state]); + pr_debug("%s(), %s\n", __func__ , irlan_state[state]); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -50,7 +50,7 @@ void irlan_next_client_state(struct irlan_cb *self, IRLAN_STATE state) void irlan_next_provider_state(struct irlan_cb *self, IRLAN_STATE state) { - IRDA_DEBUG(2, "%s(), %s\n", __func__ , irlan_state[state]); + pr_debug("%s(), %s\n", __func__ , irlan_state[state]); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); diff --git a/net/irda/irlan/irlan_filter.c b/net/irda/irlan/irlan_filter.c index 7977be7caf0f090c8437dbc520073fe63c088274..e755e90b2f26485d869a8cf6e89dbc84a9d9562d 100644 --- a/net/irda/irlan/irlan_filter.c +++ b/net/irda/irlan/irlan_filter.c @@ -43,7 +43,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) if ((self->provider.filter_type == IRLAN_DIRECTED) && (self->provider.filter_operation == DYNAMIC)) { - IRDA_DEBUG(0, "Giving peer a dynamic Ethernet address\n"); + pr_debug("Giving peer a dynamic Ethernet address\n"); self->provider.mac_address[0] = 0x40; self->provider.mac_address[1] = 0x00; self->provider.mac_address[2] = 0x00; @@ -73,7 +73,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) if ((self->provider.filter_type == IRLAN_DIRECTED) && (self->provider.filter_mode == FILTER)) { - IRDA_DEBUG(0, "Directed filter on\n"); + pr_debug("Directed filter on\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; @@ -81,7 +81,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) if ((self->provider.filter_type == IRLAN_DIRECTED) && (self->provider.filter_mode == NONE)) { - 
IRDA_DEBUG(0, "Directed filter off\n"); + pr_debug("Directed filter off\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; @@ -90,7 +90,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) if ((self->provider.filter_type == IRLAN_BROADCAST) && (self->provider.filter_mode == FILTER)) { - IRDA_DEBUG(0, "Broadcast filter on\n"); + pr_debug("Broadcast filter on\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; @@ -98,7 +98,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) if ((self->provider.filter_type == IRLAN_BROADCAST) && (self->provider.filter_mode == NONE)) { - IRDA_DEBUG(0, "Broadcast filter off\n"); + pr_debug("Broadcast filter off\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; @@ -106,7 +106,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) if ((self->provider.filter_type == IRLAN_MULTICAST) && (self->provider.filter_mode == FILTER)) { - IRDA_DEBUG(0, "Multicast filter on\n"); + pr_debug("Multicast filter on\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; @@ -114,7 +114,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) if ((self->provider.filter_type == IRLAN_MULTICAST) && (self->provider.filter_mode == NONE)) { - IRDA_DEBUG(0, "Multicast filter off\n"); + pr_debug("Multicast filter off\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; @@ -122,7 +122,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) if ((self->provider.filter_type == IRLAN_MULTICAST) && (self->provider.filter_operation == GET)) { - IRDA_DEBUG(0, "Multicast filter get\n"); + pr_debug("Multicast filter get\n"); skb->data[0] = 0x00; /* Success? */ skb->data[1] = 0x02; irlan_insert_string_param(skb, "FILTER_MODE", "NONE"); @@ -132,7 +132,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) skb->data[0] = 0x00; /* Command not supported */ skb->data[1] = 0x00; - IRDA_DEBUG(0, "Not implemented!\n"); + pr_debug("Not implemented!\n"); } /* @@ -143,18 +143,15 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) */ void irlan_check_command_param(struct irlan_cb *self, char *param, char *value) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); - IRDA_DEBUG(4, "%s, %s\n", param, value); + pr_debug("%s, %s\n", param, value); /* * This is experimental!! DB. 
*/ if (strcmp(param, "MODE") == 0) { - IRDA_DEBUG(0, "%s()\n", __func__ ); self->use_udata = TRUE; return; } diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c index 4664855222f414d7079168e0f8364f9dc1dee85c..15c292cf264436a0cc468cba0285a1ef0391e248 100644 --- a/net/irda/irlan/irlan_provider.c +++ b/net/irda/irlan/irlan_provider.c @@ -70,8 +70,6 @@ static int irlan_provider_data_indication(void *instance, void *sap, struct irlan_cb *self; __u8 code; - IRDA_DEBUG(4, "%s()\n", __func__ ); - self = instance; IRDA_ASSERT(self != NULL, return -1;); @@ -82,32 +80,32 @@ static int irlan_provider_data_indication(void *instance, void *sap, code = skb->data[0]; switch(code) { case CMD_GET_PROVIDER_INFO: - IRDA_DEBUG(4, "Got GET_PROVIDER_INFO command!\n"); + pr_debug("Got GET_PROVIDER_INFO command!\n"); irlan_do_provider_event(self, IRLAN_GET_INFO_CMD, skb); break; case CMD_GET_MEDIA_CHAR: - IRDA_DEBUG(4, "Got GET_MEDIA_CHAR command!\n"); + pr_debug("Got GET_MEDIA_CHAR command!\n"); irlan_do_provider_event(self, IRLAN_GET_MEDIA_CMD, skb); break; case CMD_OPEN_DATA_CHANNEL: - IRDA_DEBUG(4, "Got OPEN_DATA_CHANNEL command!\n"); + pr_debug("Got OPEN_DATA_CHANNEL command!\n"); irlan_do_provider_event(self, IRLAN_OPEN_DATA_CMD, skb); break; case CMD_FILTER_OPERATION: - IRDA_DEBUG(4, "Got FILTER_OPERATION command!\n"); + pr_debug("Got FILTER_OPERATION command!\n"); irlan_do_provider_event(self, IRLAN_FILTER_CONFIG_CMD, skb); break; case CMD_RECONNECT_DATA_CHAN: - IRDA_DEBUG(2, "%s(), Got RECONNECT_DATA_CHAN command\n", __func__ ); - IRDA_DEBUG(2, "%s(), NOT IMPLEMENTED\n", __func__ ); + pr_debug("%s(), Got RECONNECT_DATA_CHAN command\n", __func__); + pr_debug("%s(), NOT IMPLEMENTED\n", __func__); break; case CMD_CLOSE_DATA_CHAN: - IRDA_DEBUG(2, "Got CLOSE_DATA_CHAN command!\n"); - IRDA_DEBUG(2, "%s(), NOT IMPLEMENTED\n", __func__ ); + pr_debug("Got CLOSE_DATA_CHAN command!\n"); + pr_debug("%s(), NOT IMPLEMENTED\n", __func__); break; default: - IRDA_DEBUG(2, "%s(), Unknown command!\n", __func__ ); + pr_debug("%s(), Unknown command!\n", __func__); break; } return 0; @@ -128,8 +126,6 @@ static void irlan_provider_connect_indication(void *instance, void *sap, struct irlan_cb *self; struct tsap_cb *tsap; - IRDA_DEBUG(0, "%s()\n", __func__ ); - self = instance; tsap = sap; @@ -179,7 +175,7 @@ static void irlan_provider_disconnect_indication(void *instance, void *sap, struct irlan_cb *self; struct tsap_cb *tsap; - IRDA_DEBUG(4, "%s(), reason=%d\n", __func__ , reason); + pr_debug("%s(), reason=%d\n", __func__ , reason); self = instance; tsap = sap; @@ -233,7 +229,7 @@ int irlan_provider_parse_command(struct irlan_cb *self, int cmd, IRDA_ASSERT(skb != NULL, return -RSP_PROTOCOL_ERROR;); - IRDA_DEBUG(4, "%s(), skb->len=%d\n", __func__ , (int)skb->len); + pr_debug("%s(), skb->len=%d\n", __func__ , (int)skb->len); IRDA_ASSERT(self != NULL, return -RSP_PROTOCOL_ERROR;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -RSP_PROTOCOL_ERROR;); @@ -255,7 +251,7 @@ int irlan_provider_parse_command(struct irlan_cb *self, int cmd, /* How many parameters? 
*/ count = frame[1]; - IRDA_DEBUG(4, "Got %d parameters\n", count); + pr_debug("Got %d parameters\n", count); ptr = frame+2; @@ -263,7 +259,7 @@ int irlan_provider_parse_command(struct irlan_cb *self, int cmd, for (i=0; imagic == IRLAN_MAGIC, return;); @@ -320,7 +314,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command, irlan_insert_string_param(skb, "MEDIA", "802.5"); break; default: - IRDA_DEBUG(2, "%s(), unknown media type!\n", __func__ ); + pr_debug("%s(), unknown media type!\n", __func__); break; } irlan_insert_short_param(skb, "IRLAN_VER", 0x0101); @@ -344,7 +338,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command, irlan_insert_string_param(skb, "ACCESS_TYPE", "HOSTED"); break; default: - IRDA_DEBUG(2, "%s(), Unknown access type\n", __func__ ); + pr_debug("%s(), Unknown access type\n", __func__); break; } irlan_insert_short_param(skb, "MAX_FRAME", 0x05ee); @@ -364,7 +358,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command, irlan_filter_request(self, skb); break; default: - IRDA_DEBUG(2, "%s(), Unknown command!\n", __func__ ); + pr_debug("%s(), Unknown command!\n", __func__); break; } @@ -382,8 +376,6 @@ int irlan_provider_open_ctrl_tsap(struct irlan_cb *self) struct tsap_cb *tsap; notify_t notify; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); @@ -403,7 +395,7 @@ int irlan_provider_open_ctrl_tsap(struct irlan_cb *self) tsap = irttp_open_tsap(LSAP_ANY, 1, &notify); if (!tsap) { - IRDA_DEBUG(2, "%s(), Got no tsap!\n", __func__ ); + pr_debug("%s(), Got no tsap!\n", __func__); return -1; } self->provider.tsap_ctrl = tsap; diff --git a/net/irda/irlan/irlan_provider_event.c b/net/irda/irlan/irlan_provider_event.c index 01a9d7c993eee12b5ef4e404f26282917934e455..9c4f7f51d6b5a5c1eb9f2d747bfa68b9d77843bb 100644 --- a/net/irda/irlan/irlan_provider_event.c +++ b/net/irda/irlan/irlan_provider_event.c @@ -72,8 +72,6 @@ void irlan_do_provider_event(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); switch(event) { @@ -82,7 +80,7 @@ static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_provider_state( self, IRLAN_INFO); break; default: - IRDA_DEBUG(4, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -101,8 +99,6 @@ static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event, { int ret; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); switch(event) { @@ -147,7 +143,7 @@ static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_provider_state(self, IRLAN_IDLE); break; default: - IRDA_DEBUG( 0, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -166,8 +162,6 @@ static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); switch(event) { @@ -186,7 +180,7 @@ static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_provider_state(self, IRLAN_IDLE); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %d\n",
__func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -205,8 +199,6 @@ static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); @@ -221,7 +213,7 @@ static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_provider_state(self, IRLAN_IDLE); break; default: - IRDA_DEBUG( 0, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) diff --git a/net/irda/irlap.c b/net/irda/irlap.c index a778df55f5d67123c332ce6400212cd59093bfd4..7f2cafddfb6e774da73e2def256df22191507b4c 100644 --- a/net/irda/irlap.c +++ b/net/irda/irlap.c @@ -60,8 +60,7 @@ static void __irlap_close(struct irlap_cb *self); static void irlap_init_qos_capabilities(struct irlap_cb *self, struct qos_info *qos_user); -#ifdef CONFIG_IRDA_DEBUG -static const char *const lap_reasons[] = { +static const char *const lap_reasons[] __maybe_unused = { "ERROR, NOT USED", "LAP_DISC_INDICATION", "LAP_NO_RESPONSE", @@ -71,7 +70,6 @@ static const char *const lap_reasons[] = { "LAP_PRIMARY_CONFLICT", "ERROR, NOT USED", }; -#endif /* CONFIG_IRDA_DEBUG */ int __init irlap_init(void) { @@ -85,8 +83,8 @@ int __init irlap_init(void) /* Allocate master array */ irlap = hashbin_new(HB_LOCK); if (irlap == NULL) { - IRDA_ERROR("%s: can't allocate irlap hashbin!\n", - __func__); + net_err_ratelimited("%s: can't allocate irlap hashbin!\n", + __func__); return -ENOMEM; } @@ -111,8 +109,6 @@ struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos, { struct irlap_cb *self; - IRDA_DEBUG(4, "%s()\n", __func__); - /* Initialize the irlap structure. 
*/ self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL); if (self == NULL) @@ -213,8 +209,6 @@ void irlap_close(struct irlap_cb *self) { struct irlap_cb *lap; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -229,7 +223,7 @@ void irlap_close(struct irlap_cb *self) /* Be sure that we manage to remove ourself from the hash */ lap = hashbin_remove(irlap, self->saddr, NULL); if (!lap) { - IRDA_DEBUG(1, "%s(), Didn't find myself!\n", __func__); + pr_debug("%s(), Didn't find myself!\n", __func__); return; } __irlap_close(lap); @@ -244,8 +238,6 @@ EXPORT_SYMBOL(irlap_close); */ void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -263,8 +255,6 @@ void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb) */ void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata) { - IRDA_DEBUG(4, "%s()\n", __func__); - irlap_do_event(self, CONNECT_RESPONSE, userdata, NULL); } @@ -278,7 +268,7 @@ void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata) void irlap_connect_request(struct irlap_cb *self, __u32 daddr, struct qos_info *qos_user, int sniff) { - IRDA_DEBUG(3, "%s(), daddr=0x%08x\n", __func__, daddr); + pr_debug("%s(), daddr=0x%08x\n", __func__, daddr); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -305,8 +295,6 @@ void irlap_connect_request(struct irlap_cb *self, __u32 daddr, */ void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -342,8 +330,6 @@ void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb, IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); - IRDA_DEBUG(3, "%s()\n", __func__); - IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER), return;); skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER); @@ -389,8 +375,6 @@ void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb) IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); - IRDA_DEBUG(3, "%s()\n", __func__); - IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER), return;); skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER); @@ -415,8 +399,6 @@ void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb) #ifdef CONFIG_IRDA_ULTRA void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb) { - IRDA_DEBUG(1, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); @@ -435,8 +417,6 @@ void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb) */ void irlap_disconnect_request(struct irlap_cb *self) { - IRDA_DEBUG(3, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -456,7 +436,7 @@ void irlap_disconnect_request(struct irlap_cb *self) irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL); break; default: - IRDA_DEBUG(2, "%s(), disconnect pending!\n", __func__); + pr_debug("%s(), disconnect pending!\n", __func__); self->disconnect_pending = TRUE; break; } @@ -470,7 +450,7 @@ void irlap_disconnect_request(struct irlap_cb *self) */ void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON 
reason) { - IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, lap_reasons[reason]); + pr_debug("%s(), reason=%s\n", __func__, lap_reasons[reason]); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -480,7 +460,7 @@ void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason) switch (reason) { case LAP_RESET_INDICATION: - IRDA_DEBUG(1, "%s(), Sending reset request!\n", __func__); + pr_debug("%s(), Sending reset request!\n", __func__); irlap_do_event(self, RESET_REQUEST, NULL, NULL); break; case LAP_NO_RESPONSE: /* FALLTHROUGH */ @@ -491,7 +471,8 @@ void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason) reason, NULL); break; default: - IRDA_ERROR("%s: Unknown reason %d\n", __func__, reason); + net_err_ratelimited("%s: Unknown reason %d\n", + __func__, reason); } } @@ -509,7 +490,7 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery) IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(discovery != NULL, return;); - IRDA_DEBUG(4, "%s(), nslots = %d\n", __func__, discovery->nslots); + pr_debug("%s(), nslots = %d\n", __func__, discovery->nslots); IRDA_ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) || (discovery->nslots == 8) || (discovery->nslots == 16), @@ -517,8 +498,8 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery) /* Discovery is only possible in NDM mode */ if (self->state != LAP_NDM) { - IRDA_DEBUG(4, "%s(), discovery only possible in NDM mode\n", - __func__); + pr_debug("%s(), discovery only possible in NDM mode\n", + __func__); irlap_discovery_confirm(self, NULL); /* Note : in theory, if we are not in NDM, we could postpone * the discovery like we do for connection request. @@ -540,8 +521,8 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery) self->discovery_log = hashbin_new(HB_NOLOCK); if (self->discovery_log == NULL) { - IRDA_WARNING("%s(), Unable to allocate discovery log!\n", - __func__); + net_warn_ratelimited("%s(), Unable to allocate discovery log!\n", + __func__); return; } @@ -596,8 +577,6 @@ void irlap_discovery_confirm(struct irlap_cb *self, hashbin_t *discovery_log) */ void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(discovery != NULL, return;); @@ -625,10 +604,10 @@ void irlap_status_indication(struct irlap_cb *self, int quality_of_link) { switch (quality_of_link) { case STATUS_NO_ACTIVITY: - IRDA_MESSAGE("IrLAP, no activity on link!\n"); + net_info_ratelimited("IrLAP, no activity on link!\n"); break; case STATUS_NOISY: - IRDA_MESSAGE("IrLAP, noisy link!\n"); + net_info_ratelimited("IrLAP, noisy link!\n"); break; default: break; @@ -642,8 +621,6 @@ void irlap_status_indication(struct irlap_cb *self, int quality_of_link) */ void irlap_reset_indication(struct irlap_cb *self) { - IRDA_DEBUG(1, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -658,7 +635,6 @@ void irlap_reset_indication(struct irlap_cb *self) */ void irlap_reset_confirm(void) { - IRDA_DEBUG(1, "%s()\n", __func__); } /* @@ -758,7 +734,7 @@ int irlap_validate_nr_received(struct irlap_cb *self, int nr) { /* nr as expected? 
*/ if (nr == self->vs) { - IRDA_DEBUG(4, "%s(), expected!\n", __func__); + pr_debug("%s(), expected!\n", __func__); return NR_EXPECTED; } @@ -786,8 +762,6 @@ int irlap_validate_nr_received(struct irlap_cb *self, int nr) */ void irlap_initiate_connection_state(struct irlap_cb *self) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -869,7 +843,7 @@ static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now) { struct sk_buff *skb; - IRDA_DEBUG(0, "%s(), setting speed to %d\n", __func__, speed); + pr_debug("%s(), setting speed to %d\n", __func__, speed); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -912,7 +886,7 @@ static void irlap_init_qos_capabilities(struct irlap_cb *self, * user may not have set all of them. */ if (qos_user) { - IRDA_DEBUG(1, "%s(), Found user specified QoS!\n", __func__); + pr_debug("%s(), Found user specified QoS!\n", __func__); if (qos_user->baud_rate.bits) self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits; @@ -942,8 +916,6 @@ static void irlap_init_qos_capabilities(struct irlap_cb *self, */ void irlap_apply_default_connection_parameters(struct irlap_cb *self) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -1005,8 +977,6 @@ void irlap_apply_default_connection_parameters(struct irlap_cb *self) */ void irlap_apply_connection_parameters(struct irlap_cb *self, int now) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -1085,12 +1055,12 @@ void irlap_apply_connection_parameters(struct irlap_cb *self, int now) self->N1 = sysctl_warn_noreply_time * 1000 / self->qos_rx.max_turn_time.value; - IRDA_DEBUG(4, "Setting N1 = %d\n", self->N1); + pr_debug("Setting N1 = %d\n", self->N1); /* Set N2 to match our own disconnect time */ self->N2 = self->qos_tx.link_disc_time.value * 1000 / self->qos_rx.max_turn_time.value; - IRDA_DEBUG(4, "Setting N2 = %d\n", self->N2); + pr_debug("Setting N2 = %d\n", self->N2); } #ifdef CONFIG_PROC_FS diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c index ccd214f9d1969e159fceea50d04436a24254e5d9..0e1b4d79f7458a9143b783830a4085421aabb980 100644 --- a/net/irda/irlap_event.c +++ b/net/irda/irlap_event.c @@ -78,8 +78,7 @@ static int irlap_state_sclose (struct irlap_cb *self, IRLAP_EVENT event, static int irlap_state_reset_check(struct irlap_cb *, IRLAP_EVENT event, struct sk_buff *, struct irlap_info *); -#ifdef CONFIG_IRDA_DEBUG -static const char *const irlap_event[] = { +static const char *const irlap_event[] __maybe_unused = { "DISCOVERY_REQUEST", "CONNECT_REQUEST", "CONNECT_RESPONSE", @@ -119,7 +118,6 @@ static const char *const irlap_event[] = { "BACKOFF_TIMER_EXPIRED", "MEDIA_BUSY_TIMER_EXPIRED", }; -#endif /* CONFIG_IRDA_DEBUG */ const char *const irlap_state[] = { "LAP_NDM", @@ -218,7 +216,7 @@ static void irlap_start_poll_timer(struct irlap_cb *self, int timeout) } else self->fast_RR = FALSE; - IRDA_DEBUG(3, "%s(), timeout=%d (%ld)\n", __func__, timeout, jiffies); + pr_debug("%s(), timeout=%d (%ld)\n", __func__, timeout, jiffies); #endif /* CONFIG_IRDA_FAST_RR */ if (timeout == 0) @@ -242,8 +240,8 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event, if (!self || self->magic != LAP_MAGIC) return; - IRDA_DEBUG(3, "%s(), event = %s, state = %s\n", __func__, - irlap_event[event], irlap_state[self->state]); + pr_debug("%s(), event = 
%s, state = %s\n", __func__, + irlap_event[event], irlap_state[self->state]); ret = (*state[self->state])(self, event, skb, info); @@ -260,8 +258,8 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event, * try to disconnect link if we send any data frames, since * that will change the state away form XMIT */ - IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__, - skb_queue_len(&self->txq)); + pr_debug("%s() : queue len = %d\n", __func__, + skb_queue_len(&self->txq)); if (!skb_queue_empty(&self->txq)) { /* Prevent race conditions with irlap_data_request() */ @@ -340,8 +338,8 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event, /* Note : this will never happen, because we test * media busy in irlap_connect_request() and * postpone the event... - Jean II */ - IRDA_DEBUG(0, "%s(), CONNECT_REQUEST: media busy!\n", - __func__); + pr_debug("%s(), CONNECT_REQUEST: media busy!\n", + __func__); /* Always switch state before calling upper layers */ irlap_next_state(self, LAP_NDM); @@ -367,16 +365,16 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event, irlap_connect_indication(self, skb); } else { - IRDA_DEBUG(0, "%s(), SNRM frame does not " - "contain an I field!\n", __func__); + pr_debug("%s(), SNRM frame does not contain an I field!\n", + __func__); } break; case DISCOVERY_REQUEST: IRDA_ASSERT(info != NULL, return -1;); if (self->media_busy) { - IRDA_DEBUG(1, "%s(), DISCOVERY_REQUEST: media busy!\n", - __func__); + pr_debug("%s(), DISCOVERY_REQUEST: media busy!\n", + __func__); /* irlap->log.condition = MEDIA_BUSY; */ /* This will make IrLMP try again */ @@ -442,7 +440,8 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event, * those cases... * Jean II */ - IRDA_DEBUG(1, "%s(), Receiving final discovery request, missed the discovery slots :-(\n", __func__); + pr_debug("%s(), Receiving final discovery request, missed the discovery slots :-(\n", + __func__); /* Last discovery request -> in the log */ irlap_discovery_indication(self, info->discovery); @@ -520,8 +519,8 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event, case RECV_UI_FRAME: /* Only accept broadcast frames in NDM mode */ if (info->caddr != CBROADCAST) { - IRDA_DEBUG(0, "%s(), not a broadcast frame!\n", - __func__); + pr_debug("%s(), not a broadcast frame!\n", + __func__); } else irlap_unitdata_indication(self, skb); break; @@ -537,11 +536,11 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event, irlap_send_test_frame(self, CBROADCAST, info->daddr, skb); break; case RECV_TEST_RSP: - IRDA_DEBUG(0, "%s() not implemented!\n", __func__); + pr_debug("%s() not implemented!\n", __func__); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__, - irlap_event[event]); + pr_debug("%s(), Unknown event %s\n", __func__, + irlap_event[event]); ret = -1; break; @@ -568,13 +567,12 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event, IRDA_ASSERT(info != NULL, return -1;); IRDA_ASSERT(info->discovery != NULL, return -1;); - IRDA_DEBUG(4, "%s(), daddr=%08x\n", __func__, - info->discovery->data.daddr); + pr_debug("%s(), daddr=%08x\n", __func__, + info->discovery->data.daddr); if (!self->discovery_log) { - IRDA_WARNING("%s: discovery log is gone! " - "maybe the discovery timeout has been set" - " too short?\n", __func__); + net_warn_ratelimited("%s: discovery log is gone! 
maybe the discovery timeout has been set too short?\n", + __func__); break; } hashbin_insert(self->discovery_log, @@ -599,7 +597,8 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event, IRDA_ASSERT(info != NULL, return -1;); - IRDA_DEBUG(1, "%s(), Receiving discovery request (s = %d) while performing discovery :-(\n", __func__, info->s); + pr_debug("%s(), Receiving discovery request (s = %d) while performing discovery :-(\n", + __func__, info->s); /* Last discovery request ? */ if (info->s == 0xff) @@ -613,8 +612,8 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event, * timing requirements. */ if (irda_device_is_receiving(self->netdev) && !self->add_wait) { - IRDA_DEBUG(2, "%s(), device is slow to answer, " - "waiting some more!\n", __func__); + pr_debug("%s(), device is slow to answer, waiting some more!\n", + __func__); irlap_start_slot_timer(self, msecs_to_jiffies(10)); self->add_wait = TRUE; return ret; @@ -650,8 +649,8 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event, } break; default: - IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__, - irlap_event[event]); + pr_debug("%s(), Unknown event %s\n", __func__, + irlap_event[event]); ret = -1; break; @@ -672,15 +671,13 @@ static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event, discovery_t *discovery_rsp; int ret=0; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); switch (event) { case QUERY_TIMER_EXPIRED: - IRDA_DEBUG(0, "%s(), QUERY_TIMER_EXPIRED <%ld>\n", - __func__, jiffies); + pr_debug("%s(), QUERY_TIMER_EXPIRED <%ld>\n", + __func__, jiffies); irlap_next_state(self, LAP_NDM); break; case RECV_DISCOVERY_XID_CMD: @@ -718,8 +715,8 @@ static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event, } break; default: - IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __func__, - event, irlap_event[event]); + pr_debug("%s(), Unknown event %d, %s\n", __func__, + event, irlap_event[event]); ret = -1; break; @@ -739,7 +736,7 @@ static int irlap_state_conn(struct irlap_cb *self, IRLAP_EVENT event, { int ret = 0; - IRDA_DEBUG(4, "%s(), event=%s\n", __func__, irlap_event[ event]); + pr_debug("%s(), event=%s\n", __func__, irlap_event[event]); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); @@ -799,20 +796,20 @@ static int irlap_state_conn(struct irlap_cb *self, IRLAP_EVENT event, break; case RECV_DISCOVERY_XID_CMD: - IRDA_DEBUG(3, "%s(), event RECV_DISCOVER_XID_CMD!\n", - __func__); + pr_debug("%s(), event RECV_DISCOVER_XID_CMD!\n", + __func__); irlap_next_state(self, LAP_NDM); break; case DISCONNECT_REQUEST: - IRDA_DEBUG(0, "%s(), Disconnect request!\n", __func__); + pr_debug("%s(), Disconnect request!\n", __func__); irlap_send_dm_frame(self); irlap_next_state( self, LAP_NDM); irlap_disconnect_indication(self, LAP_DISC_INDICATION); break; default: - IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __func__, - event, irlap_event[event]); + pr_debug("%s(), Unknown event %d, %s\n", __func__, + event, irlap_event[event]); ret = -1; break; @@ -833,8 +830,6 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event, { int ret = 0; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); @@ -862,7 +857,7 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event, self->retry_count++; break; case RECV_SNRM_CMD: - IRDA_DEBUG(4, "%s(), SNRM battle!\n", 
__func__); + pr_debug("%s(), SNRM battle!\n", __func__); IRDA_ASSERT(skb != NULL, return 0;); IRDA_ASSERT(info != NULL, return 0;); @@ -949,8 +944,8 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event, irlap_disconnect_indication(self, LAP_DISC_INDICATION); break; default: - IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __func__, - event, irlap_event[event]); + pr_debug("%s(), Unknown event %d, %s\n", __func__, + event, irlap_event[event]); ret = -1; break; @@ -967,7 +962,7 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event, static int irlap_state_offline(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) { - IRDA_DEBUG( 0, "%s(), Unknown event\n", __func__); + pr_debug("%s(), Unknown event\n", __func__); return -1; } @@ -1030,8 +1025,8 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event, * speed and turn-around-time. */ if((!nextfit) && (skb->len > self->bytes_left)) { - IRDA_DEBUG(0, "%s(), Not allowed to transmit" - " more bytes!\n", __func__); + pr_debug("%s(), Not allowed to transmit more bytes!\n", + __func__); /* Requeue the skb */ skb_queue_head(&self->txq, skb_get(skb)); /* @@ -1082,8 +1077,8 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event, self->fast_RR = FALSE; #endif /* CONFIG_IRDA_FAST_RR */ } else { - IRDA_DEBUG(4, "%s(), Unable to send! remote busy?\n", - __func__); + pr_debug("%s(), Unable to send! remote busy?\n", + __func__); skb_queue_head(&self->txq, skb_get(skb)); /* @@ -1094,8 +1089,8 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event, } break; case POLL_TIMER_EXPIRED: - IRDA_DEBUG(3, "%s(), POLL_TIMER_EXPIRED <%ld>\n", - __func__, jiffies); + pr_debug("%s(), POLL_TIMER_EXPIRED <%ld>\n", + __func__, jiffies); irlap_send_rr_frame(self, CMD_FRAME); /* Return to NRM properly - Jean II */ self->window = self->window_size; @@ -1120,8 +1115,8 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event, * when we return... 
- Jean II */ break; default: - IRDA_DEBUG(0, "%s(), Unknown event %s\n", - __func__, irlap_event[event]); + pr_debug("%s(), Unknown event %s\n", + __func__, irlap_event[event]); ret = -EINVAL; break; @@ -1139,8 +1134,6 @@ static int irlap_state_pclose(struct irlap_cb *self, IRLAP_EVENT event, { int ret = 0; - IRDA_DEBUG(1, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); @@ -1174,7 +1167,7 @@ static int irlap_state_pclose(struct irlap_cb *self, IRLAP_EVENT event, } break; default: - IRDA_DEBUG(1, "%s(), Unknown event %d\n", __func__, event); + pr_debug("%s(), Unknown event %d\n", __func__, event); ret = -1; break; @@ -1296,9 +1289,8 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, /* Keep state */ irlap_next_state(self, LAP_NRM_P); } else { - IRDA_DEBUG(4, - "%s(), missing or duplicate frame!\n", - __func__); + pr_debug("%s(), missing or duplicate frame!\n", + __func__); /* Update Nr received */ irlap_update_nr_received(self, info->nr); @@ -1367,8 +1359,8 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, if ((ns_status == NS_UNEXPECTED) && (nr_status == NR_UNEXPECTED)) { - IRDA_DEBUG(4, "%s(), unexpected nr and ns!\n", - __func__); + pr_debug("%s(), unexpected nr and ns!\n", + __func__); if (info->pf) { /* Resend rejected frames */ irlap_resend_rejected_frames(self, CMD_FRAME); @@ -1408,9 +1400,9 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, } break; } - IRDA_DEBUG(1, "%s(), Not implemented!\n", __func__); - IRDA_DEBUG(1, "%s(), event=%s, ns_status=%d, nr_status=%d\n", - __func__, irlap_event[event], ns_status, nr_status); + pr_debug("%s(), Not implemented!\n", __func__); + pr_debug("%s(), event=%s, ns_status=%d, nr_status=%d\n", + __func__, irlap_event[event], ns_status, nr_status); break; case RECV_UI_FRAME: /* Poll bit cleared? */ @@ -1421,7 +1413,8 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, del_timer(&self->final_timer); irlap_data_indication(self, skb, TRUE); irlap_next_state(self, LAP_XMIT_P); - IRDA_DEBUG(1, "%s: RECV_UI_FRAME: next state %s\n", __func__, irlap_state[self->state]); + pr_debug("%s: RECV_UI_FRAME: next state %s\n", + __func__, irlap_state[self->state]); irlap_start_poll_timer(self, self->poll_timeout); } break; @@ -1464,10 +1457,9 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, /* Update Nr received */ irlap_update_nr_received(self, info->nr); - IRDA_DEBUG(4, "RECV_RR_FRAME: Retrans:%d, nr=%d, va=%d, " - "vs=%d, vr=%d\n", - self->retry_count, info->nr, self->va, - self->vs, self->vr); + pr_debug("RECV_RR_FRAME: Retrans:%d, nr=%d, va=%d, vs=%d, vr=%d\n", + self->retry_count, info->nr, self->va, + self->vs, self->vr); /* Resend rejected frames */ irlap_resend_rejected_frames(self, CMD_FRAME); @@ -1475,8 +1467,8 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, irlap_next_state(self, LAP_NRM_P); } else if (ret == NR_INVALID) { - IRDA_DEBUG(1, "%s(), Received RR with " - "invalid nr !\n", __func__); + pr_debug("%s(), Received RR with invalid nr !\n", + __func__); irlap_next_state(self, LAP_RESET_WAIT); @@ -1512,8 +1504,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, * we only do this once for each frame. */ if (irda_device_is_receiving(self->netdev) && !self->add_wait) { - IRDA_DEBUG(1, "FINAL_TIMER_EXPIRED when receiving a " - "frame! Waiting a little bit more!\n"); + pr_debug("FINAL_TIMER_EXPIRED when receiving a frame! 
Waiting a little bit more!\n"); irlap_start_final_timer(self, msecs_to_jiffies(300)); /* @@ -1530,18 +1521,18 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, if (self->retry_count < self->N2) { if (skb_peek(&self->wx_list) == NULL) { /* Retry sending the pf bit to the secondary */ - IRDA_DEBUG(4, "nrm_p: resending rr"); + pr_debug("nrm_p: resending rr"); irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_rr_frame(self, CMD_FRAME); } else { - IRDA_DEBUG(4, "nrm_p: resend frames"); + pr_debug("nrm_p: resend frames"); irlap_resend_rejected_frames(self, CMD_FRAME); } irlap_start_final_timer(self, self->final_timeout); self->retry_count++; - IRDA_DEBUG(4, "irlap_state_nrm_p: FINAL_TIMER_EXPIRED:" - " retry_count=%d\n", self->retry_count); + pr_debug("irlap_state_nrm_p: FINAL_TIMER_EXPIRED: retry_count=%d\n", + self->retry_count); /* Early warning event. I'm using a pretty liberal * interpretation of the spec and generate an event @@ -1581,7 +1572,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, irlap_start_final_timer(self, 2 * self->final_timeout); break; case RECV_RD_RSP: - IRDA_DEBUG(1, "%s(), RECV_RD_RSP\n", __func__); + pr_debug("%s(), RECV_RD_RSP\n", __func__); irlap_flush_all_queues(self); irlap_next_state(self, LAP_XMIT_P); @@ -1589,8 +1580,8 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, irlap_disconnect_request(self); break; default: - IRDA_DEBUG(1, "%s(), Unknown event %s\n", - __func__, irlap_event[event]); + pr_debug("%s(), Unknown event %s\n", + __func__, irlap_event[event]); ret = -1; break; @@ -1610,7 +1601,7 @@ static int irlap_state_reset_wait(struct irlap_cb *self, IRLAP_EVENT event, { int ret = 0; - IRDA_DEBUG(3, "%s(), event = %s\n", __func__, irlap_event[event]); + pr_debug("%s(), event = %s\n", __func__, irlap_event[event]); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); @@ -1636,8 +1627,8 @@ static int irlap_state_reset_wait(struct irlap_cb *self, IRLAP_EVENT event, irlap_next_state( self, LAP_PCLOSE); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__, - irlap_event[event]); + pr_debug("%s(), Unknown event %s\n", __func__, + irlap_event[event]); ret = -1; break; @@ -1657,7 +1648,7 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event, { int ret = 0; - IRDA_DEBUG(3, "%s(), event = %s\n", __func__, irlap_event[event]); + pr_debug("%s(), event = %s\n", __func__, irlap_event[event]); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); @@ -1715,7 +1706,7 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event, * state */ if (!info) { - IRDA_DEBUG(3, "%s(), RECV_SNRM_CMD\n", __func__); + pr_debug("%s(), RECV_SNRM_CMD\n", __func__); irlap_initiate_connection_state(self); irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_ua_response_frame(self, &self->qos_rx); @@ -1723,14 +1714,13 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event, irlap_start_wd_timer(self, self->wd_timeout); irlap_next_state(self, LAP_NDM); } else { - IRDA_DEBUG(0, - "%s(), SNRM frame contained an I field!\n", - __func__); + pr_debug("%s(), SNRM frame contained an I field!\n", + __func__); } break; default: - IRDA_DEBUG(1, "%s(), Unknown event %s\n", - __func__, irlap_event[event]); + pr_debug("%s(), Unknown event %s\n", + __func__, irlap_event[event]); ret = -1; break; @@ -1750,7 +1740,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT 
event, { int ret = 0; - IRDA_DEBUG(4, "%s(), event=%s\n", __func__, irlap_event[event]); + pr_debug("%s(), event=%s\n", __func__, irlap_event[event]); IRDA_ASSERT(self != NULL, return -ENODEV;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;); @@ -1786,8 +1776,8 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event, * speed and turn-around-time. */ if((!nextfit) && (skb->len > self->bytes_left)) { - IRDA_DEBUG(0, "%s(), Not allowed to transmit" - " more bytes!\n", __func__); + pr_debug("%s(), Not allowed to transmit more bytes!\n", + __func__); /* Requeue the skb */ skb_queue_head(&self->txq, skb_get(skb)); @@ -1833,7 +1823,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event, ret = -EPROTO; } } else { - IRDA_DEBUG(2, "%s(), Unable to send!\n", __func__); + pr_debug("%s(), Unable to send!\n", __func__); skb_queue_head(&self->txq, skb_get(skb)); ret = -EPROTO; } @@ -1849,8 +1839,8 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event, * when we return... - Jean II */ break; default: - IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__, - irlap_event[event]); + pr_debug("%s(), Unknown event %s\n", __func__, + irlap_event[event]); ret = -EINVAL; break; @@ -1872,7 +1862,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, int nr_status; int ret = 0; - IRDA_DEBUG(4, "%s(), event=%s\n", __func__, irlap_event[ event]); + pr_debug("%s(), event=%s\n", __func__, irlap_event[event]); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); @@ -1880,10 +1870,9 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, switch (event) { case RECV_I_CMD: /* Optimize for the common case */ /* FIXME: must check for remote_busy below */ - IRDA_DEBUG(4, "%s(), event=%s nr=%d, vs=%d, ns=%d, " - "vr=%d, pf=%d\n", __func__, - irlap_event[event], info->nr, - self->vs, info->ns, self->vr, info->pf); + pr_debug("%s(), event=%s nr=%d, vs=%d, ns=%d, vr=%d, pf=%d\n", + __func__, irlap_event[event], info->nr, + self->vs, info->ns, self->vr, info->pf); self->retry_count = 0; @@ -1983,7 +1972,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, if ((ns_status == NS_EXPECTED) && (nr_status == NR_UNEXPECTED)) { if (info->pf) { - IRDA_DEBUG(4, "RECV_I_RSP: frame(s) lost\n"); + pr_debug("RECV_I_RSP: frame(s) lost\n"); self->vr = (self->vr + 1) % 8; @@ -2020,10 +2009,10 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, } if (ret == NR_INVALID) { - IRDA_DEBUG(0, "NRM_S, NR_INVALID not implemented!\n"); + pr_debug("NRM_S, NR_INVALID not implemented!\n"); } if (ret == NS_INVALID) { - IRDA_DEBUG(0, "NRM_S, NS_INVALID not implemented!\n"); + pr_debug("NRM_S, NS_INVALID not implemented!\n"); } break; case RECV_UI_FRAME: @@ -2112,22 +2101,21 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, /* Keep state */ irlap_next_state(self, LAP_NRM_S); } else { - IRDA_DEBUG(1, "%s(), invalid nr not implemented!\n", - __func__); + pr_debug("%s(), invalid nr not implemented!\n", + __func__); } break; case RECV_SNRM_CMD: /* SNRM frame is not allowed to contain an I-field */ if (!info) { del_timer(&self->wd_timer); - IRDA_DEBUG(1, "%s(), received SNRM cmd\n", __func__); + pr_debug("%s(), received SNRM cmd\n", __func__); irlap_next_state(self, LAP_RESET_CHECK); irlap_reset_indication(self); } else { - IRDA_DEBUG(0, - "%s(), SNRM frame contained an I-field!\n", - __func__); + pr_debug("%s(), SNRM frame contained an I-field!\n", + __func__); 
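/*
 * [Editorial sketch, not part of the patch] The lap_reasons[] and
 * irlap_event[] string tables above trade their #ifdef CONFIG_IRDA_DEBUG
 * guards for __maybe_unused.  If a configuration compiles every
 * pr_debug() reader of such a table away, the attribute keeps gcc from
 * emitting a "defined but not used" warning.  demo_event[] and demo()
 * below are illustrative names only:
 */
#include <linux/compiler.h>
#include <linux/printk.h>

static const char *const demo_event[] __maybe_unused = {
	"DISCOVERY_REQUEST",
	"CONNECT_REQUEST",
};

static void __maybe_unused demo(int event)
{
	/* May be the table's only reader. */
	pr_debug("event = %s\n", demo_event[event]);
}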
} break; @@ -2159,8 +2147,8 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, * which explain why we use (self->N2 / 2) here !!! * Jean II */ - IRDA_DEBUG(1, "%s(), retry_count = %d\n", __func__, - self->retry_count); + pr_debug("%s(), retry_count = %d\n", __func__, + self->retry_count); if (self->retry_count < (self->N2 / 2)) { /* No retry, just wait for primary */ @@ -2212,8 +2200,8 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, irlap_send_test_frame(self, self->caddr, info->daddr, skb); break; default: - IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__, - event, irlap_event[event]); + pr_debug("%s(), Unknown event %d, (%s)\n", __func__, + event, irlap_event[event]); ret = -EINVAL; break; @@ -2227,8 +2215,6 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) { - IRDA_DEBUG(1, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return -ENODEV;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;); @@ -2284,8 +2270,8 @@ static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event, break; /* stay in SCLOSE */ } - IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__, - event, irlap_event[event]); + pr_debug("%s(), Unknown event %d, (%s)\n", __func__, + event, irlap_event[event]); break; } @@ -2299,7 +2285,7 @@ static int irlap_state_reset_check( struct irlap_cb *self, IRLAP_EVENT event, { int ret = 0; - IRDA_DEBUG(1, "%s(), event=%s\n", __func__, irlap_event[event]); + pr_debug("%s(), event=%s\n", __func__, irlap_event[event]); IRDA_ASSERT(self != NULL, return -ENODEV;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;); @@ -2320,8 +2306,8 @@ static int irlap_state_reset_check( struct irlap_cb *self, IRLAP_EVENT event, irlap_next_state(self, LAP_SCLOSE); break; default: - IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__, - event, irlap_event[event]); + pr_debug("%s(), Unknown event %d, (%s)\n", __func__, + event, irlap_event[event]); ret = -EINVAL; break; diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c index a37998c6273d163eeb2cbbbce4182aea82bee418..b936b1251a66d11c4a1bdde2405f8303e3576c5d 100644 --- a/net/irda/irlap_frame.c +++ b/net/irda/irlap_frame.c @@ -103,8 +103,8 @@ void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb) irlap_insert_info(self, skb); if (unlikely(self->mode & IRDA_MODE_MONITOR)) { - IRDA_DEBUG(3, "%s(): %s is in monitor mode\n", __func__, - self->netdev->name); + pr_debug("%s(): %s is in monitor mode\n", __func__, + self->netdev->name); dev_kfree_skb(skb); return; } @@ -182,8 +182,8 @@ static void irlap_recv_snrm_cmd(struct irlap_cb *self, struct sk_buff *skb, /* Check if the new connection address is valid */ if ((info->caddr == 0x00) || (info->caddr == 0xfe)) { - IRDA_DEBUG(3, "%s(), invalid connection address!\n", - __func__); + pr_debug("%s(), invalid connection address!\n", + __func__); return; } @@ -193,8 +193,8 @@ static void irlap_recv_snrm_cmd(struct irlap_cb *self, struct sk_buff *skb, /* Only accept if addressed directly to us */ if (info->saddr != self->saddr) { - IRDA_DEBUG(2, "%s(), not addressed to us!\n", - __func__); + pr_debug("%s(), not addressed to us!\n", + __func__); return; } irlap_do_event(self, RECV_SNRM_CMD, skb, info); @@ -216,7 +216,7 @@ void irlap_send_ua_response_frame(struct irlap_cb *self, struct qos_info *qos) struct ua_frame *frame; int ret; - IRDA_DEBUG(2, "%s() <%ld>\n", __func__, jiffies); 
+ pr_debug("%s() <%ld>\n", __func__, jiffies); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -291,8 +291,6 @@ void irlap_send_disc_frame(struct irlap_cb *self) struct sk_buff *tx_skb = NULL; struct disc_frame *frame; - IRDA_DEBUG(3, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -322,8 +320,8 @@ void irlap_send_discovery_xid_frame(struct irlap_cb *self, int S, __u8 s, __u32 bcast = BROADCAST; __u8 *info; - IRDA_DEBUG(4, "%s(), s=%d, S=%d, command=%d\n", __func__, - s, S, command); + pr_debug("%s(), s=%d, S=%d, command=%d\n", __func__, + s, S, command); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -415,13 +413,11 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self, __u8 *discovery_info; char *text; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); if (!pskb_may_pull(skb, sizeof(struct xid_frame))) { - IRDA_ERROR("%s: frame too short!\n", __func__); + net_err_ratelimited("%s: frame too short!\n", __func__); return; } @@ -432,13 +428,13 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self, /* Make sure frame is addressed to us */ if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) { - IRDA_DEBUG(0, "%s(), frame is not addressed to us!\n", - __func__); + pr_debug("%s(), frame is not addressed to us!\n", + __func__); return; } if ((discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) { - IRDA_WARNING("%s: kmalloc failed!\n", __func__); + net_warn_ratelimited("%s: kmalloc failed!\n", __func__); return; } @@ -446,15 +442,15 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self, discovery->data.saddr = self->saddr; discovery->timestamp = jiffies; - IRDA_DEBUG(4, "%s(), daddr=%08x\n", __func__, - discovery->data.daddr); + pr_debug("%s(), daddr=%08x\n", __func__, + discovery->data.daddr); discovery_info = skb_pull(skb, sizeof(struct xid_frame)); /* Get info returned from peer */ discovery->data.hints[0] = discovery_info[0]; if (discovery_info[0] & HINT_EXTENSION) { - IRDA_DEBUG(4, "EXTENSION\n"); + pr_debug("EXTENSION\n"); discovery->data.hints[1] = discovery_info[1]; discovery->data.charset = discovery_info[2]; text = (char *) &discovery_info[3]; @@ -492,7 +488,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self, char *text; if (!pskb_may_pull(skb, sizeof(struct xid_frame))) { - IRDA_ERROR("%s: frame too short!\n", __func__); + net_err_ratelimited("%s: frame too short!\n", __func__); return; } @@ -503,8 +499,8 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self, /* Make sure frame is addressed to us */ if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) { - IRDA_DEBUG(0, "%s(), frame is not addressed to us!\n", - __func__); + pr_debug("%s(), frame is not addressed to us!\n", + __func__); return; } @@ -536,8 +532,8 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self, /* Check if things are sane at this point... */ if((discovery_info == NULL) || !pskb_may_pull(skb, 3)) { - IRDA_ERROR("%s: discovery frame too short!\n", - __func__); + net_err_ratelimited("%s: discovery frame too short!\n", + __func__); return; } @@ -545,10 +541,8 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self, * We now have some discovery info to deliver! 
*/ discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC); - if (!discovery) { - IRDA_WARNING("%s: unable to malloc!\n", __func__); + if (!discovery) return; - } discovery->data.daddr = info->daddr; discovery->data.saddr = self->saddr; @@ -658,7 +652,7 @@ static void irlap_recv_rnr_frame(struct irlap_cb *self, struct sk_buff *skb, { info->nr = skb->data[1] >> 5; - IRDA_DEBUG(4, "%s(), nr=%d, %ld\n", __func__, info->nr, jiffies); + pr_debug("%s(), nr=%d, %ld\n", __func__, info->nr, jiffies); if (command) irlap_do_event(self, RECV_RNR_CMD, skb, info); @@ -669,8 +663,6 @@ static void irlap_recv_rnr_frame(struct irlap_cb *self, struct sk_buff *skb, static void irlap_recv_rej_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info, int command) { - IRDA_DEBUG(0, "%s()\n", __func__); - info->nr = skb->data[1] >> 5; /* Check if this is a command or a response frame */ @@ -683,8 +675,6 @@ static void irlap_recv_rej_frame(struct irlap_cb *self, struct sk_buff *skb, static void irlap_recv_srej_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info, int command) { - IRDA_DEBUG(0, "%s()\n", __func__); - info->nr = skb->data[1] >> 5; /* Check if this is a command or a response frame */ @@ -697,8 +687,6 @@ static void irlap_recv_srej_frame(struct irlap_cb *self, struct sk_buff *skb, static void irlap_recv_disc_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info, int command) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* Check if this is a command or a response frame */ if (command) irlap_do_event(self, RECV_DISC_CMD, skb, info); @@ -756,7 +744,7 @@ void irlap_send_data_primary(struct irlap_cb *self, struct sk_buff *skb) irlap_send_i_frame( self, tx_skb, CMD_FRAME); } else { - IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __func__); + pr_debug("%s(), sending unreliable frame\n", __func__); irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); self->window -= 1; } @@ -809,7 +797,7 @@ void irlap_send_data_primary_poll(struct irlap_cb *self, struct sk_buff *skb) irlap_next_state(self, LAP_NRM_P); irlap_send_i_frame(self, tx_skb, CMD_FRAME); } else { - IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __func__); + pr_debug("%s(), sending unreliable frame\n", __func__); if (self->ack_required) { irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); @@ -836,7 +824,9 @@ void irlap_send_data_primary_poll(struct irlap_cb *self, struct sk_buff *skb) * See max_line_capacities[][] in qos.c for details. Jean II */ transmission_time -= (self->final_timeout * self->bytes_left / self->line_capacity); - IRDA_DEBUG(4, "%s() adjusting transmission_time : ft=%d, bl=%d, lc=%d -> tt=%d\n", __func__, self->final_timeout, self->bytes_left, self->line_capacity, transmission_time); + pr_debug("%s() adjusting transmission_time : ft=%d, bl=%d, lc=%d -> tt=%d\n", + __func__, self->final_timeout, self->bytes_left, + self->line_capacity, transmission_time); /* We are allowed to transmit a maximum number of bytes again. 
*/ self->bytes_left = self->line_capacity; @@ -997,7 +987,7 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command) /* tx_skb = skb_clone( skb, GFP_ATOMIC); */ tx_skb = skb_copy(skb, GFP_ATOMIC); if (!tx_skb) { - IRDA_DEBUG(0, "%s(), unable to copy\n", __func__); + pr_debug("%s(), unable to copy\n", __func__); return; } @@ -1020,7 +1010,7 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command) */ while (!skb_queue_empty(&self->txq)) { - IRDA_DEBUG(0, "%s(), sending additional frames!\n", __func__); + pr_debug("%s(), sending additional frames!\n", __func__); if (self->window > 0) { skb = skb_dequeue( &self->txq); IRDA_ASSERT(skb != NULL, return;); @@ -1060,7 +1050,7 @@ void irlap_resend_rejected_frame(struct irlap_cb *self, int command) /* tx_skb = skb_clone( skb, GFP_ATOMIC); */ tx_skb = skb_copy(skb, GFP_ATOMIC); if (!tx_skb) { - IRDA_DEBUG(0, "%s(), unable to copy\n", __func__); + pr_debug("%s(), unable to copy\n", __func__); return; } @@ -1083,8 +1073,6 @@ void irlap_resend_rejected_frame(struct irlap_cb *self, int command) void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb, __u8 caddr, int command) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); @@ -1143,8 +1131,6 @@ static inline void irlap_recv_i_frame(struct irlap_cb *self, static void irlap_recv_ui_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info) { - IRDA_DEBUG( 4, "%s()\n", __func__); - info->pf = skb->data[1] & PF_BIT; /* Final bit */ irlap_do_event(self, RECV_UI_FRAME, skb, info); @@ -1162,15 +1148,13 @@ static void irlap_recv_frmr_frame(struct irlap_cb *self, struct sk_buff *skb, __u8 *frame; int w, x, y, z; - IRDA_DEBUG(0, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); IRDA_ASSERT(info != NULL, return;); if (!pskb_may_pull(skb, 4)) { - IRDA_ERROR("%s: frame too short!\n", __func__); + net_err_ratelimited("%s: frame too short!\n", __func__); return; } @@ -1186,21 +1170,16 @@ static void irlap_recv_frmr_frame(struct irlap_cb *self, struct sk_buff *skb, z = frame[3] & 0x08; if (w) { - IRDA_DEBUG(0, "Rejected control field is undefined or not " - "implemented.\n"); + pr_debug("Rejected control field is undefined or not implemented\n"); } if (x) { - IRDA_DEBUG(0, "Rejected control field was invalid because it " - "contained a non permitted I field.\n"); + pr_debug("Rejected control field was invalid because it contained a non permitted I field\n"); } if (y) { - IRDA_DEBUG(0, "Received I field exceeded the maximum negotiated " - "for the existing connection or exceeded the maximum " - "this station supports if no connection exists.\n"); + pr_debug("Received I field exceeded the maximum negotiated for the existing connection or exceeded the maximum this station supports if no connection exists\n"); } if (z) { - IRDA_DEBUG(0, "Rejected control field control field contained an " - "invalid Nr count.\n"); + pr_debug("Rejected control field control field contained an invalid Nr count\n"); } irlap_do_event(self, RECV_FRMR_RSP, skb, info); } @@ -1256,10 +1235,8 @@ static void irlap_recv_test_frame(struct irlap_cb *self, struct sk_buff *skb, { struct test_frame *frame; - IRDA_DEBUG(2, "%s()\n", __func__); - if (!pskb_may_pull(skb, sizeof(*frame))) { - IRDA_ERROR("%s: frame too short!\n", __func__); + net_err_ratelimited("%s: frame too short!\n", __func__); 
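/*
 * [Editorial sketch, not part of the patch] Frame-validation failures
 * above move from IRDA_ERROR() to net_err_ratelimited() (linux/net.h),
 * which prints through pr_err() only while net_ratelimit() allows it,
 * so a peer spraying malformed frames cannot flood the log.
 * check_frame() is an illustrative helper, not from this patch:
 */
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/skbuff.h>

static int check_frame(struct sk_buff *skb)
{
	/* Always built in (unlike pr_debug()), but rate limited. */
	if (!pskb_may_pull(skb, 2)) {
		net_err_ratelimited("%s: frame too short!\n", __func__);
		return -EINVAL;
	}
	return 0;
}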
return; } frame = (struct test_frame *) skb->data; @@ -1267,8 +1244,8 @@ static void irlap_recv_test_frame(struct irlap_cb *self, struct sk_buff *skb, /* Broadcast frames must carry saddr and daddr fields */ if (info->caddr == CBROADCAST) { if (skb->len < sizeof(struct test_frame)) { - IRDA_DEBUG(0, "%s() test frame too short!\n", - __func__); + pr_debug("%s() test frame too short!\n", + __func__); return; } @@ -1328,13 +1305,13 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev, * share and non linear skbs. This should never happen, so * we don't need to be clever about it. Jean II */ if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { - IRDA_ERROR("%s: can't clone shared skb!\n", __func__); + net_err_ratelimited("%s: can't clone shared skb!\n", __func__); goto err; } /* Check if frame is large enough for parsing */ if (!pskb_may_pull(skb, 2)) { - IRDA_ERROR("%s: frame too short!\n", __func__); + net_err_ratelimited("%s: frame too short!\n", __func__); goto err; } @@ -1348,8 +1325,8 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev, /* First we check if this frame has a valid connection address */ if ((info.caddr != self->caddr) && (info.caddr != CBROADCAST)) { - IRDA_DEBUG(0, "%s(), wrong connection address!\n", - __func__); + pr_debug("%s(), wrong connection address!\n", + __func__); goto out; } /* @@ -1383,8 +1360,8 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev, irlap_recv_srej_frame(self, skb, &info, command); break; default: - IRDA_WARNING("%s: Unknown S-frame %02x received!\n", - __func__, info.control); + net_warn_ratelimited("%s: Unknown S-frame %02x received!\n", + __func__, info.control); break; } goto out; @@ -1421,8 +1398,8 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev, irlap_recv_ui_frame(self, skb, &info); break; default: - IRDA_WARNING("%s: Unknown frame %02x received!\n", - __func__, info.control); + net_warn_ratelimited("%s: Unknown frame %02x received!\n", + __func__, info.control); break; } out: diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c index a5f28d421ea843824629d49302d12aed36b9fe00..a26c401ef4a4431b2957d5b46c05c0c2a35c90bd 100644 --- a/net/irda/irlmp.c +++ b/net/irda/irlmp.c @@ -83,7 +83,6 @@ const char *irlmp_reason_str(LM_REASON reason) */ int __init irlmp_init(void) { - IRDA_DEBUG(1, "%s()\n", __func__); /* Initialize the irlmp structure. 
*/ irlmp = kzalloc( sizeof(struct irlmp_cb), GFP_KERNEL); if (irlmp == NULL) @@ -170,10 +169,8 @@ struct lsap_cb *irlmp_open_lsap(__u8 slsap_sel, notify_t *notify, __u8 pid) /* Allocate new instance of a LSAP connection */ self = kzalloc(sizeof(struct lsap_cb), GFP_ATOMIC); - if (self == NULL) { - IRDA_ERROR("%s: can't allocate memory\n", __func__); + if (self == NULL) return NULL; - } self->magic = LMP_LSAP_MAGIC; self->slsap_sel = slsap_sel; @@ -209,8 +206,6 @@ EXPORT_SYMBOL(irlmp_open_lsap); */ static void __irlmp_close_lsap(struct lsap_cb *self) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); @@ -269,9 +264,8 @@ void irlmp_close_lsap(struct lsap_cb *self) NULL); } if (!lsap) { - IRDA_DEBUG(0, - "%s(), Looks like somebody has removed me already!\n", - __func__); + pr_debug("%s(), Looks like somebody has removed me already!\n", + __func__); return; } __irlmp_close_lsap(self); @@ -297,10 +291,8 @@ void irlmp_register_link(struct irlap_cb *irlap, __u32 saddr, notify_t *notify) * Allocate new instance of a LSAP connection */ lap = kzalloc(sizeof(struct lap_cb), GFP_KERNEL); - if (lap == NULL) { - IRDA_ERROR("%s: unable to kmalloc\n", __func__); + if (lap == NULL) return; - } lap->irlap = irlap; lap->magic = LMP_LAP_MAGIC; @@ -311,7 +303,8 @@ void irlmp_register_link(struct irlap_cb *irlap, __u32 saddr, notify_t *notify) #endif lap->lsaps = hashbin_new(HB_LOCK); if (lap->lsaps == NULL) { - IRDA_WARNING("%s(), unable to kmalloc lsaps\n", __func__); + net_warn_ratelimited("%s(), unable to kmalloc lsaps\n", + __func__); kfree(lap); return; } @@ -343,8 +336,6 @@ void irlmp_unregister_link(__u32 saddr) { struct lap_cb *link; - IRDA_DEBUG(4, "%s()\n", __func__); - /* We must remove ourselves from the hashbin *first*. This ensure * that no more LSAPs will be open on this link and no discovery * will be triggered anymore. Jean II */ @@ -386,9 +377,8 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel, IRDA_ASSERT(self != NULL, return -EBADR;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -EBADR;); - IRDA_DEBUG(2, - "%s(), slsap_sel=%02x, dlsap_sel=%02x, saddr=%08x, daddr=%08x\n", - __func__, self->slsap_sel, dlsap_sel, saddr, daddr); + pr_debug("%s(), slsap_sel=%02x, dlsap_sel=%02x, saddr=%08x, daddr=%08x\n", + __func__, self->slsap_sel, dlsap_sel, saddr, daddr); if (test_bit(0, &self->connected)) { ret = -EISCONN; @@ -432,7 +422,7 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel, if (daddr != DEV_ADDR_ANY) discovery = hashbin_find(irlmp->cachelog, daddr, NULL); else { - IRDA_DEBUG(2, "%s(), no daddr\n", __func__); + pr_debug("%s(), no daddr\n", __func__); discovery = (discovery_t *) hashbin_get_first(irlmp->cachelog); } @@ -445,7 +435,7 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel, } lap = hashbin_lock_find(irlmp->links, saddr, NULL); if (lap == NULL) { - IRDA_DEBUG(1, "%s(), Unable to find a usable link!\n", __func__); + pr_debug("%s(), Unable to find a usable link!\n", __func__); ret = -EHOSTUNREACH; goto err; } @@ -460,14 +450,15 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel, * disconnected yet (waiting for timeout in LAP). * Maybe we could give LAP a bit of help in this case. 
*/ - IRDA_DEBUG(0, "%s(), sorry, but I'm waiting for LAP to timeout!\n", __func__); + pr_debug("%s(), sorry, but I'm waiting for LAP to timeout!\n", + __func__); ret = -EAGAIN; goto err; } /* LAP is already connected to a different node, and LAP * can only talk to one node at a time */ - IRDA_DEBUG(0, "%s(), sorry, but link is busy!\n", __func__); + pr_debug("%s(), sorry, but link is busy!\n", __func__); ret = -EBUSY; goto err; } @@ -528,8 +519,8 @@ void irlmp_connect_indication(struct lsap_cb *self, struct sk_buff *skb) IRDA_ASSERT(skb != NULL, return;); IRDA_ASSERT(self->lap != NULL, return;); - IRDA_DEBUG(2, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n", - __func__, self->slsap_sel, self->dlsap_sel); + pr_debug("%s(), slsap_sel=%02x, dlsap_sel=%02x\n", + __func__, self->slsap_sel, self->dlsap_sel); /* Note : self->lap is set in irlmp_link_data_indication(), * (case CONNECT_CMD:) because we have no way to set it here. @@ -569,8 +560,8 @@ int irlmp_connect_response(struct lsap_cb *self, struct sk_buff *userdata) /* We set the connected bit and move the lsap to the connected list * in the state machine itself. Jean II */ - IRDA_DEBUG(2, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n", - __func__, self->slsap_sel, self->dlsap_sel); + pr_debug("%s(), slsap_sel=%02x, dlsap_sel=%02x\n", + __func__, self->slsap_sel, self->dlsap_sel); /* Make room for MUX control header (3 bytes) */ IRDA_ASSERT(skb_headroom(userdata) >= LMP_CONTROL_HEADER, return -1;); @@ -596,8 +587,6 @@ void irlmp_connect_confirm(struct lsap_cb *self, struct sk_buff *skb) int lap_header_size; int max_seg_size; - IRDA_DEBUG(3, "%s()\n", __func__); - IRDA_ASSERT(skb != NULL, return;); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); @@ -609,8 +598,8 @@ void irlmp_connect_confirm(struct lsap_cb *self, struct sk_buff *skb) lap_header_size = IRLAP_GET_HEADER_SIZE(self->lap->irlap); max_header_size = LMP_HEADER + lap_header_size; - IRDA_DEBUG(2, "%s(), max_header_size=%d\n", - __func__, max_header_size); + pr_debug("%s(), max_header_size=%d\n", + __func__, max_header_size); /* Hide LMP_CONTROL_HEADER header from layer above */ skb_pull(skb, LMP_CONTROL_HEADER); @@ -636,16 +625,14 @@ struct lsap_cb *irlmp_dup(struct lsap_cb *orig, void *instance) struct lsap_cb *new; unsigned long flags; - IRDA_DEBUG(1, "%s()\n", __func__); - spin_lock_irqsave(&irlmp->unconnected_lsaps->hb_spinlock, flags); /* Only allowed to duplicate unconnected LSAP's, and only LSAPs * that have received a connect indication. Jean II */ if ((!hashbin_find(irlmp->unconnected_lsaps, (long) orig, NULL)) || (orig->lap == NULL)) { - IRDA_DEBUG(0, "%s(), invalid LSAP (wrong state)\n", - __func__); + pr_debug("%s(), invalid LSAP (wrong state)\n", + __func__); spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, flags); return NULL; @@ -654,7 +641,7 @@ struct lsap_cb *irlmp_dup(struct lsap_cb *orig, void *instance) /* Allocate a new instance */ new = kmemdup(orig, sizeof(*new), GFP_ATOMIC); if (!new) { - IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__); + pr_debug("%s(), unable to kmalloc\n", __func__); spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, flags); return NULL; @@ -700,7 +687,7 @@ int irlmp_disconnect_request(struct lsap_cb *self, struct sk_buff *userdata) * and us that might mess up the hashbins below. This fixes it. * Jean II */ if (! 
 	    test_and_clear_bit(0, &self->connected)) {
-		IRDA_DEBUG(0, "%s(), already disconnected!\n", __func__);
+		pr_debug("%s(), already disconnected!\n", __func__);
 		dev_kfree_skb(userdata);
 		return -1;
 	}
@@ -754,20 +741,20 @@ void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason,
 {
 	struct lsap_cb *lsap;
 
-	IRDA_DEBUG(1, "%s(), reason=%s [%d]\n", __func__,
-		   irlmp_reason_str(reason), reason);
+	pr_debug("%s(), reason=%s [%d]\n", __func__,
+		 irlmp_reason_str(reason), reason);
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
 
-	IRDA_DEBUG(3, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n",
-		   __func__, self->slsap_sel, self->dlsap_sel);
+	pr_debug("%s(), slsap_sel=%02x, dlsap_sel=%02x\n",
+		 __func__, self->slsap_sel, self->dlsap_sel);
 
 	/* Already disconnected ?
 	 * There is a race condition between irlmp_disconnect_request()
 	 * and us that might mess up the hashbins below. This fixes it.
 	 * Jean II */
 	if (! test_and_clear_bit(0, &self->connected)) {
-		IRDA_DEBUG(0, "%s(), already disconnected!\n", __func__);
+		pr_debug("%s(), already disconnected!\n", __func__);
 		return;
 	}
 
@@ -800,7 +787,7 @@ void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason,
 		self->notify.disconnect_indication(self->notify.instance,
 						   self, reason, skb);
 	} else {
-		IRDA_DEBUG(0, "%s(), no handler\n", __func__);
+		pr_debug("%s(), no handler\n", __func__);
 	}
 }
 
@@ -852,8 +839,8 @@ void irlmp_do_discovery(int nslots)
 	/* Make sure the value is sane */
 	if ((nslots != 1) && (nslots != 6) && (nslots != 8) && (nslots != 16)){
-		IRDA_WARNING("%s: invalid value for number of slots!\n",
-			     __func__);
+		net_warn_ratelimited("%s: invalid value for number of slots!\n",
+				     __func__);
 		nslots = sysctl_discovery_slots = 8;
 	}
 
@@ -971,8 +958,6 @@ irlmp_notify_client(irlmp_client_t *client,
 	int	number;			/* Number of nodes in the log */
 	int	i;
 
-	IRDA_DEBUG(3, "%s()\n", __func__);
-
 	/* Check if client wants or not partial/selective log (optimisation) */
 	if (!client->disco_callback)
 		return;
@@ -1022,8 +1007,6 @@ void irlmp_discovery_confirm(hashbin_t *log, DISCOVERY_MODE mode)
 	irlmp_client_t *client;
 	irlmp_client_t *client_next;
 
-	IRDA_DEBUG(3, "%s()\n", __func__);
-
 	IRDA_ASSERT(log != NULL, return;);
 
 	if (!(HASHBIN_GET_SIZE(log)))
@@ -1057,8 +1040,6 @@ void irlmp_discovery_expiry(discinfo_t *expiries, int number)
 	irlmp_client_t *client_next;
 	int		i;
 
-	IRDA_DEBUG(3, "%s()\n", __func__);
-
 	IRDA_ASSERT(expiries != NULL, return;);
 
 	/* For each client - notify callback may touch client list */
@@ -1091,8 +1072,6 @@ void irlmp_discovery_expiry(discinfo_t *expiries, int number)
  */
 discovery_t *irlmp_get_discovery_response(void)
 {
-	IRDA_DEBUG(4, "%s()\n", __func__);
-
 	IRDA_ASSERT(irlmp != NULL, return NULL;);
 
 	put_unaligned(irlmp->hints.word, (__u16 *)irlmp->discovery_rsp.data.hints);
@@ -1169,8 +1148,6 @@ int irlmp_udata_request(struct lsap_cb *self, struct sk_buff *userdata)
 {
 	int	ret;
 
-	IRDA_DEBUG(4, "%s()\n", __func__);
-
 	IRDA_ASSERT(userdata != NULL, return -1;);
 
 	/* Make room for MUX header */
@@ -1193,8 +1170,6 @@ int irlmp_udata_request(struct lsap_cb *self, struct sk_buff *userdata)
  */
 void irlmp_udata_indication(struct lsap_cb *self, struct sk_buff *skb)
 {
-	IRDA_DEBUG(4, "%s()\n", __func__);
-
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
 	IRDA_ASSERT(skb != NULL, return;);
@@ -1220,8 +1195,6 @@ int irlmp_connless_data_request(struct lsap_cb *self, struct sk_buff *userdata,
 	struct sk_buff *clone_skb;
 	struct lap_cb *lap;
 
-	IRDA_DEBUG(4, "%s()\n", __func__);
-
 	IRDA_ASSERT(userdata != NULL, return -1;);
 
 	/* Make room for MUX and PID header */
@@ -1271,8 +1244,6 @@ int irlmp_connless_data_request(struct lsap_cb *self, struct sk_buff *userdata,
 #ifdef CONFIG_IRDA_ULTRA
 void irlmp_connless_data_indication(struct lsap_cb *self, struct sk_buff *skb)
 {
-	IRDA_DEBUG(4, "%s()\n", __func__);
-
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
 	IRDA_ASSERT(skb != NULL, return;);
@@ -1314,7 +1285,7 @@ void irlmp_status_indication(struct lap_cb *self,
 			curr->notify.status_indication(curr->notify.instance,
 						       link, lock);
 		else
-			IRDA_DEBUG(2, "%s(), no handler\n", __func__);
+			pr_debug("%s(), no handler\n", __func__);
 
 		curr = next;
 	}
@@ -1342,7 +1313,7 @@ void irlmp_flow_indication(struct lap_cb *self, LOCAL_FLOW flow)
 	/* Get the number of lsap. That's the only safe way to know
 	 * that we have looped around... - Jean II */
 	lsap_todo = HASHBIN_GET_SIZE(self->lsaps);
-	IRDA_DEBUG(4, "%s() : %d lsaps to scan\n", __func__, lsap_todo);
+	pr_debug("%s() : %d lsaps to scan\n", __func__, lsap_todo);
 
 	/* Poll lsap in order until the queue is full or until we
 	 * tried them all.
@@ -1361,14 +1332,16 @@ void irlmp_flow_indication(struct lap_cb *self, LOCAL_FLOW flow)
 		/* Uh-oh... Paranoia */
 		if(curr == NULL)
 			break;
-		IRDA_DEBUG(4, "%s() : curr is %p, next was %p and is now %p, still %d to go - queue len = %d\n", __func__, curr, next, self->flow_next, lsap_todo, IRLAP_GET_TX_QUEUE_LEN(self->irlap));
+		pr_debug("%s() : curr is %p, next was %p and is now %p, still %d to go - queue len = %d\n",
+			 __func__, curr, next, self->flow_next, lsap_todo,
+			 IRLAP_GET_TX_QUEUE_LEN(self->irlap));
 
 		/* Inform lsap user that it can send one more packet. */
 		if (curr->notify.flow_indication != NULL)
 			curr->notify.flow_indication(curr->notify.instance,
 						     curr, flow);
 		else
-			IRDA_DEBUG(1, "%s(), no handler\n", __func__);
+			pr_debug("%s(), no handler\n", __func__);
 	}
 }
 
@@ -1389,32 +1362,30 @@ __u8 *irlmp_hint_to_service(__u8 *hint)
 	 * since we currently only support 2 hint bytes */
 	service = kmalloc(16, GFP_ATOMIC);
-	if (!service) {
-		IRDA_DEBUG(1, "%s(), Unable to kmalloc!\n", __func__);
+	if (!service)
 		return NULL;
-	}
 
 	if (!hint[0]) {
-		IRDA_DEBUG(1, "\n");
+		pr_debug("\n");
 		kfree(service);
 		return NULL;
 	}
 	if (hint[0] & HINT_PNP)
-		IRDA_DEBUG(1, "PnP Compatible ");
+		pr_debug("PnP Compatible ");
 	if (hint[0] & HINT_PDA)
-		IRDA_DEBUG(1, "PDA/Palmtop ");
+		pr_debug("PDA/Palmtop ");
 	if (hint[0] & HINT_COMPUTER)
-		IRDA_DEBUG(1, "Computer ");
+		pr_debug("Computer ");
 
 	if (hint[0] & HINT_PRINTER) {
-		IRDA_DEBUG(1, "Printer ");
+		pr_debug("Printer ");
 		service[i++] = S_PRINTER;
 	}
 	if (hint[0] & HINT_MODEM)
-		IRDA_DEBUG(1, "Modem ");
+		pr_debug("Modem ");
 	if (hint[0] & HINT_FAX)
-		IRDA_DEBUG(1, "Fax ");
+		pr_debug("Fax ");
 	if (hint[0] & HINT_LAN) {
-		IRDA_DEBUG(1, "LAN Access ");
+		pr_debug("LAN Access ");
 		service[i++] = S_LAN;
 	}
 	/*
@@ -1424,22 +1395,22 @@ __u8 *irlmp_hint_to_service(__u8 *hint)
 	 */
 	if (hint[0] & HINT_EXTENSION) {
 		if (hint[1] & HINT_TELEPHONY) {
-			IRDA_DEBUG(1, "Telephony ");
+			pr_debug("Telephony ");
 			service[i++] = S_TELEPHONY;
 		}
 		if (hint[1] & HINT_FILE_SERVER)
-			IRDA_DEBUG(1, "File Server ");
+			pr_debug("File Server ");
 
 		if (hint[1] & HINT_COMM) {
-			IRDA_DEBUG(1, "IrCOMM ");
+			pr_debug("IrCOMM ");
 			service[i++] = S_COMM;
 		}
 		if (hint[1] & HINT_OBEX) {
-			IRDA_DEBUG(1, "IrOBEX ");
+			pr_debug("IrOBEX ");
 			service[i++] = S_OBEX;
 		}
 	}
-	IRDA_DEBUG(1, "\n");
+	pr_debug("\n");
 
 	/* So that client can be notified about any discovery */
 	service[i++] = S_ANY;
@@ -1492,14 +1463,13 @@ void *irlmp_register_service(__u16 hints)
 {
 	irlmp_service_t *service;
 
-	IRDA_DEBUG(4, "%s(), hints = %04x\n", __func__, hints);
+	pr_debug("%s(), hints = %04x\n", __func__, hints);
 
 	/* Make a new registration */
 	service = kmalloc(sizeof(irlmp_service_t), GFP_ATOMIC);
-	if (!service) {
-		IRDA_DEBUG(1, "%s(), Unable to kmalloc!\n", __func__);
+	if (!service)
 		return NULL;
-	}
+
 	service->hints.word = hints;
 	hashbin_insert(irlmp->services, (irda_queue_t *) service,
 		       (long) service, NULL);
@@ -1522,15 +1492,13 @@ int irlmp_unregister_service(void *handle)
 	irlmp_service_t *service;
 	unsigned long flags;
 
-	IRDA_DEBUG(4, "%s()\n", __func__);
-
 	if (!handle)
 		return -1;
 
 	/* Caller may call with invalid handle (it's legal) - Jean II */
 	service = hashbin_lock_find(irlmp->services, (long) handle, NULL);
 	if (!service) {
-		IRDA_DEBUG(1, "%s(), Unknown service!\n", __func__);
+		pr_debug("%s(), Unknown service!\n", __func__);
 		return -1;
 	}
 
@@ -1567,15 +1535,12 @@ void *irlmp_register_client(__u16 hint_mask, DISCOVERY_CALLBACK1 disco_clb,
 {
 	irlmp_client_t *client;
 
-	IRDA_DEBUG(1, "%s()\n", __func__);
 	IRDA_ASSERT(irlmp != NULL, return NULL;);
 
 	/* Make a new registration */
 	client = kmalloc(sizeof(irlmp_client_t), GFP_ATOMIC);
-	if (!client) {
-		IRDA_DEBUG( 1, "%s(), Unable to kmalloc!\n", __func__);
+	if (!client)
 		return NULL;
-	}
 
 	/* Register the details */
 	client->hint_mask.word = hint_mask;
@@ -1609,7 +1574,7 @@ int irlmp_update_client(void *handle, __u16 hint_mask,
 
 	client = hashbin_lock_find(irlmp->clients, (long) handle, NULL);
 	if (!client) {
-		IRDA_DEBUG(1, "%s(), Unknown client!\n", __func__);
+		pr_debug("%s(), Unknown client!\n", __func__);
 		return -1;
 	}
 
@@ -1632,19 +1597,17 @@ int irlmp_unregister_client(void *handle)
 {
 	struct irlmp_client *client;
 
-	IRDA_DEBUG(4, "%s()\n", __func__);
-
 	if (!handle)
 		return -1;
 
 	/* Caller may call with invalid handle (it's legal) - Jean II */
 	client = hashbin_lock_find(irlmp->clients, (long) handle, NULL);
 	if (!client) {
-		IRDA_DEBUG(1, "%s(), Unknown client!\n", __func__);
+		pr_debug("%s(), Unknown client!\n", __func__);
 		return -1;
 	}
 
-	IRDA_DEBUG(4, "%s(), removing client!\n", __func__);
+	pr_debug("%s(), removing client!\n", __func__);
 	hashbin_remove_this(irlmp->clients, (irda_queue_t *) client);
 	kfree(client);
 
@@ -1673,8 +1636,6 @@ static int irlmp_slsap_inuse(__u8 slsap_sel)
 	IRDA_ASSERT(irlmp->magic == LMP_MAGIC, return TRUE;);
 	IRDA_ASSERT(slsap_sel != LSAP_ANY, return TRUE;);
 
-	IRDA_DEBUG(4, "%s()\n", __func__);
-
 #ifdef CONFIG_IRDA_ULTRA
 	/* Accept all bindings to the connectionless LSAP */
 	if (slsap_sel == LSAP_CONNLESS)
@@ -1708,8 +1669,8 @@ static int irlmp_slsap_inuse(__u8 slsap_sel)
 					   goto errlsap;);
 
 			if ((self->slsap_sel == slsap_sel)) {
-				IRDA_DEBUG(4, "Source LSAP selector=%02x in use\n",
-					   self->slsap_sel);
+				pr_debug("Source LSAP selector=%02x in use\n",
+					 self->slsap_sel);
 				goto errlsap;
 			}
 			self = (struct lsap_cb*) hashbin_get_next(lap->lsaps);
@@ -1733,8 +1694,8 @@ static int irlmp_slsap_inuse(__u8 slsap_sel)
 		while (self != NULL) {
 			IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, goto erruncon;);
 			if ((self->slsap_sel == slsap_sel)) {
-				IRDA_DEBUG(4, "Source LSAP selector=%02x in use (unconnected)\n",
-					   self->slsap_sel);
+				pr_debug("Source LSAP selector=%02x in use (unconnected)\n",
+					 self->slsap_sel);
 				goto erruncon;
 			}
 			self = (struct lsap_cb*) hashbin_get_next(irlmp->unconnected_lsaps);
@@ -1799,8 +1760,8 @@ static __u8 irlmp_find_free_slsap(void)
 			/* Make sure we terminate the loop */
 			if (wrapped++) {
-				IRDA_ERROR("%s: no more free LSAPs !\n",
-					   __func__);
+				net_err_ratelimited("%s: no more free LSAPs !\n",
+						    __func__);
 				return 0;
 			}
 		}
@@ -1814,8 +1775,8 @@ static __u8 irlmp_find_free_slsap(void)
 	/* Got it ! */
 	lsap_sel = irlmp->last_lsap_sel;
-	IRDA_DEBUG(4, "%s(), found free lsap_sel=%02x\n",
-		   __func__, lsap_sel);
+	pr_debug("%s(), found free lsap_sel=%02x\n",
+		 __func__, lsap_sel);
 
 	return lsap_sel;
 }
@@ -1833,26 +1794,27 @@ LM_REASON irlmp_convert_lap_reason( LAP_REASON lap_reason)
 	switch (lap_reason) {
 	case LAP_DISC_INDICATION: /* Received a disconnect request from peer */
-		IRDA_DEBUG( 1, "%s(), LAP_DISC_INDICATION\n", __func__);
+		pr_debug("%s(), LAP_DISC_INDICATION\n", __func__);
 		reason = LM_USER_REQUEST;
 		break;
 	case LAP_NO_RESPONSE:	  /* To many retransmits without response */
-		IRDA_DEBUG( 1, "%s(), LAP_NO_RESPONSE\n", __func__);
+		pr_debug("%s(), LAP_NO_RESPONSE\n", __func__);
 		reason = LM_LAP_DISCONNECT;
 		break;
	case LAP_RESET_INDICATION:
-		IRDA_DEBUG( 1, "%s(), LAP_RESET_INDICATION\n", __func__);
+		pr_debug("%s(), LAP_RESET_INDICATION\n", __func__);
 		reason = LM_LAP_RESET;
 		break;
 	case LAP_FOUND_NONE:
 	case LAP_MEDIA_BUSY:
 	case LAP_PRIMARY_CONFLICT:
-		IRDA_DEBUG(1, "%s(), LAP_FOUND_NONE, LAP_MEDIA_BUSY or LAP_PRIMARY_CONFLICT\n", __func__);
+		pr_debug("%s(), LAP_FOUND_NONE, LAP_MEDIA_BUSY or LAP_PRIMARY_CONFLICT\n",
+			 __func__);
 		reason = LM_CONNECT_FAILURE;
 		break;
 	default:
-		IRDA_DEBUG(1, "%s(), Unknown IrLAP disconnect reason %d!\n",
-			   __func__, lap_reason);
+		pr_debug("%s(), Unknown IrLAP disconnect reason %d!\n",
+			 __func__, lap_reason);
 		reason = LM_LAP_DISCONNECT;
 		break;
 	}
%s\n", __func__, - irlmp_event[event], - irlmp_state[self->lap_state]); + pr_debug("%s(), EVENT = %s, STATE = %s\n", __func__, + irlmp_event[event], + irlmp_state[self->lap_state]); (*lap_state[self->lap_state]) (self, event, skb); } void irlmp_discovery_timer_expired(void *data) { - IRDA_DEBUG(4, "%s()\n", __func__); - /* We always cleanup the log (active & passive discovery) */ irlmp_do_expiry(); @@ -184,8 +180,6 @@ void irlmp_watchdog_timer_expired(void *data) { struct lsap_cb *self = (struct lsap_cb *) data; - IRDA_DEBUG(2, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); @@ -196,8 +190,6 @@ void irlmp_idle_timer_expired(void *data) { struct lap_cb *self = (struct lap_cb *) data; - IRDA_DEBUG(2, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); @@ -256,7 +248,6 @@ irlmp_do_all_lsap_event(hashbin_t * lsap_hashbin, static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self->irlap != NULL, return;); switch (event) { @@ -276,7 +267,7 @@ static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event, irlap_connect_response(self->irlap, skb); break; case LM_LAP_CONNECT_REQUEST: - IRDA_DEBUG(4, "%s() LS_CONNECT_REQUEST\n", __func__); + pr_debug("%s() LS_CONNECT_REQUEST\n", __func__); irlmp_next_lap_state(self, LAP_U_CONNECT); @@ -284,14 +275,14 @@ static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event, irlap_connect_request(self->irlap, self->daddr, NULL, 0); break; case LM_LAP_DISCONNECT_INDICATION: - IRDA_DEBUG(4, "%s(), Error LM_LAP_DISCONNECT_INDICATION\n", - __func__); + pr_debug("%s(), Error LM_LAP_DISCONNECT_INDICATION\n", + __func__); irlmp_next_lap_state(self, LAP_STANDBY); break; default: - IRDA_DEBUG(0, "%s(), Unknown event %s\n", - __func__, irlmp_event[event]); + pr_debug("%s(), Unknown event %s\n", + __func__, irlmp_event[event]); break; } } @@ -306,7 +297,7 @@ static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event, static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(2, "%s(), event=%s\n", __func__, irlmp_event[event]); + pr_debug("%s(), event=%s\n", __func__, irlmp_event[event]); switch (event) { case LM_LAP_CONNECT_INDICATION: @@ -326,7 +317,7 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event, * the lsaps may already have gone. This avoid getting stuck * forever in LAP_ACTIVE state - Jean II */ if (HASHBIN_GET_SIZE(self->lsaps) == 0) { - IRDA_DEBUG(0, "%s() NO LSAPs !\n", __func__); + pr_debug("%s() NO LSAPs !\n", __func__); irlmp_start_idle_timer(self, LM_IDLE_TIMEOUT); } break; @@ -344,12 +335,12 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event, * the lsaps may already have gone. 
This avoid getting stuck * forever in LAP_ACTIVE state - Jean II */ if (HASHBIN_GET_SIZE(self->lsaps) == 0) { - IRDA_DEBUG(0, "%s() NO LSAPs !\n", __func__); + pr_debug("%s() NO LSAPs !\n", __func__); irlmp_start_idle_timer(self, LM_IDLE_TIMEOUT); } break; case LM_LAP_DISCONNECT_INDICATION: - IRDA_DEBUG(4, "%s(), LM_LAP_DISCONNECT_INDICATION\n", __func__); + pr_debug("%s(), LM_LAP_DISCONNECT_INDICATION\n", __func__); irlmp_next_lap_state(self, LAP_STANDBY); /* Send disconnect event to all LSAPs using this link */ @@ -357,7 +348,7 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event, LM_LAP_DISCONNECT_INDICATION); break; case LM_LAP_DISCONNECT_REQUEST: - IRDA_DEBUG(4, "%s(), LM_LAP_DISCONNECT_REQUEST\n", __func__); + pr_debug("%s(), LM_LAP_DISCONNECT_REQUEST\n", __func__); /* One of the LSAP did timeout or was closed, if it was * the last one, try to get out of here - Jean II */ @@ -366,7 +357,7 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event, } break; default: - IRDA_DEBUG(0, "%s(), Unknown event %s\n", + pr_debug("%s(), Unknown event %s\n", __func__, irlmp_event[event]); break; } @@ -381,11 +372,9 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event, static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__); - switch (event) { case LM_LAP_CONNECT_REQUEST: - IRDA_DEBUG(4, "%s(), LS_CONNECT_REQUEST\n", __func__); + pr_debug("%s(), LS_CONNECT_REQUEST\n", __func__); /* * IrLAP may have a pending disconnect. We tried to close @@ -467,7 +456,7 @@ static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event, irlmp_do_expiry(); break; default: - IRDA_DEBUG(0, "%s(), Unknown event %s\n", + pr_debug("%s(), Unknown event %s\n", __func__, irlmp_event[event]); break; } @@ -490,8 +479,6 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event, { int ret = 0; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); @@ -505,11 +492,11 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event, break; #endif /* CONFIG_IRDA_ULTRA */ case LM_CONNECT_REQUEST: - IRDA_DEBUG(4, "%s(), LM_CONNECT_REQUEST\n", __func__); + pr_debug("%s(), LM_CONNECT_REQUEST\n", __func__); if (self->conn_skb) { - IRDA_WARNING("%s: busy with another request!\n", - __func__); + net_warn_ratelimited("%s: busy with another request!\n", + __func__); return -EBUSY; } /* Don't forget to refcount it (see irlmp_connect_request()) */ @@ -525,8 +512,8 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event, break; case LM_CONNECT_INDICATION: if (self->conn_skb) { - IRDA_WARNING("%s: busy with another request!\n", - __func__); + net_warn_ratelimited("%s: busy with another request!\n", + __func__); return -EBUSY; } /* Don't forget to refcount it (see irlap_driver_rcv()) */ @@ -551,8 +538,8 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event, irlmp_do_lap_event(self->lap, LM_LAP_CONNECT_REQUEST, NULL); break; default: - IRDA_DEBUG(1, "%s(), Unknown event %s on LSAP %#02x\n", - __func__, irlmp_event[event], self->slsap_sel); + pr_debug("%s(), Unknown event %s on LSAP %#02x\n", + __func__, irlmp_event[event], self->slsap_sel); break; } return ret; @@ -570,8 +557,6 @@ static int irlmp_state_connect(struct lsap_cb *self, IRLMP_EVENT event, struct lsap_cb *lsap; int ret = 0; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != 
NULL, return -1;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); @@ -603,7 +588,7 @@ static int irlmp_state_connect(struct lsap_cb *self, IRLMP_EVENT event, case LM_WATCHDOG_TIMEOUT: /* May happen, who knows... * Jean II */ - IRDA_DEBUG(0, "%s() WATCHDOG_TIMEOUT!\n", __func__); + pr_debug("%s() WATCHDOG_TIMEOUT!\n", __func__); /* Disconnect, get out... - Jean II */ self->lap = NULL; @@ -613,8 +598,8 @@ static int irlmp_state_connect(struct lsap_cb *self, IRLMP_EVENT event, default: /* LM_LAP_DISCONNECT_INDICATION : Should never happen, we * are *not* yet bound to the IrLAP link. Jean II */ - IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", - __func__, irlmp_event[event], self->slsap_sel); + pr_debug("%s(), Unknown event %s on LSAP %#02x\n", + __func__, irlmp_event[event], self->slsap_sel); break; } return ret; @@ -632,8 +617,6 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event, struct sk_buff *tx_skb; int ret = 0; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); @@ -642,17 +625,17 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event, /* Keep state */ break; case LM_CONNECT_RESPONSE: - IRDA_DEBUG(0, "%s(), LM_CONNECT_RESPONSE, " - "no indication issued yet\n", __func__); + pr_debug("%s(), LM_CONNECT_RESPONSE, no indication issued yet\n", + __func__); /* Keep state */ break; case LM_DISCONNECT_REQUEST: - IRDA_DEBUG(0, "%s(), LM_DISCONNECT_REQUEST, " - "not yet bound to IrLAP connection\n", __func__); + pr_debug("%s(), LM_DISCONNECT_REQUEST, not yet bound to IrLAP connection\n", + __func__); /* Keep state */ break; case LM_LAP_CONNECT_CONFIRM: - IRDA_DEBUG(4, "%s(), LS_CONNECT_CONFIRM\n", __func__); + pr_debug("%s(), LS_CONNECT_CONFIRM\n", __func__); irlmp_next_lsap_state(self, LSAP_CONNECT); tx_skb = self->conn_skb; @@ -666,7 +649,7 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event, /* Will happen in some rare cases because of a race condition. * Just make sure we don't stay there forever... * Jean II */ - IRDA_DEBUG(0, "%s() WATCHDOG_TIMEOUT!\n", __func__); + pr_debug("%s() WATCHDOG_TIMEOUT!\n", __func__); /* Go back to disconnected mode, keep the socket waiting */ self->lap = NULL; @@ -679,8 +662,8 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event, default: /* LM_LAP_DISCONNECT_INDICATION : Should never happen, we * are *not* yet bound to the IrLAP link. 
Jean II */ - IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", - __func__, irlmp_event[event], self->slsap_sel); + pr_debug("%s(), Unknown event %s on LSAP %#02x\n", + __func__, irlmp_event[event], self->slsap_sel); break; } return ret; @@ -698,8 +681,6 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event, LM_REASON reason; int ret = 0; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); IRDA_ASSERT(self->lap != NULL, return -1;); @@ -721,13 +702,13 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event, irlmp_udata_indication(self, skb); break; case LM_CONNECT_REQUEST: - IRDA_DEBUG(0, "%s(), LM_CONNECT_REQUEST, " - "error, LSAP already connected\n", __func__); + pr_debug("%s(), LM_CONNECT_REQUEST, error, LSAP already connected\n", + __func__); /* Keep state */ break; case LM_CONNECT_RESPONSE: - IRDA_DEBUG(0, "%s(), LM_CONNECT_RESPONSE, " - "error, LSAP already connected\n", __func__); + pr_debug("%s(), LM_CONNECT_RESPONSE, error, LSAP already connected\n", + __func__); /* Keep state */ break; case LM_DISCONNECT_REQUEST: @@ -739,8 +720,8 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event, /* Try to close the LAP connection if its still there */ if (self->lap) { - IRDA_DEBUG(4, "%s(), trying to close IrLAP\n", - __func__); + pr_debug("%s(), trying to close IrLAP\n", + __func__); irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); @@ -764,14 +745,14 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event, reason = skb->data[3]; /* Try to close the LAP connection */ - IRDA_DEBUG(4, "%s(), trying to close IrLAP\n", __func__); + pr_debug("%s(), trying to close IrLAP\n", __func__); irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); irlmp_disconnect_indication(self, reason, skb); break; default: - IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", - __func__, irlmp_event[event], self->slsap_sel); + pr_debug("%s(), Unknown event %s on LSAP %#02x\n", + __func__, irlmp_event[event], self->slsap_sel); break; } return ret; @@ -793,8 +774,6 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event, IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); - IRDA_DEBUG(4, "%s()\n", __func__); - switch (event) { case LM_CONNECT_CONFIRM: irlmp_next_lsap_state(self, LSAP_DATA_TRANSFER_READY); @@ -814,7 +793,7 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event, reason = skb->data[3]; /* Try to close the LAP connection */ - IRDA_DEBUG(4, "%s(), trying to close IrLAP\n", __func__); + pr_debug("%s(), trying to close IrLAP\n", __func__); irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); irlmp_disconnect_indication(self, reason, skb); @@ -832,7 +811,7 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event, irlmp_disconnect_indication(self, reason, skb); break; case LM_WATCHDOG_TIMEOUT: - IRDA_DEBUG(0, "%s() WATCHDOG_TIMEOUT!\n", __func__); + pr_debug("%s() WATCHDOG_TIMEOUT!\n", __func__); IRDA_ASSERT(self->lap != NULL, return -1;); irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); @@ -841,8 +820,8 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event, irlmp_disconnect_indication(self, LM_CONNECT_FAILURE, NULL); break; default: - IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", - __func__, irlmp_event[event], self->slsap_sel); + pr_debug("%s(), Unknown event %s on LSAP %#02x\n", + __func__, 
irlmp_event[event], self->slsap_sel); break; } return ret; @@ -863,8 +842,6 @@ static int irlmp_state_setup_pend(struct lsap_cb *self, IRLMP_EVENT event, LM_REASON reason; int ret = 0; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(irlmp != NULL, return -1;); @@ -883,7 +860,7 @@ static int irlmp_state_setup_pend(struct lsap_cb *self, IRLMP_EVENT event, irlmp_next_lsap_state(self, LSAP_SETUP); break; case LM_WATCHDOG_TIMEOUT: - IRDA_DEBUG(0, "%s() : WATCHDOG_TIMEOUT !\n", __func__); + pr_debug("%s() : WATCHDOG_TIMEOUT !\n", __func__); IRDA_ASSERT(self->lap != NULL, return -1;); irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); @@ -901,8 +878,8 @@ static int irlmp_state_setup_pend(struct lsap_cb *self, IRLMP_EVENT event, irlmp_disconnect_indication(self, reason, NULL); break; default: - IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", - __func__, irlmp_event[event], self->slsap_sel); + pr_debug("%s(), Unknown event %s on LSAP %#02x\n", + __func__, irlmp_event[event], self->slsap_sel); break; } return ret; diff --git a/net/irda/irlmp_frame.c b/net/irda/irlmp_frame.c index 062e63b1c5c4cf32d8722e602975db4b15cbf2bd..38b0f994bc7b60664e8d5409fc375be64cd7eafc 100644 --- a/net/irda/irlmp_frame.c +++ b/net/irda/irlmp_frame.c @@ -44,7 +44,7 @@ inline void irlmp_send_data_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap, skb->data[1] = slsap; if (expedited) { - IRDA_DEBUG(4, "%s(), sending expedited data\n", __func__); + pr_debug("%s(), sending expedited data\n", __func__); irlap_data_request(self->irlap, skb, TRUE); } else irlap_data_request(self->irlap, skb, FALSE); @@ -60,8 +60,6 @@ void irlmp_send_lcf_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap, { __u8 *frame; - IRDA_DEBUG(2, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); @@ -95,8 +93,6 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb, __u8 dlsap_sel; /* Destination LSAP address */ __u8 *fp; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); IRDA_ASSERT(skb->len > 2, return;); @@ -115,9 +111,8 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb, * it in a different way than other established connections. 
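One change in irlmp_event.c above is not a plain text substitution: the irlmp_event[] string table loses its #ifdef CONFIG_IRDA_DEBUG guard and gains __maybe_unused instead. The attribute tells the compiler that the array may end up unreferenced in some configurations (note, for instance, that the pr_debug() naming irlmp_state[] in irlmp_next_lap_state() sits inside a comment), without the old preprocessor clutter. A minimal illustration with hypothetical names, not taken from the patch:

/* Referenced only from debug statements; may be unused in some configs. */
static const char *const ev_names[] __maybe_unused = {
	"EV_CONNECT",
	"EV_DISCONNECT",
};

static void handle_event(int ev)
{
	pr_debug("handling %s\n", ev_names[ev]);
}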
diff --git a/net/irda/irlmp_frame.c b/net/irda/irlmp_frame.c
index 062e63b1c5c4cf32d8722e602975db4b15cbf2bd..38b0f994bc7b60664e8d5409fc375be64cd7eafc 100644
--- a/net/irda/irlmp_frame.c
+++ b/net/irda/irlmp_frame.c
@@ -44,7 +44,7 @@ inline void irlmp_send_data_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
 	skb->data[1] = slsap;
 
 	if (expedited) {
-		IRDA_DEBUG(4, "%s(), sending expedited data\n", __func__);
+		pr_debug("%s(), sending expedited data\n", __func__);
 		irlap_data_request(self->irlap, skb, TRUE);
 	} else
 		irlap_data_request(self->irlap, skb, FALSE);
@@ -60,8 +60,6 @@ void irlmp_send_lcf_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
 {
 	__u8 *frame;
 
-	IRDA_DEBUG(2, "%s()\n", __func__);
-
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
 	IRDA_ASSERT(skb != NULL, return;);
@@ -95,8 +93,6 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
 	__u8   dlsap_sel;   /* Destination LSAP address */
 	__u8   *fp;
 
-	IRDA_DEBUG(4, "%s()\n", __func__);
-
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
 	IRDA_ASSERT(skb->len > 2, return;);
@@ -115,9 +111,8 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
 	 *  it in a different way than other established connections.
 	 */
 	if ((fp[0] & CONTROL_BIT) && (fp[2] == CONNECT_CMD)) {
-		IRDA_DEBUG(3, "%s(), incoming connection, "
-			   "source LSAP=%d, dest LSAP=%d\n",
-			   __func__, slsap_sel, dlsap_sel);
+		pr_debug("%s(), incoming connection, source LSAP=%d, dest LSAP=%d\n",
+			 __func__, slsap_sel, dlsap_sel);
 
 		/* Try to find LSAP among the unconnected LSAPs */
 		lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, CONNECT_CMD,
@@ -125,7 +120,8 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
 
 		/* Maybe LSAP was already connected, so try one more time */
 		if (!lsap) {
-			IRDA_DEBUG(1, "%s(), incoming connection for LSAP already connected\n", __func__);
+			pr_debug("%s(), incoming connection for LSAP already connected\n",
+				 __func__);
 			lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, 0,
 					       self->lsaps);
 		}
@@ -134,14 +130,14 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
 				       self->lsaps);
 
 	if (lsap == NULL) {
-		IRDA_DEBUG(2, "IrLMP, Sorry, no LSAP for received frame!\n");
-		IRDA_DEBUG(2, "%s(), slsap_sel = %02x, dlsap_sel = %02x\n",
-			   __func__, slsap_sel, dlsap_sel);
+		pr_debug("IrLMP, Sorry, no LSAP for received frame!\n");
+		pr_debug("%s(), slsap_sel = %02x, dlsap_sel = %02x\n",
+			 __func__, slsap_sel, dlsap_sel);
 		if (fp[0] & CONTROL_BIT) {
-			IRDA_DEBUG(2, "%s(), received control frame %02x\n",
-				   __func__, fp[2]);
+			pr_debug("%s(), received control frame %02x\n",
+				 __func__, fp[2]);
 		} else {
-			IRDA_DEBUG(2, "%s(), received data frame\n", __func__);
+			pr_debug("%s(), received data frame\n", __func__);
 		}
 		return;
 	}
@@ -159,20 +155,20 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
 			irlmp_do_lsap_event(lsap, LM_CONNECT_CONFIRM, skb);
 			break;
 		case DISCONNECT:
-			IRDA_DEBUG(4, "%s(), Disconnect indication!\n",
-				   __func__);
+			pr_debug("%s(), Disconnect indication!\n",
+				 __func__);
 			irlmp_do_lsap_event(lsap, LM_DISCONNECT_INDICATION,
 					    skb);
 			break;
 		case ACCESSMODE_CMD:
-			IRDA_DEBUG(0, "Access mode cmd not implemented!\n");
+			pr_debug("Access mode cmd not implemented!\n");
 			break;
 		case ACCESSMODE_CNF:
-			IRDA_DEBUG(0, "Access mode cnf not implemented!\n");
+			pr_debug("Access mode cnf not implemented!\n");
 			break;
 		default:
-			IRDA_DEBUG(0, "%s(), Unknown control frame %02x\n",
-				   __func__, fp[2]);
+			pr_debug("%s(), Unknown control frame %02x\n",
+				 __func__, fp[2]);
 			break;
 		}
 	} else if (unreliable) {
@@ -206,8 +202,6 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb)
 	__u8   *fp;
 	unsigned long flags;
 
-	IRDA_DEBUG(4, "%s()\n", __func__);
-
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
 	IRDA_ASSERT(skb->len > 2, return;);
@@ -223,14 +217,14 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb)
 	pid = fp[2];
 
 	if (pid & 0x80) {
-		IRDA_DEBUG(0, "%s(), extension in PID not supp!\n",
-			   __func__);
+		pr_debug("%s(), extension in PID not supp!\n",
+			 __func__);
 		return;
 	}
 
 	/* Check if frame is addressed to the connectionless LSAP */
 	if ((slsap_sel != LSAP_CONNLESS) || (dlsap_sel != LSAP_CONNLESS)) {
-		IRDA_DEBUG(0, "%s(), dropping frame!\n", __func__);
+		pr_debug("%s(), dropping frame!\n", __func__);
 		return;
 	}
 
@@ -254,7 +248,7 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb)
 	if (lsap)
 		irlmp_connless_data_indication(lsap, skb);
 	else {
-		IRDA_DEBUG(0, "%s(), found no matching LSAP!\n", __func__);
+		pr_debug("%s(), found no matching LSAP!\n", __func__);
 	}
 }
 #endif /* CONFIG_IRDA_ULTRA */
@@ -270,8 +264,6 @@ void irlmp_link_disconnect_indication(struct lap_cb *lap,
 				      LAP_REASON reason,
 				      struct sk_buff *skb)
 {
-	IRDA_DEBUG(2, "%s()\n", __func__);
-
 	IRDA_ASSERT(lap != NULL, return;);
 	IRDA_ASSERT(lap->magic == LMP_LAP_MAGIC, return;);
 
@@ -296,8 +288,6 @@ void irlmp_link_connect_indication(struct lap_cb *self, __u32 saddr,
 				   __u32 daddr, struct qos_info *qos,
 				   struct sk_buff *skb)
 {
-	IRDA_DEBUG(4, "%s()\n", __func__);
-
 	/* Copy QoS settings for this session */
 	self->qos = qos;
 
@@ -317,8 +307,6 @@ void irlmp_link_connect_confirm(struct lap_cb *self, struct qos_info *qos,
 				struct sk_buff *skb)
 {
-	IRDA_DEBUG(4, "%s()\n", __func__);
-
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
 	IRDA_ASSERT(qos != NULL, return;);
@@ -383,8 +371,6 @@ void irlmp_link_discovery_indication(struct lap_cb *self,
  */
 void irlmp_link_discovery_confirm(struct lap_cb *self, hashbin_t *log)
 {
-	IRDA_DEBUG(4, "%s()\n", __func__);
-
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
 
diff --git a/net/irda/irmod.c b/net/irda/irmod.c
index 303a68d92731085226366bbd72579ea4c7d97027..c5e35b85c477c2c80a5a9b4398c79c4359cc7921 100644
--- a/net/irda/irmod.c
+++ b/net/irda/irmod.c
@@ -42,16 +42,6 @@
 #include <net/irda/irttp.h>		/* irttp_init */
 #include <net/irda/irda_device.h>	/* irda_device_init */
 
-/*
- * Module parameters
- */
-#ifdef CONFIG_IRDA_DEBUG
-unsigned int irda_debug = IRDA_DEBUG_LEVEL;
-module_param_named(debug, irda_debug, uint, 0);
-MODULE_PARM_DESC(debug, "IRDA debugging level");
-EXPORT_SYMBOL(irda_debug);
-#endif
-
 /* Packet type handler.
  * Tell the kernel how IrDA packets should be handled.
  */
@@ -90,8 +80,6 @@ static int __init irda_init(void)
 {
 	int ret = 0;
 
-	IRDA_DEBUG(0, "%s()\n", __func__);
-
 	/* Lower layer of the stack */
 	irlmp_init();
 	irlap_init();
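The irmod.c hunk above also retires the irda_debug module parameter (its sysctl counterpart is removed in irsysctl.c below), because after this conversion there is no global debug level left to tune. Control moves to the standard printk machinery; abridged and from memory of include/linux/printk.h (an editor's sketch for context, not part of this patch), pr_debug() expands along these lines:

#if defined(CONFIG_DYNAMIC_DEBUG)
#define pr_debug(fmt, ...) \
	dynamic_pr_debug(fmt, ##__VA_ARGS__)
#elif defined(DEBUG)
#define pr_debug(fmt, ...) \
	printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) \
	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif

On a CONFIG_DYNAMIC_DEBUG kernel, the old "modprobe irda debug=4" workflow becomes a write of a query such as "module irlmp +p" to the dynamic_debug/control file in debugfs, selecting call sites per module, file, or line.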
diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c
index a37b81fe04798e0c8cb0196f72b2cf97e4b610b7..e15c40e86660cf204cc1bc734abde164fb3ef22e 100644
--- a/net/irda/irnetlink.c
+++ b/net/irda/irnetlink.c
@@ -41,7 +41,7 @@ static struct net_device * ifname_to_netdev(struct net *net, struct genl_info *i
 
 	ifname = nla_data(info->attrs[IRDA_NL_ATTR_IFNAME]);
 
-	IRDA_DEBUG(5, "%s(): Looking for %s\n", __func__, ifname);
+	pr_debug("%s(): Looking for %s\n", __func__, ifname);
 
 	return dev_get_by_name(net, ifname);
 }
@@ -57,7 +57,7 @@ static int irda_nl_set_mode(struct sk_buff *skb, struct genl_info *info)
 
 	mode = nla_get_u32(info->attrs[IRDA_NL_ATTR_MODE]);
 
-	IRDA_DEBUG(5, "%s(): Switching to mode: %d\n", __func__, mode);
+	pr_debug("%s(): Switching to mode: %d\n", __func__, mode);
 
 	dev = ifname_to_netdev(&init_net, info);
 	if (!dev)
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index 7152624ed5f18035c2fd223bffd0682697cb86e4..acbe61c7e6831205ca1d725f65263c849ef092e4 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -233,8 +233,6 @@ static __u32 hash( const char* name)
 static void enqueue_first(irda_queue_t **queue, irda_queue_t* element)
 {
 
-	IRDA_DEBUG( 4, "%s()\n", __func__);
-
 	/*
 	 * Check if queue is empty.
 	 */
@@ -267,7 +265,7 @@ static irda_queue_t *dequeue_first(irda_queue_t **queue)
 {
 	irda_queue_t *ret;
 
-	IRDA_DEBUG( 4, "dequeue_first()\n");
+	pr_debug("dequeue_first()\n");
 
 	/*
 	 * Set return value
@@ -308,7 +306,7 @@ static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element
 {
 	irda_queue_t *ret;
 
-	IRDA_DEBUG( 4, "dequeue_general()\n");
+	pr_debug("dequeue_general()\n");
 
 	/*
 	 * Set return value
@@ -452,8 +450,6 @@ void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv,
 	unsigned long flags = 0;
 	int bin;
 
-	IRDA_DEBUG( 4, "%s()\n", __func__);
-
 	IRDA_ASSERT( hashbin != NULL, return;);
 	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return;);
 
@@ -565,8 +561,6 @@ void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name)
 	unsigned long flags = 0;
 	irda_queue_t* entry;
 
-	IRDA_DEBUG( 4, "%s()\n", __func__);
-
 	IRDA_ASSERT( hashbin != NULL, return NULL;);
 	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
 
@@ -658,8 +652,6 @@ void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry)
 	int	bin;
 	long	hashv;
 
-	IRDA_DEBUG( 4, "%s()\n", __func__);
-
 	IRDA_ASSERT( hashbin != NULL, return NULL;);
 	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
 	IRDA_ASSERT( entry != NULL, return NULL;);
@@ -719,7 +711,7 @@ void* hashbin_find( hashbin_t* hashbin, long hashv, const char* name )
 	int bin;
 	irda_queue_t* entry;
 
-	IRDA_DEBUG( 4, "hashbin_find()\n");
+	pr_debug("hashbin_find()\n");
 
 	IRDA_ASSERT( hashbin != NULL, return NULL;);
 	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index d6a59651767a89ca8a2a485665c0040a7ff0c641..873da5e7d428ff1c9c0593f089f215caad81b4bc 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -126,15 +126,6 @@ static struct ctl_table irda_table[] = {
 		.mode		= 0644,
 		.proc_handler	= do_devname,
 	},
-#ifdef CONFIG_IRDA_DEBUG
-	{
-		.procname	= "debug",
-		.data		= &irda_debug,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec
-	},
-#endif
 #ifdef CONFIG_IRDA_FAST_RR
 	{
 		.procname	= "fast_poll_increase",
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 85372cfa7b9f82b690579de4533ac27cf4ad09f6..b6ab41d5b3a34afcc7f3e94324e3965697e37bd2 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -71,11 +71,13 @@ static void irttp_status_indication(void *instance, LINK_STATUS link,
 				    LOCK_STATUS lock);
 
 /* Information for parsing parameters in IrTTP */
-static pi_minor_info_t pi_minor_call_table[] = {
+static const pi_minor_info_t pi_minor_call_table[] = {
 	{ NULL, 0 },                                             /* 0x00 */
 	{ irttp_param_max_sdu_size, PV_INTEGER | PV_BIG_ENDIAN } /* 0x01 */
 };
-static pi_major_info_t pi_major_call_table[] = { { pi_minor_call_table, 2 } };
+static const pi_major_info_t pi_major_call_table[] = {
+	{ pi_minor_call_table, 2 }
+};
 static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 };
 
 /************************ GLOBAL PROCEDURES ************************/
@@ -96,8 +98,8 @@ int __init irttp_init(void)
 
 	irttp->tsaps = hashbin_new(HB_LOCK);
 	if (!irttp->tsaps) {
-		IRDA_ERROR("%s: can't allocate IrTTP hashbin!\n",
-			   __func__);
+		net_err_ratelimited("%s: can't allocate IrTTP hashbin!\n",
+				    __func__);
 		kfree(irttp);
 		return -ENOMEM;
 	}
@@ -166,7 +168,7 @@ static void irttp_todo_expired(unsigned long data)
 	if (!self || self->magic != TTP_TSAP_MAGIC)
 		return;
 
-	IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self);
+	pr_debug("%s(instance=%p)\n", __func__, self);
 
 	/* Try to make some progress, especially on Tx side - Jean II */
 	irttp_run_rx_queue(self);
@@ -207,8 +209,6 @@ static void irttp_flush_queues(struct tsap_cb *self)
 {
 	struct sk_buff *skb;
 
-	IRDA_DEBUG(4, "%s()\n", __func__);
-
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
 
@@ -240,8 +240,8 @@ static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self)
 	IRDA_ASSERT(self != NULL, return NULL;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return NULL;);
 
-	IRDA_DEBUG(2, "%s(), self->rx_sdu_size=%d\n", __func__,
-		   self->rx_sdu_size);
+	pr_debug("%s(), self->rx_sdu_size=%d\n", __func__,
+		 self->rx_sdu_size);
 
 	skb = dev_alloc_skb(TTP_HEADER + self->rx_sdu_size);
 	if (!skb)
@@ -264,9 +264,8 @@ static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self)
 		dev_kfree_skb(frag);
 	}
 
-	IRDA_DEBUG(2,
-		   "%s(), frame len=%d, rx_sdu_size=%d, rx_max_sdu_size=%d\n",
-		   __func__, n, self->rx_sdu_size, self->rx_max_sdu_size);
+	pr_debug("%s(), frame len=%d, rx_sdu_size=%d, rx_max_sdu_size=%d\n",
+		 __func__, n, self->rx_sdu_size, self->rx_max_sdu_size);
 	/* Note : irttp_run_rx_queue() calculate self->rx_sdu_size
 	 * by summing the size of all fragments, so we should always
 	 * have n == self->rx_sdu_size, except in cases where we
@@ -295,8 +294,6 @@ static inline void irttp_fragment_skb(struct tsap_cb *self,
 	struct sk_buff *frag;
 	__u8 *frame;
 
-	IRDA_DEBUG(2, "%s()\n", __func__);
-
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
 	IRDA_ASSERT(skb != NULL, return;);
@@ -305,7 +302,7 @@ static inline void irttp_fragment_skb(struct tsap_cb *self,
 	 * Split frame into a number of segments
 	 */
 	while (skb->len > self->max_seg_size) {
-		IRDA_DEBUG(2, "%s(), fragmenting ...\n", __func__);
+		pr_debug("%s(), fragmenting ...\n", __func__);
 
 		/* Make new segment */
 		frag = alloc_skb(self->max_seg_size+self->max_header_size,
@@ -330,7 +327,7 @@ static inline void irttp_fragment_skb(struct tsap_cb *self,
 		skb_queue_tail(&self->tx_queue, frag);
 	}
 	/* Queue what is left of the original skb */
-	IRDA_DEBUG(2, "%s(), queuing last segment\n", __func__);
+	pr_debug("%s(), queuing last segment\n", __func__);
 
 	frame = skb_push(skb, TTP_HEADER);
 	frame[0] = 0x00; /* Clear more bit */
@@ -361,7 +358,7 @@ static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
 	else
 		self->tx_max_sdu_size = param->pv.i;
 
-	IRDA_DEBUG(1, "%s(), MaxSduSize=%d\n", __func__, param->pv.i);
+	pr_debug("%s(), MaxSduSize=%d\n", __func__, param->pv.i);
 
 	return 0;
 }
@@ -402,15 +399,13 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
 	 * JeanII */
 	if ((stsap_sel != LSAP_ANY) &&
 	   ((stsap_sel < 0x01) || (stsap_sel >= 0x70))) {
-		IRDA_DEBUG(0, "%s(), invalid tsap!\n", __func__);
+		pr_debug("%s(), invalid tsap!\n", __func__);
 		return NULL;
 	}
 
 	self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
-	if (self == NULL) {
-		IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __func__);
+	if (self == NULL)
 		return NULL;
-	}
 
 	/* Initialize internal objects */
 	irttp_init_tsap(self);
@@ -440,7 +435,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
 	 */
 	lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0);
 	if (lsap == NULL) {
-		IRDA_DEBUG(0, "%s: unable to allocate LSAP!!\n", __func__);
+		pr_debug("%s: unable to allocate LSAP!!\n", __func__);
 		__irttp_close_tsap(self);
 		return NULL;
 	}
@@ -451,7 +446,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
 	 * the stsap_sel we have might not be valid anymore
 	 */
 	self->stsap_sel = lsap->slsap_sel;
-	IRDA_DEBUG(4, "%s(), stsap_sel=%02x\n", __func__, self->stsap_sel);
+	pr_debug("%s(), stsap_sel=%02x\n", __func__, self->stsap_sel);
 
 	self->notify = *notify;
 	self->lsap = lsap;
@@ -509,8 +504,6 @@ int irttp_close_tsap(struct tsap_cb *self)
 {
 	struct tsap_cb *tsap;
 
-	IRDA_DEBUG(4, "%s()\n", __func__);
-
 	IRDA_ASSERT(self != NULL, return -1;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
 
@@ -518,8 +511,8 @@ int irttp_close_tsap(struct tsap_cb *self)
 	if (self->connected) {
 		/* Check if disconnect is not pending */
 		if (!test_bit(0, &self->disconnect_pend)) {
-			IRDA_WARNING("%s: TSAP still connected!\n",
-				     __func__);
+			net_warn_ratelimited("%s: TSAP still connected!\n",
+					     __func__);
 			irttp_disconnect_request(self, NULL, P_NORMAL);
 		}
 		self->close_pend = TRUE;
@@ -558,8 +551,6 @@ int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb)
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
 	IRDA_ASSERT(skb != NULL, return -1;);
 
-	IRDA_DEBUG(4, "%s()\n", __func__);
-
 	/* Take shortcut on zero byte packets */
 	if (skb->len == 0) {
 		ret = 0;
@@ -568,13 +559,14 @@ int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb)
 
 	/* Check that nothing bad happens */
 	if (!self->connected) {
-		IRDA_WARNING("%s(), Not connected\n", __func__);
+		net_warn_ratelimited("%s(), Not connected\n", __func__);
 		ret = -ENOTCONN;
 		goto err;
 	}
 
 	if (skb->len > self->max_seg_size) {
-		IRDA_ERROR("%s(), UData is too large for IrLAP!\n", __func__);
+		net_err_ratelimited("%s(), UData is too large for IrLAP!\n",
+				    __func__);
 		ret = -EMSGSIZE;
 		goto err;
 	}
@@ -606,8 +598,8 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
 	IRDA_ASSERT(skb != NULL, return -1;);
 
-	IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__,
-		   skb_queue_len(&self->tx_queue));
+	pr_debug("%s() : queue len = %d\n", __func__,
+		 skb_queue_len(&self->tx_queue));
 
 	/* Take shortcut on zero byte packets */
 	if (skb->len == 0) {
@@ -617,7 +609,7 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
 
 	/* Check that nothing bad happens */
 	if (!self->connected) {
-		IRDA_WARNING("%s: Not connected\n", __func__);
+		net_warn_ratelimited("%s: Not connected\n", __func__);
 		ret = -ENOTCONN;
 		goto err;
 	}
@@ -627,8 +619,8 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
 	 *  inside an IrLAP frame
 	 */
 	if ((self->tx_max_sdu_size == 0) && (skb->len > self->max_seg_size)) {
-		IRDA_ERROR("%s: SAR disabled, and data is too large for IrLAP!\n",
-			   __func__);
+		net_err_ratelimited("%s: SAR disabled, and data is too large for IrLAP!\n",
+				    __func__);
 		ret = -EMSGSIZE;
 		goto err;
 	}
@@ -640,8 +632,8 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
 	if ((self->tx_max_sdu_size != 0) &&
 	    (self->tx_max_sdu_size != TTP_SAR_UNBOUND) &&
 	    (skb->len > self->tx_max_sdu_size)) {
-		IRDA_ERROR("%s: SAR enabled, but data is larger than TxMaxSduSize!\n",
-			   __func__);
+		net_err_ratelimited("%s: SAR enabled, but data is larger than TxMaxSduSize!\n",
+				    __func__);
 		ret = -EMSGSIZE;
 		goto err;
 	}
@@ -719,9 +711,9 @@ static void irttp_run_tx_queue(struct tsap_cb *self)
 	unsigned long flags;
 	int n;
 
-	IRDA_DEBUG(2, "%s() : send_credit = %d, queue_len = %d\n",
-		   __func__,
-		   self->send_credit, skb_queue_len(&self->tx_queue));
+	pr_debug("%s() : send_credit = %d, queue_len = %d\n",
+		 __func__,
+		 self->send_credit, skb_queue_len(&self->tx_queue));
 
 	/* Get exclusive access to the tx queue, otherwise don't touch it */
 	if (irda_lock(&self->tx_queue_lock) == FALSE)
@@ -826,9 +818,9 @@ static inline void irttp_give_credit(struct tsap_cb *self)
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
 
-	IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n",
-		   __func__,
-		   self->send_credit, self->avail_credit, self->remote_credit);
+	pr_debug("%s() send=%d,avail=%d,remote=%d\n",
+		 __func__,
+		 self->send_credit, self->avail_credit, self->remote_credit);
 
 	/* Give credit to peer */
 	tx_skb = alloc_skb(TTP_MAX_HEADER, GFP_ATOMIC);
@@ -876,8 +868,6 @@ static int irttp_udata_indication(void *instance, void *sap,
 	struct tsap_cb *self;
 	int err;
 
-	IRDA_DEBUG(4, "%s()\n", __func__);
-
 	self = instance;
 
 	IRDA_ASSERT(self != NULL, return -1;);
@@ -993,8 +983,6 @@ static void irttp_status_indication(void *instance,
 {
 	struct tsap_cb *self;
 
-	IRDA_DEBUG(4, "%s()\n", __func__);
-
 	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
@@ -1011,7 +999,7 @@ static void irttp_status_indication(void *instance,
 		self->notify.status_indication(self->notify.instance,
 					       link, lock);
 	else
-		IRDA_DEBUG(2, "%s(), no handler\n", __func__);
+		pr_debug("%s(), no handler\n", __func__);
 }
 
 /*
@@ -1029,7 +1017,7 @@ static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
 
-	IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self);
+	pr_debug("%s(instance=%p)\n", __func__, self);
 
 	/* We are "polled" directly from LAP, and the LAP want to fill
 	 * its Tx window. We want to do our best to send it data, so that
@@ -1067,18 +1055,16 @@ static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
  */
 void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow)
 {
-	IRDA_DEBUG(1, "%s()\n", __func__);
-
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
 
 	switch (flow) {
 	case FLOW_STOP:
-		IRDA_DEBUG(1, "%s(), flow stop\n", __func__);
+		pr_debug("%s(), flow stop\n", __func__);
 		self->rx_sdu_busy = TRUE;
 		break;
 	case FLOW_START:
-		IRDA_DEBUG(1, "%s(), flow start\n", __func__);
+		pr_debug("%s(), flow start\n", __func__);
 		self->rx_sdu_busy = FALSE;
 
 		/* Client say he can accept more data, try to free our
@@ -1087,7 +1073,7 @@ void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow)
 		break;
 	default:
-		IRDA_DEBUG(1, "%s(), Unknown flow command!\n", __func__);
+		pr_debug("%s(), Unknown flow command!\n", __func__);
 	}
 }
 EXPORT_SYMBOL(irttp_flow_request);
@@ -1107,7 +1093,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
 	__u8 *frame;
 	__u8 n;
 
-	IRDA_DEBUG(4, "%s(), max_sdu_size=%d\n", __func__, max_sdu_size);
+	pr_debug("%s(), max_sdu_size=%d\n", __func__, max_sdu_size);
 
 	IRDA_ASSERT(self != NULL, return -EBADR;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;);
@@ -1205,8 +1191,6 @@ static void irttp_connect_confirm(void *instance, void *sap,
 	__u8 plen;
 	__u8 n;
 
-	IRDA_DEBUG(4, "%s()\n", __func__);
-
 	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
@@ -1221,15 +1205,15 @@ static void irttp_connect_confirm(void *instance, void *sap,
 	 *  negotiated QoS for the link.
 	 */
 	if (qos) {
-		IRDA_DEBUG(4, "IrTTP, Negotiated BAUD_RATE: %02x\n",
-		       qos->baud_rate.bits);
-		IRDA_DEBUG(4, "IrTTP, Negotiated BAUD_RATE: %d bps.\n",
-		       qos->baud_rate.value);
+		pr_debug("IrTTP, Negotiated BAUD_RATE: %02x\n",
+			 qos->baud_rate.bits);
+		pr_debug("IrTTP, Negotiated BAUD_RATE: %d bps.\n",
+			 qos->baud_rate.value);
 	}
 
 	n = skb->data[0] & 0x7f;
 
-	IRDA_DEBUG(4, "%s(), Initial send_credit=%d\n", __func__, n);
+	pr_debug("%s(), Initial send_credit=%d\n", __func__, n);
 
 	self->send_credit = n;
 	self->tx_max_sdu_size = 0;
@@ -1249,8 +1233,8 @@ static void irttp_connect_confirm(void *instance, void *sap,
 
 		/* Any errors in the parameter list? */
 		if (ret < 0) {
-			IRDA_WARNING("%s: error extracting parameters\n",
-				     __func__);
+			net_warn_ratelimited("%s: error extracting parameters\n",
+					     __func__);
 			dev_kfree_skb(skb);
 
 			/* Do not accept this connection attempt */
@@ -1260,11 +1244,11 @@ static void irttp_connect_confirm(void *instance, void *sap,
 		skb_pull(skb, IRDA_MIN(skb->len, plen+1));
 	}
 
-	IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n", __func__,
-		   self->send_credit, self->avail_credit, self->remote_credit);
+	pr_debug("%s() send=%d,avail=%d,remote=%d\n", __func__,
+		 self->send_credit, self->avail_credit, self->remote_credit);
 
-	IRDA_DEBUG(2, "%s(), MaxSduSize=%d\n", __func__,
-		   self->tx_max_sdu_size);
+	pr_debug("%s(), MaxSduSize=%d\n", __func__,
+		 self->tx_max_sdu_size);
 
 	if (self->notify.connect_confirm) {
 		self->notify.connect_confirm(self->notify.instance, self, qos,
@@ -1302,7 +1286,7 @@ static void irttp_connect_indication(void *instance, void *sap,
 	self->max_seg_size = max_seg_size - TTP_HEADER;
 	self->max_header_size = max_header_size+TTP_HEADER;
 
-	IRDA_DEBUG(4, "%s(), TSAP sel=%02x\n", __func__, self->stsap_sel);
+	pr_debug("%s(), TSAP sel=%02x\n", __func__, self->stsap_sel);
 
 	/* Need to update dtsap_sel if its equal to LSAP_ANY */
 	self->dtsap_sel = lsap->dlsap_sel;
@@ -1326,8 +1310,8 @@ static void irttp_connect_indication(void *instance, void *sap,
 
 		/* Any errors in the parameter list? */
 		if (ret < 0) {
-			IRDA_WARNING("%s: error extracting parameters\n",
-				     __func__);
+			net_warn_ratelimited("%s: error extracting parameters\n",
+					     __func__);
 			dev_kfree_skb(skb);
 
 			/* Do not accept this connection attempt */
@@ -1364,8 +1348,8 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
 	IRDA_ASSERT(self != NULL, return -1;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
 
-	IRDA_DEBUG(4, "%s(), Source TSAP selector=%02x\n", __func__,
-		   self->stsap_sel);
+	pr_debug("%s(), Source TSAP selector=%02x\n", __func__,
+		 self->stsap_sel);
 
 	/* Any userdata supplied? */
 	if (userdata == NULL) {
@@ -1446,14 +1430,12 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
 	struct tsap_cb *new;
 	unsigned long flags;
 
-	IRDA_DEBUG(1, "%s()\n", __func__);
-
 	/* Protect our access to the old tsap instance */
 	spin_lock_irqsave(&irttp->tsaps->hb_spinlock, flags);
 
 	/* Find the old instance */
 	if (!hashbin_find(irttp->tsaps, (long) orig, NULL)) {
-		IRDA_DEBUG(0, "%s(), unable to find TSAP\n", __func__);
+		pr_debug("%s(), unable to find TSAP\n", __func__);
 		spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
 		return NULL;
 	}
@@ -1461,7 +1443,7 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
 	/* Allocate a new instance */
 	new = kmemdup(orig, sizeof(struct tsap_cb), GFP_ATOMIC);
 	if (!new) {
-		IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__);
+		pr_debug("%s(), unable to kmalloc\n", __func__);
 		spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
 		return NULL;
 	}
@@ -1473,7 +1455,7 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
 	/* Try to dup the LSAP (may fail if we were too slow) */
 	new->lsap = irlmp_dup(orig->lsap, new);
 	if (!new->lsap) {
-		IRDA_DEBUG(0, "%s(), dup failed!\n", __func__);
+		pr_debug("%s(), dup failed!\n", __func__);
 		kfree(new);
 		return NULL;
 	}
@@ -1508,7 +1490,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
 
 	/* Already disconnected? */
 	if (!self->connected) {
-		IRDA_DEBUG(4, "%s(), already disconnected!\n", __func__);
+		pr_debug("%s(), already disconnected!\n", __func__);
 		if (userdata)
 			dev_kfree_skb(userdata);
 		return -1;
@@ -1520,8 +1502,8 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
 	 * for following a disconnect_indication() (i.e. net_bh).
 	 * Jean II */
 	if (test_and_set_bit(0, &self->disconnect_pend)) {
-		IRDA_DEBUG(0, "%s(), disconnect already pending\n",
-			   __func__);
+		pr_debug("%s(), disconnect already pending\n",
+			 __func__);
 		if (userdata)
 			dev_kfree_skb(userdata);
 
@@ -1540,7 +1522,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
 			 * disconnecting right now since the data will
 			 * not have any usable connection to be sent on
 			 */
-			IRDA_DEBUG(1, "%s(): High priority!!()\n", __func__);
+			pr_debug("%s(): High priority!!()\n", __func__);
 			irttp_flush_queues(self);
 		} else if (priority == P_NORMAL) {
 			/*
@@ -1561,7 +1543,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
 	 * be sent at the LMP level (so even if the peer has its Tx queue
 	 * full of data). - Jean II */
 
-	IRDA_DEBUG(1, "%s(), Disconnecting ...\n", __func__);
+	pr_debug("%s(), Disconnecting ...\n", __func__);
 	self->connected = FALSE;
 
 	if (!userdata) {
@@ -1597,8 +1579,6 @@ static void irttp_disconnect_indication(void *instance, void *sap,
 {
 	struct tsap_cb *self;
 
-	IRDA_DEBUG(4, "%s()\n", __func__);
-
 	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
@@ -1657,7 +1637,7 @@ static void irttp_do_data_indication(struct tsap_cb *self, struct sk_buff *skb)
 	 * give an error back
 	 */
 	if (err) {
-		IRDA_DEBUG(0, "%s() requeueing skb!\n", __func__);
+		pr_debug("%s() requeueing skb!\n", __func__);
 
 		/* Make sure we take a break */
 		self->rx_sdu_busy = TRUE;
@@ -1682,8 +1662,8 @@ static void irttp_run_rx_queue(struct tsap_cb *self)
 	struct sk_buff *skb;
 	int more = 0;
 
-	IRDA_DEBUG(2, "%s() send=%d,avail=%d,remote=%d\n", __func__,
-		   self->send_credit, self->avail_credit, self->remote_credit);
+	pr_debug("%s() send=%d,avail=%d,remote=%d\n", __func__,
+		 self->send_credit, self->avail_credit, self->remote_credit);
 
 	/* Get exclusive access to the rx queue, otherwise don't touch it */
 	if (irda_lock(&self->rx_queue_lock) == FALSE)
@@ -1722,8 +1702,8 @@ static void irttp_run_rx_queue(struct tsap_cb *self)
 			 *  limits of the maximum size of the rx_sdu
 			 */
 			if (self->rx_sdu_size <= self->rx_max_sdu_size) {
-				IRDA_DEBUG(4, "%s(), queueing frag\n",
-					   __func__);
+				pr_debug("%s(), queueing frag\n",
+					 __func__);
 				skb_queue_tail(&self->rx_fragments, skb);
 			} else {
 				/* Free the part of the SDU that is too big */
@@ -1752,7 +1732,7 @@ static void irttp_run_rx_queue(struct tsap_cb *self)
 			/* Now we can deliver the reassembled skb */
 			irttp_do_data_indication(self, skb);
 		} else {
-			IRDA_DEBUG(1, "%s(), Truncated frame\n", __func__);
+			pr_debug("%s(), Truncated frame\n", __func__);
 
 			/* Free the part of the SDU that is too big */
 			dev_kfree_skb(skb);
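Note the choice made consistently in irttp.c above: messages that a misbehaving peer can trigger at will (oversized SDUs, bad parameter lists, allocation pressure) become net_warn_ratelimited()/net_err_ratelimited() rather than bare pr_warn()/pr_err(). Those helpers come from include/linux/net.h and amount to roughly the following (an editor's sketch, not part of this patch):

#define net_ratelimited_function(function, ...)		\
do {							\
	if (net_ratelimit())				\
		function(__VA_ARGS__);			\
} while (0)

#define net_warn_ratelimited(fmt, ...)			\
	net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)

so replaying the offending frame in a loop cannot flood the kernel log.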
Must match PV_TYPE */ -static PV_HANDLER pv_extract_table[] = { +static const PV_HANDLER pv_extract_table[] = { irda_extract_integer, /* Handler for any length integers */ irda_extract_integer, /* Handler for 8 bits integers */ irda_extract_integer, /* Handler for 16 bits integers */ @@ -62,7 +62,7 @@ static PV_HANDLER pv_extract_table[] = { irda_extract_no_value /* Handler for no value parameters */ }; -static PV_HANDLER pv_insert_table[] = { +static const PV_HANDLER pv_insert_table[] = { irda_insert_integer, /* Handler for any length integers */ irda_insert_integer, /* Handler for 8 bits integers */ irda_insert_integer, /* Handler for 16 bits integers */ @@ -146,24 +146,24 @@ static int irda_insert_integer(void *self, __u8 *buf, int len, __u8 pi, */ if (p.pl == 0) { if (p.pv.i < 0xff) { - IRDA_DEBUG(2, "%s(), using 1 byte\n", __func__); + pr_debug("%s(), using 1 byte\n", __func__); p.pl = 1; } else if (p.pv.i < 0xffff) { - IRDA_DEBUG(2, "%s(), using 2 bytes\n", __func__); + pr_debug("%s(), using 2 bytes\n", __func__); p.pl = 2; } else { - IRDA_DEBUG(2, "%s(), using 4 bytes\n", __func__); + pr_debug("%s(), using 4 bytes\n", __func__); p.pl = 4; /* Default length */ } } /* Check if buffer is long enough for insertion */ if (len < (2+p.pl)) { - IRDA_WARNING("%s: buffer too short for insertion!\n", - __func__); + net_warn_ratelimited("%s: buffer too short for insertion!\n", + __func__); return -1; } - IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d, pi=%d\n", __func__, - p.pi, p.pl, p.pv.i); + pr_debug("%s(), pi=%#x, pl=%d, pi=%d\n", __func__, + p.pi, p.pl, p.pv.i); switch (p.pl) { case 1: n += irda_param_pack(buf, "bbb", p.pi, p.pl, (__u8) p.pv.i); @@ -184,8 +184,8 @@ static int irda_insert_integer(void *self, __u8 *buf, int len, __u8 pi, break; default: - IRDA_WARNING("%s: length %d not supported\n", - __func__, p.pl); + net_warn_ratelimited("%s: length %d not supported\n", + __func__, p.pl); /* Skip parameter */ return -1; } @@ -214,9 +214,8 @@ static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi, /* Check if buffer is long enough for parsing */ if (len < (2+p.pl)) { - IRDA_WARNING("%s: buffer too short for parsing! " - "Need %d bytes, but len is only %d\n", - __func__, p.pl, len); + net_warn_ratelimited("%s: buffer too short for parsing! Need %d bytes, but len is only %d\n", + __func__, p.pl, len); return -1; } @@ -226,9 +225,8 @@ static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi, * PV_INTEGER means that the handler is flexible. */ if (((type & PV_MASK) != PV_INTEGER) && ((type & PV_MASK) != p.pl)) { - IRDA_ERROR("%s: invalid parameter length! " - "Expected %d bytes, but value had %d bytes!\n", - __func__, type & PV_MASK, p.pl); + net_err_ratelimited("%s: invalid parameter length! 
Expected %d bytes, but value had %d bytes!\n", + __func__, type & PV_MASK, p.pl); /* Most parameters are bit/byte fields or little endian, * so it's ok to only extract a subset of it (the subset @@ -265,15 +263,15 @@ static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi, le32_to_cpus(&p.pv.i); break; default: - IRDA_WARNING("%s: length %d not supported\n", - __func__, p.pl); + net_warn_ratelimited("%s: length %d not supported\n", + __func__, p.pl); /* Skip parameter */ return p.pl+2; } - IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d, pi=%d\n", __func__, - p.pi, p.pl, p.pv.i); + pr_debug("%s(), pi=%#x, pl=%d, pi=%d\n", __func__, + p.pi, p.pl, p.pv.i); /* Call handler for this parameter */ err = (*func)(self, &p, PV_PUT); if (err < 0) @@ -292,21 +290,18 @@ static int irda_extract_string(void *self, __u8 *buf, int len, __u8 pi, irda_param_t p; int err; - IRDA_DEBUG(2, "%s()\n", __func__); - p.pi = pi; /* In case handler needs to know */ p.pl = buf[1]; /* Extract length of value */ if (p.pl > 32) p.pl = 32; - IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d\n", __func__, - p.pi, p.pl); + pr_debug("%s(), pi=%#x, pl=%d\n", __func__, + p.pi, p.pl); /* Check if buffer is long enough for parsing */ if (len < (2+p.pl)) { - IRDA_WARNING("%s: buffer too short for parsing! " - "Need %d bytes, but len is only %d\n", - __func__, p.pl, len); + net_warn_ratelimited("%s: buffer too short for parsing! Need %d bytes, but len is only %d\n", + __func__, p.pl, len); return -1; } @@ -314,8 +309,8 @@ static int irda_extract_string(void *self, __u8 *buf, int len, __u8 pi, * checked that the buffer is long enough */ strncpy(str, buf+2, p.pl); - IRDA_DEBUG(2, "%s(), str=0x%02x 0x%02x\n", __func__, - (__u8) str[0], (__u8) str[1]); + pr_debug("%s(), str=0x%02x 0x%02x\n", + __func__, (__u8)str[0], (__u8)str[1]); /* Null terminate string */ str[p.pl] = '\0'; @@ -343,13 +338,12 @@ static int irda_extract_octseq(void *self, __u8 *buf, int len, __u8 pi, /* Check if buffer is long enough for parsing */ if (len < (2+p.pl)) { - IRDA_WARNING("%s: buffer too short for parsing! " - "Need %d bytes, but len is only %d\n", - __func__, p.pl, len); + net_warn_ratelimited("%s: buffer too short for parsing! Need %d bytes, but len is only %d\n", + __func__, p.pl, len); return -1; } - IRDA_DEBUG(0, "%s(), not impl\n", __func__); + pr_debug("%s(), not impl\n", __func__); return p.pl+2; /* Extracted pl+2 bytes */ } @@ -455,7 +449,7 @@ static int irda_param_unpack(__u8 *buf, char *fmt, ...) 
int irda_param_insert(void *self, __u8 pi, __u8 *buf, int len, pi_param_info_t *info) { - pi_minor_info_t *pi_minor_info; + const pi_minor_info_t *pi_minor_info; __u8 pi_minor; __u8 pi_major; int type; @@ -472,8 +466,8 @@ int irda_param_insert(void *self, __u8 pi, __u8 *buf, int len, if ((pi_major > info->len-1) || (pi_minor > info->tables[pi_major].len-1)) { - IRDA_DEBUG(0, "%s(), no handler for parameter=0x%02x\n", - __func__, pi); + pr_debug("%s(), no handler for parameter=0x%02x\n", + __func__, pi); /* Skip this parameter */ return -1; @@ -487,7 +481,8 @@ int irda_param_insert(void *self, __u8 pi, __u8 *buf, int len, /* Check if handler has been implemented */ if (!pi_minor_info->func) { - IRDA_MESSAGE("%s: no handler for pi=%#x\n", __func__, pi); + net_info_ratelimited("%s: no handler for pi=%#x\n", + __func__, pi); /* Skip this parameter */ return -1; } @@ -509,7 +504,7 @@ EXPORT_SYMBOL(irda_param_insert); static int irda_param_extract(void *self, __u8 *buf, int len, pi_param_info_t *info) { - pi_minor_info_t *pi_minor_info; + const pi_minor_info_t *pi_minor_info; __u8 pi_minor; __u8 pi_major; int type; @@ -526,8 +521,8 @@ static int irda_param_extract(void *self, __u8 *buf, int len, if ((pi_major > info->len-1) || (pi_minor > info->tables[pi_major].len-1)) { - IRDA_DEBUG(0, "%s(), no handler for parameter=0x%02x\n", - __func__, buf[0]); + pr_debug("%s(), no handler for parameter=0x%02x\n", + __func__, buf[0]); /* Skip this parameter */ return 2 + buf[n + 1]; /* Continue */ @@ -539,13 +534,13 @@ static int irda_param_extract(void *self, __u8 *buf, int len, /* Find expected data type for this parameter identifier (pi)*/ type = pi_minor_info->type; - IRDA_DEBUG(3, "%s(), pi=[%d,%d], type=%d\n", __func__, - pi_major, pi_minor, type); + pr_debug("%s(), pi=[%d,%d], type=%d\n", __func__, + pi_major, pi_minor, type); /* Check if handler has been implemented */ if (!pi_minor_info->func) { - IRDA_MESSAGE("%s: no handler for pi=%#x\n", - __func__, buf[n]); + net_info_ratelimited("%s: no handler for pi=%#x\n", + __func__, buf[n]); /* Skip this parameter */ return 2 + buf[n + 1]; /* Continue */ } diff --git a/net/irda/qos.c b/net/irda/qos.c index 11a7cc0cbc2877c7baf2105ad9dfc7880991ad98..25ba8509ad3e49536459674fb11c9df4a98f118b 100644 --- a/net/irda/qos.c +++ b/net/irda/qos.c @@ -122,7 +122,7 @@ static __u32 max_line_capacities[10][4] = { { 800000, 400000, 160000, 80000 }, /* 16000000 bps */ }; -static pi_minor_info_t pi_minor_call_table_type_0[] = { +static const pi_minor_info_t pi_minor_call_table_type_0[] = { { NULL, 0 }, /* 01 */{ irlap_param_baud_rate, PV_INTEGER | PV_LITTLE_ENDIAN }, { NULL, 0 }, @@ -134,7 +134,7 @@ static pi_minor_info_t pi_minor_call_table_type_0[] = { /* 08 */{ irlap_param_link_disconnect, PV_INT_8_BITS } }; -static pi_minor_info_t pi_minor_call_table_type_1[] = { +static const pi_minor_info_t pi_minor_call_table_type_1[] = { { NULL, 0 }, { NULL, 0 }, /* 82 */{ irlap_param_max_turn_time, PV_INT_8_BITS }, @@ -144,7 +144,7 @@ static pi_minor_info_t pi_minor_call_table_type_1[] = { /* 86 */{ irlap_param_min_turn_time, PV_INT_8_BITS }, }; -static pi_major_info_t pi_major_call_table[] = { +static const pi_major_info_t pi_major_call_table[] = { { pi_minor_call_table_type_0, 9 }, { pi_minor_call_table_type_1, 7 }, }; @@ -200,8 +200,8 @@ static int msb_index (__u16 word) * able to check precisely what's going on. If a end user sees this, * it's very likely the peer. 
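/*
 * Editor's note -- an illustrative sketch, not part of the patch.
 * The warning converted in this msb_index() hunk fires when a peer
 * negotiates a parameter value of 0: a most-significant-bit scan has
 * no defined answer for 0, so the code substitutes 0x1, the only
 * value guaranteed to index the lowest entry of every lookup table.
 * The kernel's fls() helper encodes the same convention (fls(0) == 0):
 */
#include <linux/bitops.h>
#include <linux/types.h>

static int demo_msb_index(u16 word)		/* hypothetical */
{
	if (word == 0)		/* no bit set: no valid table index */
		word = 0x1;	/* safest choice, lowest table entry */
	return fls(word) - 1;	/* 0-based index of the highest set bit */
}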
- Jean II */ if (word == 0) { - IRDA_WARNING("%s(), Detected buggy peer, adjust null PV to 0x1!\n", - __func__); + net_warn_ratelimited("%s(), Detected buggy peer, adjust null PV to 0x1!\n", + __func__); /* The only safe choice (we don't know the array size) */ word = 0x1; } @@ -342,8 +342,6 @@ static void irlap_adjust_qos_settings(struct qos_info *qos) __u32 line_capacity; int index; - IRDA_DEBUG(2, "%s()\n", __func__); - /* * Make sure the mintt is sensible. * Main culprit : Ericsson T39. - Jean II @@ -351,8 +349,8 @@ static void irlap_adjust_qos_settings(struct qos_info *qos) if (sysctl_min_tx_turn_time > qos->min_turn_time.value) { int i; - IRDA_WARNING("%s(), Detected buggy peer, adjust mtt to %dus!\n", - __func__, sysctl_min_tx_turn_time); + net_warn_ratelimited("%s(), Detected buggy peer, adjust mtt to %dus!\n", + __func__, sysctl_min_tx_turn_time); /* We don't really need bits, but easier this way */ i = value_highest_bit(sysctl_min_tx_turn_time, min_turn_times, @@ -368,9 +366,8 @@ static void irlap_adjust_qos_settings(struct qos_info *qos) if ((qos->baud_rate.value < 115200) && (qos->max_turn_time.value < 500)) { - IRDA_DEBUG(0, - "%s(), adjusting max turn time from %d to 500 ms\n", - __func__, qos->max_turn_time.value); + pr_debug("%s(), adjusting max turn time from %d to 500 ms\n", + __func__, qos->max_turn_time.value); qos->max_turn_time.value = 500; } @@ -385,8 +382,8 @@ static void irlap_adjust_qos_settings(struct qos_info *qos) #ifdef CONFIG_IRDA_DYNAMIC_WINDOW while ((qos->data_size.value > line_capacity) && (index > 0)) { qos->data_size.value = data_sizes[index--]; - IRDA_DEBUG(2, "%s(), reducing data size to %d\n", - __func__, qos->data_size.value); + pr_debug("%s(), reducing data size to %d\n", + __func__, qos->data_size.value); } #else /* Use method described in section 6.6.11 of IrLAP */ while (irlap_requested_line_capacity(qos) > line_capacity) { @@ -395,15 +392,15 @@ static void irlap_adjust_qos_settings(struct qos_info *qos) /* Must be able to send at least one frame */ if (qos->window_size.value > 1) { qos->window_size.value--; - IRDA_DEBUG(2, "%s(), reducing window size to %d\n", - __func__, qos->window_size.value); + pr_debug("%s(), reducing window size to %d\n", + __func__, qos->window_size.value); } else if (index > 1) { qos->data_size.value = data_sizes[index--]; - IRDA_DEBUG(2, "%s(), reducing data size to %d\n", - __func__, qos->data_size.value); + pr_debug("%s(), reducing data size to %d\n", + __func__, qos->data_size.value); } else { - IRDA_WARNING("%s(), nothing more we can do!\n", - __func__); + net_warn_ratelimited("%s(), nothing more we can do!\n", + __func__); } } #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */ @@ -440,20 +437,20 @@ int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb) irlap_adjust_qos_settings(&self->qos_tx); - IRDA_DEBUG(2, "Setting BAUD_RATE to %d bps.\n", - self->qos_tx.baud_rate.value); - IRDA_DEBUG(2, "Setting DATA_SIZE to %d bytes\n", - self->qos_tx.data_size.value); - IRDA_DEBUG(2, "Setting WINDOW_SIZE to %d\n", - self->qos_tx.window_size.value); - IRDA_DEBUG(2, "Setting XBOFS to %d\n", - self->qos_tx.additional_bofs.value); - IRDA_DEBUG(2, "Setting MAX_TURN_TIME to %d ms.\n", - self->qos_tx.max_turn_time.value); - IRDA_DEBUG(2, "Setting MIN_TURN_TIME to %d usecs.\n", - self->qos_tx.min_turn_time.value); - IRDA_DEBUG(2, "Setting LINK_DISC to %d secs.\n", - self->qos_tx.link_disc_time.value); + pr_debug("Setting BAUD_RATE to %d bps.\n", + self->qos_tx.baud_rate.value); + pr_debug("Setting DATA_SIZE to %d bytes\n", + 
self->qos_tx.data_size.value); + pr_debug("Setting WINDOW_SIZE to %d\n", + self->qos_tx.window_size.value); + pr_debug("Setting XBOFS to %d\n", + self->qos_tx.additional_bofs.value); + pr_debug("Setting MAX_TURN_TIME to %d ms.\n", + self->qos_tx.max_turn_time.value); + pr_debug("Setting MIN_TURN_TIME to %d usecs.\n", + self->qos_tx.min_turn_time.value); + pr_debug("Setting LINK_DISC to %d secs.\n", + self->qos_tx.link_disc_time.value); return ret; } @@ -537,17 +534,17 @@ static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get) if (get) { param->pv.i = self->qos_rx.baud_rate.bits; - IRDA_DEBUG(2, "%s(), baud rate = 0x%02x\n", - __func__, param->pv.i); + pr_debug("%s(), baud rate = 0x%02x\n", + __func__, param->pv.i); } else { /* * Stations must agree on baud rate, so calculate * intersection */ - IRDA_DEBUG(2, "Requested BAUD_RATE: 0x%04x\n", (__u16) param->pv.i); + pr_debug("Requested BAUD_RATE: 0x%04x\n", (__u16)param->pv.i); final = (__u16) param->pv.i & self->qos_rx.baud_rate.bits; - IRDA_DEBUG(2, "Final BAUD_RATE: 0x%04x\n", final); + pr_debug("Final BAUD_RATE: 0x%04x\n", final); self->qos_tx.baud_rate.bits = final; self->qos_rx.baud_rate.bits = final; } @@ -578,10 +575,10 @@ static int irlap_param_link_disconnect(void *instance, irda_param_t *param, * Stations must agree on link disconnect/threshold * time. */ - IRDA_DEBUG(2, "LINK_DISC: %02x\n", (__u8) param->pv.i); + pr_debug("LINK_DISC: %02x\n", (__u8)param->pv.i); final = (__u8) param->pv.i & self->qos_rx.link_disc_time.bits; - IRDA_DEBUG(2, "Final LINK_DISC: %02x\n", final); + pr_debug("Final LINK_DISC: %02x\n", final); self->qos_tx.link_disc_time.bits = final; self->qos_rx.link_disc_time.bits = final; } @@ -710,8 +707,8 @@ __u32 irlap_max_line_capacity(__u32 speed, __u32 max_turn_time) __u32 line_capacity; int i,j; - IRDA_DEBUG(2, "%s(), speed=%d, max_turn_time=%d\n", - __func__, speed, max_turn_time); + pr_debug("%s(), speed=%d, max_turn_time=%d\n", + __func__, speed, max_turn_time); i = value_index(speed, baud_rates, 10); j = value_index(max_turn_time, max_turn_times, 4); @@ -721,8 +718,8 @@ __u32 irlap_max_line_capacity(__u32 speed, __u32 max_turn_time) line_capacity = max_line_capacities[i][j]; - IRDA_DEBUG(2, "%s(), line capacity=%d bytes\n", - __func__, line_capacity); + pr_debug("%s(), line capacity=%d bytes\n", + __func__, line_capacity); return line_capacity; } @@ -737,8 +734,8 @@ static __u32 irlap_requested_line_capacity(struct qos_info *qos) irlap_min_turn_time_in_bytes(qos->baud_rate.value, qos->min_turn_time.value); - IRDA_DEBUG(2, "%s(), requested line capacity=%d\n", - __func__, line_capacity); + pr_debug("%s(), requested line capacity=%d\n", + __func__, line_capacity); return line_capacity; } diff --git a/net/irda/wrapper.c b/net/irda/wrapper.c index fd0995b1323a07450e0fd5b6555d56257a4a2e91..40a0f993bf13d8bf0a60b8097be0b078697a47a8 100644 --- a/net/irda/wrapper.c +++ b/net/irda/wrapper.c @@ -106,17 +106,17 @@ int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize) * Nothing to worry about, but we set the default number of * BOF's */ - IRDA_DEBUG(1, "%s(), wrong magic in skb!\n", __func__); + pr_debug("%s(), wrong magic in skb!\n", __func__); xbofs = 10; } else xbofs = cb->xbofs + cb->xbofs_delay; - IRDA_DEBUG(4, "%s(), xbofs=%d\n", __func__, xbofs); + pr_debug("%s(), xbofs=%d\n", __func__, xbofs); /* Check that we never use more than 115 + 48 xbofs */ if (xbofs > 163) { - IRDA_DEBUG(0, "%s(), too many xbofs (%d)\n", __func__, - xbofs); + pr_debug("%s(), too many xbofs (%d)\n", 
__func__, + xbofs); xbofs = 163; } @@ -134,8 +134,8 @@ int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize) * transmitted after this point is 5. */ if(n >= (buffsize-5)) { - IRDA_ERROR("%s(), tx buffer overflow (n=%d)\n", - __func__, n); + net_err_ratelimited("%s(), tx buffer overflow (n=%d)\n", + __func__, n); return n; } @@ -286,8 +286,8 @@ async_unwrap_bof(struct net_device *dev, case INSIDE_FRAME: /* Not supposed to happen, the previous frame is not * finished - Jean II */ - IRDA_DEBUG(1, "%s(), Discarding incomplete frame\n", - __func__); + pr_debug("%s(), Discarding incomplete frame\n", + __func__); stats->rx_errors++; stats->rx_missed_errors++; irda_device_set_media_busy(dev, TRUE); @@ -360,7 +360,7 @@ async_unwrap_eof(struct net_device *dev, /* Wrong CRC, discard frame! */ irda_device_set_media_busy(dev, TRUE); - IRDA_DEBUG(1, "%s(), crc error\n", __func__); + pr_debug("%s(), crc error\n", __func__); stats->rx_errors++; stats->rx_crc_errors++; } @@ -386,7 +386,7 @@ async_unwrap_ce(struct net_device *dev, break; case LINK_ESCAPE: - IRDA_WARNING("%s: state not defined\n", __func__); + net_warn_ratelimited("%s: state not defined\n", __func__); break; case BEGIN_FRAME: @@ -420,8 +420,8 @@ async_unwrap_other(struct net_device *dev, rx_buff->fcs = irda_fcs(rx_buff->fcs, byte); #endif } else { - IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n", - __func__); + pr_debug("%s(), Rx buffer overflow, aborting\n", + __func__); rx_buff->state = OUTSIDE_FRAME; } break; @@ -439,8 +439,8 @@ async_unwrap_other(struct net_device *dev, #endif rx_buff->state = INSIDE_FRAME; } else { - IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n", - __func__); + pr_debug("%s(), Rx buffer overflow, aborting\n", + __func__); rx_buff->state = OUTSIDE_FRAME; } break; diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index a089b6b91650b9c17e786e1045cbeac537612dd1..2e9953b2db8402dd71c88691f263db00f2cba3e2 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -1070,9 +1070,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, txmsg.class = 0; /* iterate over control messages */ - for (cmsg = CMSG_FIRSTHDR(msg); cmsg; - cmsg = CMSG_NXTHDR(msg, cmsg)) { - + for_each_cmsghdr(cmsg, msg) { if (!CMSG_OK(msg, cmsg)) { err = -EINVAL; goto out; @@ -1122,7 +1120,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, } if (iucv->transport == AF_IUCV_TRANS_HIPER) skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN); - if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { + if (memcpy_from_msg(skb_put(skb, len), msg, len)) { err = -EFAULT; goto fail; } @@ -1355,7 +1353,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; cskb = skb; - if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) { + if (skb_copy_datagram_msg(cskb, offset, msg, copied)) { if (!(flags & MSG_PEEK)) skb_queue_head(&sk->sk_receive_queue, skb); return -EFAULT; diff --git a/net/key/af_key.c b/net/key/af_key.c index 1847ec4e39305f205c68acaf50579bfceff4d91f..f8ac939d52b4b83ce3720e274f33461347029661 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3611,7 +3611,7 @@ static int pfkey_sendmsg(struct kiocb *kiocb, goto out; err = -EFAULT; - if (memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len)) + if (memcpy_from_msg(skb_put(skb,len), msg, len)) goto out; hdr = pfkey_get_base_msg(skb, &err); @@ -3654,7 +3654,7 @@ static int pfkey_recvmsg(struct kiocb *kiocb, } skb_reset_transport_header(skb); 
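/*
 * Editor's note -- an illustrative sketch, not part of the patch.
 * The open-coded CMSG_FIRSTHDR()/CMSG_NXTHDR() loop replaced in the
 * af_iucv hunk above is exactly what the for_each_cmsghdr() macro in
 * <linux/socket.h> expands to; the conversion is purely cosmetic.
 * Typical use in a sendmsg() implementation:
 */
#include <linux/socket.h>
#include <linux/errno.h>

static int demo_scan_cmsgs(struct msghdr *msg)	/* hypothetical */
{
	struct cmsghdr *cmsg;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		/* dispatch on cmsg->cmsg_level / cmsg->cmsg_type here */
	}
	return 0;
}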
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto out_free; diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index edb78e69efe47ad3bd2895ceb1a0d5aaf61cfc38..781b3a226ba73204aa9ff68923ae2385a88e7205 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -126,6 +126,7 @@ static struct net_device_ops l2tp_eth_netdev_ops = { .ndo_uninit = l2tp_eth_dev_uninit, .ndo_start_xmit = l2tp_eth_dev_xmit, .ndo_get_stats64 = l2tp_eth_get_stats64, + .ndo_set_mac_address = eth_mac_addr, }; static void l2tp_eth_dev_setup(struct net_device *dev) diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 369a9822488c45fc10be28ca00300e9382645a9e..05dfc8aa36afc83b61e2cefeb76738ded8a4db2d 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -441,7 +441,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m *((__be32 *) skb_put(skb, 4)) = 0; /* Copy user data into skb */ - rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); + rc = memcpy_from_msg(skb_put(skb, len), msg, len); if (rc < 0) { kfree_skb(skb); goto error; @@ -528,7 +528,7 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m copied = len; } - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto done; diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 0edb263cc002e84d7192e63552bcdcf260aeb6fa..8611f1b6314161d4df90c4969b3b9cb9066a446d 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -619,7 +619,7 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk, back_from_confirm: lock_sock(sk); - err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, + err = ip6_append_data(sk, ip_generic_getfrag, msg, ulen, transhdrlen, hlimit, tclass, opt, &fl6, (struct rt6_info *)dst, msg->msg_flags, dontfrag); @@ -672,7 +672,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk, copied = len; } - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto done; diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index b704a9356208f5cc3064085b9d82eab52542dd5a..cc7a828fc914d7e05a9495b70df4b1411e0de60e 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -208,7 +208,7 @@ static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock, else if (len < skb->len) msg->msg_flags |= MSG_TRUNC; - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len); + err = skb_copy_datagram_msg(skb, 0, msg, len); if (likely(err == 0)) err = len; @@ -346,8 +346,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh skb_put(skb, 2); /* Copy user data into skb */ - error = memcpy_fromiovec(skb_put(skb, total_len), m->msg_iov, - total_len); + error = memcpy_from_msg(skb_put(skb, total_len), m, total_len); if (error < 0) { kfree_skb(skb); goto error_put_sess_tun; diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c index 3cdaa046c1bc60c8a22b46fe5233b28ed1e097bc..fc60d9d738b57b94bdbae1b5d20ec24295b87190 100644 --- a/net/lapb/lapb_iface.c +++ b/net/lapb/lapb_iface.c @@ -73,6 +73,7 @@ static void __lapb_remove_cb(struct lapb_cb *lapb) lapb_put(lapb); } } +EXPORT_SYMBOL(lapb_register); /* * Add a socket to the bound sockets list. 
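/*
 * Editor's note -- an illustrative sketch, not part of the patch.
 * The memcpy_fromiovec()/skb_copy_datagram_iovec() conversions in the
 * pfkey and l2tp hunks all take the same shape: memcpy_from_msg() and
 * skb_copy_datagram_msg() are thin msghdr-based wrappers that keep
 * protocol code from dereferencing msg->msg_iov directly, part of the
 * groundwork for switching struct msghdr over to iov_iter.  The
 * transmit side in miniature:
 */
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/errno.h>

static int demo_send_copy(struct sk_buff *skb, struct msghdr *msg,
			  size_t len)		/* hypothetical */
{
	/* was: memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len) */
	if (memcpy_from_msg(skb_put(skb, len), msg, len))
		return -EFAULT;
	return 0;
}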
@@ -195,6 +196,7 @@ out: write_unlock_bh(&lapb_list_lock); return rc; } +EXPORT_SYMBOL(lapb_unregister); int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms) { @@ -227,6 +229,7 @@ int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms) out: return rc; } +EXPORT_SYMBOL(lapb_getparms); int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms) { @@ -262,6 +265,7 @@ out_put: out: return rc; } +EXPORT_SYMBOL(lapb_setparms); int lapb_connect_request(struct net_device *dev) { @@ -290,6 +294,7 @@ out_put: out: return rc; } +EXPORT_SYMBOL(lapb_connect_request); int lapb_disconnect_request(struct net_device *dev) { @@ -334,6 +339,7 @@ out_put: out: return rc; } +EXPORT_SYMBOL(lapb_disconnect_request); int lapb_data_request(struct net_device *dev, struct sk_buff *skb) { @@ -355,6 +361,7 @@ out_put: out: return rc; } +EXPORT_SYMBOL(lapb_data_request); int lapb_data_received(struct net_device *dev, struct sk_buff *skb) { @@ -369,6 +376,7 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb) return rc; } +EXPORT_SYMBOL(lapb_data_received); void lapb_connect_confirmation(struct lapb_cb *lapb, int reason) { @@ -415,15 +423,6 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb) return used; } -EXPORT_SYMBOL(lapb_register); -EXPORT_SYMBOL(lapb_unregister); -EXPORT_SYMBOL(lapb_getparms); -EXPORT_SYMBOL(lapb_setparms); -EXPORT_SYMBOL(lapb_connect_request); -EXPORT_SYMBOL(lapb_disconnect_request); -EXPORT_SYMBOL(lapb_data_request); -EXPORT_SYMBOL(lapb_data_received); - static int __init lapb_init(void) { return 0; diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index bb9cbc17d9267bc6f554f4ed985d3596fefcd91f..2c0b83ce43bda478f6c56ebdc0951a54658c9098 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -819,8 +819,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, used = len; if (!(flags & MSG_TRUNC)) { - int rc = skb_copy_datagram_iovec(skb, offset, - msg->msg_iov, used); + int rc = skb_copy_datagram_msg(skb, offset, msg, used); if (rc) { /* Exception. Bailout! 
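/*
 * Editor's note -- an illustrative sketch, not part of the patch.
 * The lapb_iface.c hunks above and below move declarations only:
 * kernel style (checked by checkpatch) wants each EXPORT_SYMBOL()
 * immediately after the closing brace of the function it exports,
 * rather than collected in a block at the bottom of the file:
 */
#include <linux/export.h>

int demo_export_me(void)			/* hypothetical */
{
	return 0;
}
EXPORT_SYMBOL(demo_export_me);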
*/ if (!copied) @@ -922,7 +921,7 @@ static int llc_ui_sendmsg(struct kiocb *iocb, struct socket *sock, skb->dev = llc->dev; skb->protocol = llc_proto_type(addr->sllc_arphrd); skb_reserve(skb, hdrlen); - rc = memcpy_fromiovec(skb_put(skb, copied), msg->msg_iov, copied); + rc = memcpy_from_msg(skb_put(skb, copied), msg, copied); if (rc) goto out; if (sk->sk_type == SOCK_DGRAM || addr->sllc_ua) { diff --git a/net/llc/llc_c_st.c b/net/llc/llc_c_st.c index 818a9428823bdb751a3414eea3c5aac080328481..2467573b5f84af42b890757ae1e48f34afca6eb5 100644 --- a/net/llc/llc_c_st.c +++ b/net/llc/llc_c_st.c @@ -33,7 +33,7 @@ * LLC_CONN_STATE_AWAIT_REJ states */ /* State transitions for LLC_CONN_EV_DISC_REQ event */ -static llc_conn_action_t llc_common_actions_1[] = { +static const llc_conn_action_t llc_common_actions_1[] = { [0] = llc_conn_ac_send_disc_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, @@ -50,7 +50,7 @@ static struct llc_conn_state_trans llc_common_state_trans_1 = { }; /* State transitions for LLC_CONN_EV_RESET_REQ event */ -static llc_conn_action_t llc_common_actions_2[] = { +static const llc_conn_action_t llc_common_actions_2[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, @@ -67,7 +67,7 @@ static struct llc_conn_state_trans llc_common_state_trans_2 = { }; /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */ -static llc_conn_action_t llc_common_actions_3[] = { +static const llc_conn_action_t llc_common_actions_3[] = { [0] = llc_conn_ac_stop_all_timers, [1] = llc_conn_ac_set_vs_0, [2] = llc_conn_ac_set_vr_0, @@ -87,7 +87,7 @@ static struct llc_conn_state_trans llc_common_state_trans_3 = { }; /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */ -static llc_conn_action_t llc_common_actions_4[] = { +static const llc_conn_action_t llc_common_actions_4[] = { [0] = llc_conn_ac_stop_all_timers, [1] = llc_conn_ac_send_ua_rsp_f_set_p, [2] = llc_conn_ac_disc_ind, @@ -103,7 +103,7 @@ static struct llc_conn_state_trans llc_common_state_trans_4 = { }; /* State transitions for LLC_CONN_EV_RX_FRMR_RSP_Fbit_SET_X event */ -static llc_conn_action_t llc_common_actions_5[] = { +static const llc_conn_action_t llc_common_actions_5[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, @@ -122,7 +122,7 @@ static struct llc_conn_state_trans llc_common_state_trans_5 = { }; /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event */ -static llc_conn_action_t llc_common_actions_6[] = { +static const llc_conn_action_t llc_common_actions_6[] = { [0] = llc_conn_ac_disc_ind, [1] = llc_conn_ac_stop_all_timers, [2] = llc_conn_disc, @@ -137,7 +137,7 @@ static struct llc_conn_state_trans llc_common_state_trans_6 = { }; /* State transitions for LLC_CONN_EV_RX_ZZZ_CMD_Pbit_SET_X_INVAL_Nr event */ -static llc_conn_action_t llc_common_actions_7a[] = { +static const llc_conn_action_t llc_common_actions_7a[] = { [0] = llc_conn_ac_send_frmr_rsp_f_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, @@ -153,7 +153,7 @@ static struct llc_conn_state_trans llc_common_state_trans_7a = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_X_INVAL_Ns event */ -static llc_conn_action_t llc_common_actions_7b[] = { +static const llc_conn_action_t llc_common_actions_7b[] = { [0] = llc_conn_ac_send_frmr_rsp_f_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, @@ -169,7 +169,7 
@@ static struct llc_conn_state_trans llc_common_state_trans_7b = { }; /* State transitions for LLC_CONN_EV_RX_ZZZ_RSP_Fbit_SET_X_INVAL_Nr event */ -static llc_conn_action_t llc_common_actions_8a[] = { +static const llc_conn_action_t llc_common_actions_8a[] = { [0] = llc_conn_ac_send_frmr_rsp_f_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, @@ -185,7 +185,7 @@ static struct llc_conn_state_trans llc_common_state_trans_8a = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X_INVAL_Ns event */ -static llc_conn_action_t llc_common_actions_8b[] = { +static const llc_conn_action_t llc_common_actions_8b[] = { [0] = llc_conn_ac_send_frmr_rsp_f_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, @@ -201,7 +201,7 @@ static struct llc_conn_state_trans llc_common_state_trans_8b = { }; /* State transitions for LLC_CONN_EV_RX_BAD_PDU event */ -static llc_conn_action_t llc_common_actions_8c[] = { +static const llc_conn_action_t llc_common_actions_8c[] = { [0] = llc_conn_ac_send_frmr_rsp_f_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, @@ -217,7 +217,7 @@ static struct llc_conn_state_trans llc_common_state_trans_8c = { }; /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event */ -static llc_conn_action_t llc_common_actions_9[] = { +static const llc_conn_action_t llc_common_actions_9[] = { [0] = llc_conn_ac_send_frmr_rsp_f_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, @@ -234,12 +234,12 @@ static struct llc_conn_state_trans llc_common_state_trans_9 = { /* State transitions for LLC_CONN_EV_RX_XXX_RSP_Fbit_SET_1 event */ #if 0 -static llc_conn_ev_qfyr_t llc_common_ev_qfyrs_10[] = { +static const llc_conn_ev_qfyr_t llc_common_ev_qfyrs_10[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = NULL, }; -static llc_conn_action_t llc_common_actions_10[] = { +static const llc_conn_action_t llc_common_actions_10[] = { [0] = llc_conn_ac_send_frmr_rsp_f_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, @@ -256,12 +256,12 @@ static struct llc_conn_state_trans llc_common_state_trans_10 = { #endif /* State transitions for LLC_CONN_EV_P_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11a[] = { +static const llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11a[] = { [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, [1] = NULL, }; -static llc_conn_action_t llc_common_actions_11a[] = { +static const llc_conn_action_t llc_common_actions_11a[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, @@ -278,12 +278,12 @@ static struct llc_conn_state_trans llc_common_state_trans_11a = { }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11b[] = { +static const llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11b[] = { [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, [1] = NULL, }; -static llc_conn_action_t llc_common_actions_11b[] = { +static const llc_conn_action_t llc_common_actions_11b[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, @@ -300,12 +300,12 @@ static struct llc_conn_state_trans llc_common_state_trans_11b = { }; /* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11c[] = { +static const llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11c[] = { [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, [1] = NULL, }; -static llc_conn_action_t 
llc_common_actions_11c[] = { +static const llc_conn_action_t llc_common_actions_11c[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, @@ -322,12 +322,12 @@ static struct llc_conn_state_trans llc_common_state_trans_11c = { }; /* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11d[] = { +static const llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11d[] = { [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, [1] = NULL, }; -static llc_conn_action_t llc_common_actions_11d[] = { +static const llc_conn_action_t llc_common_actions_11d[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, @@ -351,7 +351,7 @@ static struct llc_conn_state_trans llc_common_state_trans_end; /* LLC_CONN_STATE_ADM transitions */ /* State transitions for LLC_CONN_EV_CONN_REQ event */ -static llc_conn_action_t llc_adm_actions_1[] = { +static const llc_conn_action_t llc_adm_actions_1[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_set_retry_cnt_0, @@ -367,7 +367,7 @@ static struct llc_conn_state_trans llc_adm_state_trans_1 = { }; /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */ -static llc_conn_action_t llc_adm_actions_2[] = { +static const llc_conn_action_t llc_adm_actions_2[] = { [0] = llc_conn_ac_send_ua_rsp_f_set_p, [1] = llc_conn_ac_set_vs_0, [2] = llc_conn_ac_set_vr_0, @@ -386,7 +386,7 @@ static struct llc_conn_state_trans llc_adm_state_trans_2 = { }; /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */ -static llc_conn_action_t llc_adm_actions_3[] = { +static const llc_conn_action_t llc_adm_actions_3[] = { [0] = llc_conn_ac_send_dm_rsp_f_set_p, [1] = llc_conn_disc, [2] = NULL, @@ -400,7 +400,7 @@ static struct llc_conn_state_trans llc_adm_state_trans_3 = { }; /* State transitions for LLC_CONN_EV_RX_XXX_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_adm_actions_4[] = { +static const llc_conn_action_t llc_adm_actions_4[] = { [0] = llc_conn_ac_send_dm_rsp_f_set_1, [1] = llc_conn_disc, [2] = NULL, @@ -414,7 +414,7 @@ static struct llc_conn_state_trans llc_adm_state_trans_4 = { }; /* State transitions for LLC_CONN_EV_RX_XXX_YYY event */ -static llc_conn_action_t llc_adm_actions_5[] = { +static const llc_conn_action_t llc_adm_actions_5[] = { [0] = llc_conn_disc, [1] = NULL, }; @@ -445,7 +445,7 @@ static struct llc_conn_state_trans *llc_adm_state_transitions[] = { /* LLC_CONN_STATE_SETUP transitions */ /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */ -static llc_conn_action_t llc_setup_actions_1[] = { +static const llc_conn_action_t llc_setup_actions_1[] = { [0] = llc_conn_ac_send_ua_rsp_f_set_p, [1] = llc_conn_ac_set_vs_0, [2] = llc_conn_ac_set_vr_0, @@ -461,13 +461,13 @@ static struct llc_conn_state_trans llc_setup_state_trans_1 = { }; /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event */ -static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_2[] = { +static const llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_2[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_f, [1] = llc_conn_ev_qlfy_set_status_conn, [2] = NULL, }; -static llc_conn_action_t llc_setup_actions_2[] = { +static const llc_conn_action_t llc_setup_actions_2[] = { [0] = llc_conn_ac_stop_ack_timer, [1] = llc_conn_ac_set_vs_0, [2] = llc_conn_ac_set_vr_0, @@ -485,13 +485,13 @@ static struct llc_conn_state_trans llc_setup_state_trans_2 = { }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP 
event */ -static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_3[] = { +static const llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_3[] = { [0] = llc_conn_ev_qlfy_s_flag_eq_1, [1] = llc_conn_ev_qlfy_set_status_conn, [2] = NULL, }; -static llc_conn_action_t llc_setup_actions_3[] = { +static const llc_conn_action_t llc_setup_actions_3[] = { [0] = llc_conn_ac_set_p_flag_0, [1] = llc_conn_ac_set_remote_busy_0, [2] = llc_conn_ac_conn_confirm, @@ -506,12 +506,12 @@ static struct llc_conn_state_trans llc_setup_state_trans_3 = { }; /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */ -static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_4[] = { +static const llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_4[] = { [0] = llc_conn_ev_qlfy_set_status_disc, [1] = NULL, }; -static llc_conn_action_t llc_setup_actions_4[] = { +static const llc_conn_action_t llc_setup_actions_4[] = { [0] = llc_conn_ac_send_dm_rsp_f_set_p, [1] = llc_conn_ac_stop_ack_timer, [2] = llc_conn_ac_conn_confirm, @@ -527,12 +527,12 @@ static struct llc_conn_state_trans llc_setup_state_trans_4 = { }; /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event */ -static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_5[] = { +static const llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_5[] = { [0] = llc_conn_ev_qlfy_set_status_disc, [1] = NULL, }; -static llc_conn_action_t llc_setup_actions_5[] = { +static const llc_conn_action_t llc_setup_actions_5[] = { [0] = llc_conn_ac_stop_ack_timer, [1] = llc_conn_ac_conn_confirm, [2] = llc_conn_disc, @@ -547,13 +547,13 @@ static struct llc_conn_state_trans llc_setup_state_trans_5 = { }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_7[] = { +static const llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_7[] = { [0] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [1] = llc_conn_ev_qlfy_s_flag_eq_0, [2] = NULL, }; -static llc_conn_action_t llc_setup_actions_7[] = { +static const llc_conn_action_t llc_setup_actions_7[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_inc_retry_cnt_by_1, @@ -568,14 +568,14 @@ static struct llc_conn_state_trans llc_setup_state_trans_7 = { }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_8[] = { +static const llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_8[] = { [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, [1] = llc_conn_ev_qlfy_s_flag_eq_0, [2] = llc_conn_ev_qlfy_set_status_failed, [3] = NULL, }; -static llc_conn_action_t llc_setup_actions_8[] = { +static const llc_conn_action_t llc_setup_actions_8[] = { [0] = llc_conn_ac_conn_confirm, [1] = llc_conn_disc, [2] = NULL, @@ -609,14 +609,14 @@ static struct llc_conn_state_trans *llc_setup_state_transitions[] = { /* LLC_CONN_STATE_NORMAL transitions */ /* State transitions for LLC_CONN_EV_DATA_REQ event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_1[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_1[] = { [0] = llc_conn_ev_qlfy_remote_busy_eq_0, [1] = llc_conn_ev_qlfy_p_flag_eq_0, [2] = llc_conn_ev_qlfy_last_frame_eq_0, [3] = NULL, }; -static llc_conn_action_t llc_normal_actions_1[] = { +static const llc_conn_action_t llc_normal_actions_1[] = { [0] = llc_conn_ac_send_i_as_ack, [1] = llc_conn_ac_start_ack_tmr_if_not_running, [2] = NULL, @@ -630,14 +630,14 @@ static struct llc_conn_state_trans llc_normal_state_trans_1 = { }; /* State transitions for LLC_CONN_EV_DATA_REQ event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_2[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_2[] = { [0] = 
llc_conn_ev_qlfy_remote_busy_eq_0, [1] = llc_conn_ev_qlfy_p_flag_eq_0, [2] = llc_conn_ev_qlfy_last_frame_eq_1, [3] = NULL, }; -static llc_conn_action_t llc_normal_actions_2[] = { +static const llc_conn_action_t llc_normal_actions_2[] = { [0] = llc_conn_ac_send_i_cmd_p_set_1, [1] = llc_conn_ac_start_p_timer, [2] = NULL, @@ -651,14 +651,14 @@ static struct llc_conn_state_trans llc_normal_state_trans_2 = { }; /* State transitions for LLC_CONN_EV_DATA_REQ event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_2_1[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_2_1[] = { [0] = llc_conn_ev_qlfy_remote_busy_eq_1, [1] = llc_conn_ev_qlfy_set_status_remote_busy, [2] = NULL, }; /* just one member, NULL, .bss zeroes it */ -static llc_conn_action_t llc_normal_actions_2_1[1]; +static const llc_conn_action_t llc_normal_actions_2_1[1]; static struct llc_conn_state_trans llc_normal_state_trans_2_1 = { .ev = llc_conn_ev_data_req, @@ -668,12 +668,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_2_1 = { }; /* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_3[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_3[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = NULL, }; -static llc_conn_action_t llc_normal_actions_3[] = { +static const llc_conn_action_t llc_normal_actions_3[] = { [0] = llc_conn_ac_rst_sendack_flag, [1] = llc_conn_ac_send_rnr_xxx_x_set_0, [2] = llc_conn_ac_set_data_flag_0, @@ -688,12 +688,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_3 = { }; /* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_4[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_4[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_normal_actions_4[] = { +static const llc_conn_action_t llc_normal_actions_4[] = { [0] = llc_conn_ac_rst_sendack_flag, [1] = llc_conn_ac_send_rnr_xxx_x_set_0, [2] = llc_conn_ac_set_data_flag_0, @@ -708,12 +708,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_4 = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_5a[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_5a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = NULL, }; -static llc_conn_action_t llc_normal_actions_5a[] = { +static const llc_conn_action_t llc_normal_actions_5a[] = { [0] = llc_conn_ac_rst_sendack_flag, [1] = llc_conn_ac_send_rej_xxx_x_set_0, [2] = llc_conn_ac_upd_nr_received, @@ -731,12 +731,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_5a = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_5b[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_5b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = NULL, }; -static llc_conn_action_t llc_normal_actions_5b[] = { +static const llc_conn_action_t llc_normal_actions_5b[] = { [0] = llc_conn_ac_rst_sendack_flag, [1] = llc_conn_ac_send_rej_xxx_x_set_0, [2] = llc_conn_ac_upd_nr_received, @@ -754,12 +754,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_5b = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_5c[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_5c[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_normal_actions_5c[] = { +static const 
llc_conn_action_t llc_normal_actions_5c[] = { [0] = llc_conn_ac_rst_sendack_flag, [1] = llc_conn_ac_send_rej_xxx_x_set_0, [2] = llc_conn_ac_upd_nr_received, @@ -777,12 +777,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_5c = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_6a[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_6a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_normal_actions_6a[] = { +static const llc_conn_action_t llc_normal_actions_6a[] = { [0] = llc_conn_ac_rst_sendack_flag, [1] = llc_conn_ac_send_rej_xxx_x_set_0, [2] = llc_conn_ac_upd_nr_received, @@ -798,12 +798,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_6a = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_6b[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_6b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_normal_actions_6b[] = { +static const llc_conn_action_t llc_normal_actions_6b[] = { [0] = llc_conn_ac_rst_sendack_flag, [1] = llc_conn_ac_send_rej_xxx_x_set_0, [2] = llc_conn_ac_upd_nr_received, @@ -819,7 +819,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_6b = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */ -static llc_conn_action_t llc_normal_actions_7[] = { +static const llc_conn_action_t llc_normal_actions_7[] = { [0] = llc_conn_ac_rst_sendack_flag, [1] = llc_conn_ac_send_rej_rsp_f_set_1, [2] = llc_conn_ac_upd_nr_received, @@ -835,12 +835,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_7 = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_8a[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_8a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_f, [1] = NULL, }; -static llc_conn_action_t llc_normal_actions_8[] = { +static const llc_conn_action_t llc_normal_actions_8[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_upd_p_flag, @@ -858,7 +858,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_8a = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_8b[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_8b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = NULL, }; @@ -871,12 +871,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_8b = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_9a[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_9a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_normal_actions_9a[] = { +static const llc_conn_action_t llc_normal_actions_9a[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_data_ind, @@ -892,12 +892,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_9a = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_9b[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_9b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_normal_actions_9b[] = { +static const llc_conn_action_t llc_normal_actions_9b[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_upd_nr_received, [2] 
= llc_conn_ac_data_ind, @@ -913,7 +913,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_9b = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_normal_actions_10[] = { +static const llc_conn_action_t llc_normal_actions_10[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_send_ack_rsp_f_set_1, [2] = llc_conn_ac_rst_sendack_flag, @@ -930,7 +930,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_10 = { }; /* State transitions for * LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */ -static llc_conn_action_t llc_normal_actions_11a[] = { +static const llc_conn_action_t llc_normal_actions_11a[] = { [0] = llc_conn_ac_upd_p_flag, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_clear_remote_busy, @@ -945,7 +945,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_11a = { }; /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */ -static llc_conn_action_t llc_normal_actions_11b[] = { +static const llc_conn_action_t llc_normal_actions_11b[] = { [0] = llc_conn_ac_upd_p_flag, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_clear_remote_busy, @@ -960,12 +960,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_11b = { }; /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_11c[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_11c[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_normal_actions_11c[] = { +static const llc_conn_action_t llc_normal_actions_11c[] = { [0] = llc_conn_ac_upd_p_flag, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_inc_tx_win_size, @@ -981,7 +981,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_11c = { }; /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_normal_actions_12[] = { +static const llc_conn_action_t llc_normal_actions_12[] = { [0] = llc_conn_ac_send_ack_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_adjust_npta_by_rr, @@ -998,7 +998,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_12 = { }; /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */ -static llc_conn_action_t llc_normal_actions_13a[] = { +static const llc_conn_action_t llc_normal_actions_13a[] = { [0] = llc_conn_ac_upd_p_flag, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_set_remote_busy, @@ -1013,7 +1013,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_13a = { }; /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */ -static llc_conn_action_t llc_normal_actions_13b[] = { +static const llc_conn_action_t llc_normal_actions_13b[] = { [0] = llc_conn_ac_upd_p_flag, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_set_remote_busy, @@ -1028,12 +1028,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_13b = { }; /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_13c[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_13c[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_normal_actions_13c[] = { +static const llc_conn_action_t llc_normal_actions_13c[] = { [0] = llc_conn_ac_upd_p_flag, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_set_remote_busy, @@ -1048,7 +1048,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_13c = { }; /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */ -static llc_conn_action_t 
llc_normal_actions_14[] = { +static const llc_conn_action_t llc_normal_actions_14[] = { [0] = llc_conn_ac_send_rr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_adjust_npta_by_rnr, @@ -1065,12 +1065,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_14 = { }; /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_15a[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_15a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = NULL, }; -static llc_conn_action_t llc_normal_actions_15a[] = { +static const llc_conn_action_t llc_normal_actions_15a[] = { [0] = llc_conn_ac_set_vs_nr, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_p_flag, @@ -1088,12 +1088,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_15a = { }; /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_X event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_15b[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_15b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_f, [1] = NULL, }; -static llc_conn_action_t llc_normal_actions_15b[] = { +static const llc_conn_action_t llc_normal_actions_15b[] = { [0] = llc_conn_ac_set_vs_nr, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_p_flag, @@ -1111,12 +1111,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_15b = { }; /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_16a[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_16a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_normal_actions_16a[] = { +static const llc_conn_action_t llc_normal_actions_16a[] = { [0] = llc_conn_ac_set_vs_nr, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_dec_tx_win_size, @@ -1133,12 +1133,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_16a = { }; /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_16b[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_16b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_normal_actions_16b[] = { +static const llc_conn_action_t llc_normal_actions_16b[] = { [0] = llc_conn_ac_set_vs_nr, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_dec_tx_win_size, @@ -1155,7 +1155,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_16b = { }; /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_normal_actions_17[] = { +static const llc_conn_action_t llc_normal_actions_17[] = { [0] = llc_conn_ac_set_vs_nr, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_dec_tx_win_size, @@ -1172,12 +1172,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_17 = { }; /* State transitions for LLC_CONN_EV_INIT_P_F_CYCLE event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_18[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_18[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = NULL, }; -static llc_conn_action_t llc_normal_actions_18[] = { +static const llc_conn_action_t llc_normal_actions_18[] = { [0] = llc_conn_ac_send_rr_cmd_p_set_1, [1] = llc_conn_ac_start_p_timer, [2] = NULL, @@ -1191,12 +1191,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_18 = { }; /* State transitions for LLC_CONN_EV_P_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_19[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_19[] = { [0] = 
llc_conn_ev_qlfy_retry_cnt_lt_n2, [1] = NULL, }; -static llc_conn_action_t llc_normal_actions_19[] = { +static const llc_conn_action_t llc_normal_actions_19[] = { [0] = llc_conn_ac_rst_sendack_flag, [1] = llc_conn_ac_send_rr_cmd_p_set_1, [2] = llc_conn_ac_rst_vs, @@ -1213,13 +1213,13 @@ static struct llc_conn_state_trans llc_normal_state_trans_19 = { }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_20a[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_20a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [2] = NULL, }; -static llc_conn_action_t llc_normal_actions_20a[] = { +static const llc_conn_action_t llc_normal_actions_20a[] = { [0] = llc_conn_ac_rst_sendack_flag, [1] = llc_conn_ac_send_rr_cmd_p_set_1, [2] = llc_conn_ac_rst_vs, @@ -1236,13 +1236,13 @@ static struct llc_conn_state_trans llc_normal_state_trans_20a = { }; /* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_20b[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_20b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [2] = NULL, }; -static llc_conn_action_t llc_normal_actions_20b[] = { +static const llc_conn_action_t llc_normal_actions_20b[] = { [0] = llc_conn_ac_rst_sendack_flag, [1] = llc_conn_ac_send_rr_cmd_p_set_1, [2] = llc_conn_ac_rst_vs, @@ -1259,12 +1259,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_20b = { }; /* State transitions for LLC_CONN_EV_TX_BUFF_FULL event */ -static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_21[] = { +static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_21[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = NULL, }; -static llc_conn_action_t llc_normal_actions_21[] = { +static const llc_conn_action_t llc_normal_actions_21[] = { [0] = llc_conn_ac_send_rr_cmd_p_set_1, [1] = llc_conn_ac_start_p_timer, [2] = NULL, @@ -1342,13 +1342,13 @@ static struct llc_conn_state_trans *llc_normal_state_transitions[] = { /* LLC_CONN_STATE_BUSY transitions */ /* State transitions for LLC_CONN_EV_DATA_REQ event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_1[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_1[] = { [0] = llc_conn_ev_qlfy_remote_busy_eq_0, [1] = llc_conn_ev_qlfy_p_flag_eq_0, [2] = NULL, }; -static llc_conn_action_t llc_busy_actions_1[] = { +static const llc_conn_action_t llc_busy_actions_1[] = { [0] = llc_conn_ac_send_i_xxx_x_set_0, [1] = llc_conn_ac_start_ack_tmr_if_not_running, [2] = NULL, @@ -1362,13 +1362,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_1 = { }; /* State transitions for LLC_CONN_EV_DATA_REQ event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_2[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_2[] = { [0] = llc_conn_ev_qlfy_remote_busy_eq_0, [1] = llc_conn_ev_qlfy_p_flag_eq_1, [2] = NULL, }; -static llc_conn_action_t llc_busy_actions_2[] = { +static const llc_conn_action_t llc_busy_actions_2[] = { [0] = llc_conn_ac_send_i_xxx_x_set_0, [1] = llc_conn_ac_start_ack_tmr_if_not_running, [2] = NULL, @@ -1382,14 +1382,14 @@ static struct llc_conn_state_trans llc_busy_state_trans_2 = { }; /* State transitions for LLC_CONN_EV_DATA_REQ event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_2_1[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_2_1[] = { [0] = llc_conn_ev_qlfy_remote_busy_eq_1, [1] = llc_conn_ev_qlfy_set_status_remote_busy, [2] = NULL, }; /* just one member, NULL, .bss zeroes it */ -static llc_conn_action_t llc_busy_actions_2_1[1]; 
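/*
 * Editor's note -- an illustrative sketch, not part of the patch.
 * The llc_c_st.c hunks are one mechanical sweep: every action and
 * qualifier table is a fixed NULL-terminated array of function
 * pointers that the state machine only ever reads, so marking them
 * const lets the compiler place them in read-only data.  One side
 * effect: the "just one member, NULL, .bss zeroes it" comments become
 * slightly stale, since a const zero-initialized object typically
 * lands in .rodata rather than .bss -- the contents are all-NULL
 * either way.  The pattern in miniature (names hypothetical):
 */
#include <linux/stddef.h>

typedef int (*demo_action_t)(void *ctx);

static const demo_action_t demo_actions[] = {
	/* handlers run in order up to the NULL terminator */
	NULL,
};

static int demo_run_actions(void *ctx)
{
	int i, rc = 0;

	for (i = 0; !rc && demo_actions[i]; i++)
		rc = demo_actions[i](ctx);
	return rc;
}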
+static const llc_conn_action_t llc_busy_actions_2_1[1]; static struct llc_conn_state_trans llc_busy_state_trans_2_1 = { .ev = llc_conn_ev_data_req, @@ -1399,13 +1399,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_2_1 = { }; /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_3[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_3[] = { [0] = llc_conn_ev_qlfy_data_flag_eq_1, [1] = llc_conn_ev_qlfy_p_flag_eq_0, [2] = NULL, }; -static llc_conn_action_t llc_busy_actions_3[] = { +static const llc_conn_action_t llc_busy_actions_3[] = { [0] = llc_conn_ac_send_rej_xxx_x_set_0, [1] = llc_conn_ac_start_rej_timer, [2] = NULL, @@ -1419,13 +1419,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_3 = { }; /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_4[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_4[] = { [0] = llc_conn_ev_qlfy_data_flag_eq_1, [1] = llc_conn_ev_qlfy_p_flag_eq_1, [2] = NULL, }; -static llc_conn_action_t llc_busy_actions_4[] = { +static const llc_conn_action_t llc_busy_actions_4[] = { [0] = llc_conn_ac_send_rej_xxx_x_set_0, [1] = llc_conn_ac_start_rej_timer, [2] = NULL, @@ -1439,13 +1439,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_4 = { }; /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_5[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_5[] = { [0] = llc_conn_ev_qlfy_data_flag_eq_0, [1] = llc_conn_ev_qlfy_p_flag_eq_0, [2] = NULL, }; -static llc_conn_action_t llc_busy_actions_5[] = { +static const llc_conn_action_t llc_busy_actions_5[] = { [0] = llc_conn_ac_send_rr_xxx_x_set_0, [1] = NULL, }; @@ -1458,13 +1458,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_5 = { }; /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_6[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_6[] = { [0] = llc_conn_ev_qlfy_data_flag_eq_0, [1] = llc_conn_ev_qlfy_p_flag_eq_1, [2] = NULL, }; -static llc_conn_action_t llc_busy_actions_6[] = { +static const llc_conn_action_t llc_busy_actions_6[] = { [0] = llc_conn_ac_send_rr_xxx_x_set_0, [1] = NULL, }; @@ -1477,13 +1477,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_6 = { }; /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_7[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_7[] = { [0] = llc_conn_ev_qlfy_data_flag_eq_2, [1] = llc_conn_ev_qlfy_p_flag_eq_0, [2] = NULL, }; -static llc_conn_action_t llc_busy_actions_7[] = { +static const llc_conn_action_t llc_busy_actions_7[] = { [0] = llc_conn_ac_send_rr_xxx_x_set_0, [1] = NULL, }; @@ -1496,13 +1496,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_7 = { }; /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_8[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_8[] = { [0] = llc_conn_ev_qlfy_data_flag_eq_2, [1] = llc_conn_ev_qlfy_p_flag_eq_1, [2] = NULL, }; -static llc_conn_action_t llc_busy_actions_8[] = { +static const llc_conn_action_t llc_busy_actions_8[] = { [0] = llc_conn_ac_send_rr_xxx_x_set_0, [1] = NULL, }; @@ -1515,12 +1515,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_8 = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X_UNEXPD_Ns event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_9a[] = { +static 
const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_9a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_f, [1] = NULL, }; -static llc_conn_action_t llc_busy_actions_9a[] = { +static const llc_conn_action_t llc_busy_actions_9a[] = { [0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, [1] = llc_conn_ac_upd_p_flag, [2] = llc_conn_ac_upd_nr_received, @@ -1537,12 +1537,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_9a = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_9b[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_9b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = NULL, }; -static llc_conn_action_t llc_busy_actions_9b[] = { +static const llc_conn_action_t llc_busy_actions_9b[] = { [0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, [1] = llc_conn_ac_upd_p_flag, [2] = llc_conn_ac_upd_nr_received, @@ -1559,12 +1559,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_9b = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_10a[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_10a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_busy_actions_10a[] = { +static const llc_conn_action_t llc_busy_actions_10a[] = { [0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_set_data_flag_1_if_data_flag_eq_0, @@ -1579,12 +1579,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_10a = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_10b[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_10b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_busy_actions_10b[] = { +static const llc_conn_action_t llc_busy_actions_10b[] = { [0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_set_data_flag_1_if_data_flag_eq_0, @@ -1599,7 +1599,7 @@ static struct llc_conn_state_trans llc_busy_state_trans_10b = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */ -static llc_conn_action_t llc_busy_actions_11[] = { +static const llc_conn_action_t llc_busy_actions_11[] = { [0] = llc_conn_ac_send_rnr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_set_data_flag_1_if_data_flag_eq_0, @@ -1614,7 +1614,7 @@ static struct llc_conn_state_trans llc_busy_state_trans_11 = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_busy_actions_12[] = { +static const llc_conn_action_t llc_busy_actions_12[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_send_rnr_rsp_f_set_1, @@ -1632,12 +1632,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_12 = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_13a[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_13a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_f, [1] = NULL, }; -static llc_conn_action_t llc_busy_actions_13a[] = { +static const llc_conn_action_t llc_busy_actions_13a[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_upd_p_flag, @@ -1657,12 +1657,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_13a = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_13b[] = { +static const 
llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_13b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = NULL, }; -static llc_conn_action_t llc_busy_actions_13b[] = { +static const llc_conn_action_t llc_busy_actions_13b[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_upd_p_flag, @@ -1682,12 +1682,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_13b = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_14a[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_14a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_busy_actions_14a[] = { +static const llc_conn_action_t llc_busy_actions_14a[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, @@ -1705,12 +1705,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_14a = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_14b[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_14b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_busy_actions_14b[] = { +static const llc_conn_action_t llc_busy_actions_14b[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, @@ -1728,7 +1728,7 @@ static struct llc_conn_state_trans llc_busy_state_trans_14b = { }; /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */ -static llc_conn_action_t llc_busy_actions_15a[] = { +static const llc_conn_action_t llc_busy_actions_15a[] = { [0] = llc_conn_ac_upd_p_flag, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_clear_remote_busy, @@ -1743,7 +1743,7 @@ static struct llc_conn_state_trans llc_busy_state_trans_15a = { }; /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */ -static llc_conn_action_t llc_busy_actions_15b[] = { +static const llc_conn_action_t llc_busy_actions_15b[] = { [0] = llc_conn_ac_upd_p_flag, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_clear_remote_busy, @@ -1758,12 +1758,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_15b = { }; /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_15c[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_15c[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_busy_actions_15c[] = { +static const llc_conn_action_t llc_busy_actions_15c[] = { [0] = llc_conn_ac_upd_p_flag, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_clear_remote_busy, @@ -1778,7 +1778,7 @@ static struct llc_conn_state_trans llc_busy_state_trans_15c = { }; /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_busy_actions_16[] = { +static const llc_conn_action_t llc_busy_actions_16[] = { [0] = llc_conn_ac_send_rnr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_clear_remote_busy, @@ -1793,7 +1793,7 @@ static struct llc_conn_state_trans llc_busy_state_trans_16 = { }; /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */ -static llc_conn_action_t llc_busy_actions_17a[] = { +static const llc_conn_action_t llc_busy_actions_17a[] = { [0] = llc_conn_ac_upd_p_flag, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_set_remote_busy, @@ -1808,7 +1808,7 @@ static struct llc_conn_state_trans llc_busy_state_trans_17a = { }; /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 
event */ -static llc_conn_action_t llc_busy_actions_17b[] = { +static const llc_conn_action_t llc_busy_actions_17b[] = { [0] = llc_conn_ac_upd_p_flag, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_set_remote_busy, @@ -1823,12 +1823,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_17b = { }; /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_17c[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_17c[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_busy_actions_17c[] = { +static const llc_conn_action_t llc_busy_actions_17c[] = { [0] = llc_conn_ac_upd_p_flag, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_set_remote_busy, @@ -1843,7 +1843,7 @@ static struct llc_conn_state_trans llc_busy_state_trans_17c = { }; /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_busy_actions_18[] = { +static const llc_conn_action_t llc_busy_actions_18[] = { [0] = llc_conn_ac_send_rnr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_set_remote_busy, @@ -1858,12 +1858,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_18 = { }; /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_19a[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_19a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = NULL, }; -static llc_conn_action_t llc_busy_actions_19a[] = { +static const llc_conn_action_t llc_busy_actions_19a[] = { [0] = llc_conn_ac_set_vs_nr, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_p_flag, @@ -1880,12 +1880,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_19a = { }; /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_X event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_19b[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_19b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_f, [1] = NULL, }; -static llc_conn_action_t llc_busy_actions_19b[] = { +static const llc_conn_action_t llc_busy_actions_19b[] = { [0] = llc_conn_ac_set_vs_nr, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_p_flag, @@ -1902,12 +1902,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_19b = { }; /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_20a[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_20a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_busy_actions_20a[] = { +static const llc_conn_action_t llc_busy_actions_20a[] = { [0] = llc_conn_ac_set_vs_nr, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_resend_i_xxx_x_set_0, @@ -1923,12 +1923,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_20a = { }; /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_20b[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_20b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_busy_actions_20b[] = { +static const llc_conn_action_t llc_busy_actions_20b[] = { [0] = llc_conn_ac_set_vs_nr, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_resend_i_xxx_x_set_0, @@ -1944,7 +1944,7 @@ static struct llc_conn_state_trans llc_busy_state_trans_20b = { }; /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_busy_actions_21[] = { +static const llc_conn_action_t llc_busy_actions_21[] = { [0] 
= llc_conn_ac_set_vs_nr, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_send_rnr_rsp_f_set_1, @@ -1961,12 +1961,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_21 = { }; /* State transitions for LLC_CONN_EV_INIT_P_F_CYCLE event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_22[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_22[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = NULL, }; -static llc_conn_action_t llc_busy_actions_22[] = { +static const llc_conn_action_t llc_busy_actions_22[] = { [0] = llc_conn_ac_send_rnr_cmd_p_set_1, [1] = llc_conn_ac_start_p_timer, [2] = NULL, @@ -1980,12 +1980,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_22 = { }; /* State transitions for LLC_CONN_EV_P_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_23[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_23[] = { [0] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [1] = NULL, }; -static llc_conn_action_t llc_busy_actions_23[] = { +static const llc_conn_action_t llc_busy_actions_23[] = { [0] = llc_conn_ac_send_rnr_cmd_p_set_1, [1] = llc_conn_ac_rst_vs, [2] = llc_conn_ac_start_p_timer, @@ -2001,13 +2001,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_23 = { }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_24a[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_24a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [2] = NULL, }; -static llc_conn_action_t llc_busy_actions_24a[] = { +static const llc_conn_action_t llc_busy_actions_24a[] = { [0] = llc_conn_ac_send_rnr_cmd_p_set_1, [1] = llc_conn_ac_start_p_timer, [2] = llc_conn_ac_inc_retry_cnt_by_1, @@ -2023,13 +2023,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_24a = { }; /* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_24b[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_24b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [2] = NULL, }; -static llc_conn_action_t llc_busy_actions_24b[] = { +static const llc_conn_action_t llc_busy_actions_24b[] = { [0] = llc_conn_ac_send_rnr_cmd_p_set_1, [1] = llc_conn_ac_start_p_timer, [2] = llc_conn_ac_inc_retry_cnt_by_1, @@ -2045,13 +2045,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_24b = { }; /* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_25[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_25[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [2] = NULL, }; -static llc_conn_action_t llc_busy_actions_25[] = { +static const llc_conn_action_t llc_busy_actions_25[] = { [0] = llc_conn_ac_send_rnr_cmd_p_set_1, [1] = llc_conn_ac_start_p_timer, [2] = llc_conn_ac_inc_retry_cnt_by_1, @@ -2068,13 +2068,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_25 = { }; /* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_26[] = { +static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_26[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [2] = NULL, }; -static llc_conn_action_t llc_busy_actions_26[] = { +static const llc_conn_action_t llc_busy_actions_26[] = { [0] = llc_conn_ac_set_data_flag_1, [1] = NULL, }; @@ -2155,13 +2155,13 @@ static struct llc_conn_state_trans *llc_busy_state_transitions[] = { /* LLC_CONN_STATE_REJ transitions */ /* State transitions for 
LLC_CONN_EV_DATA_REQ event */ -static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_1[] = { +static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_1[] = { [0] = llc_conn_ev_qlfy_remote_busy_eq_0, [1] = llc_conn_ev_qlfy_p_flag_eq_0, [2] = NULL, }; -static llc_conn_action_t llc_reject_actions_1[] = { +static const llc_conn_action_t llc_reject_actions_1[] = { [0] = llc_conn_ac_send_i_xxx_x_set_0, [1] = NULL, }; @@ -2174,13 +2174,13 @@ static struct llc_conn_state_trans llc_reject_state_trans_1 = { }; /* State transitions for LLC_CONN_EV_DATA_REQ event */ -static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_2[] = { +static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_2[] = { [0] = llc_conn_ev_qlfy_remote_busy_eq_0, [1] = llc_conn_ev_qlfy_p_flag_eq_1, [2] = NULL, }; -static llc_conn_action_t llc_reject_actions_2[] = { +static const llc_conn_action_t llc_reject_actions_2[] = { [0] = llc_conn_ac_send_i_xxx_x_set_0, [1] = NULL, }; @@ -2193,14 +2193,14 @@ static struct llc_conn_state_trans llc_reject_state_trans_2 = { }; /* State transitions for LLC_CONN_EV_DATA_REQ event */ -static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_2_1[] = { +static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_2_1[] = { [0] = llc_conn_ev_qlfy_remote_busy_eq_1, [1] = llc_conn_ev_qlfy_set_status_remote_busy, [2] = NULL, }; /* just one member, NULL, .bss zeroes it */ -static llc_conn_action_t llc_reject_actions_2_1[1]; +static const llc_conn_action_t llc_reject_actions_2_1[1]; static struct llc_conn_state_trans llc_reject_state_trans_2_1 = { .ev = llc_conn_ev_data_req, @@ -2211,12 +2211,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_2_1 = { /* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */ -static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_3[] = { +static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_3[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = NULL, }; -static llc_conn_action_t llc_reject_actions_3[] = { +static const llc_conn_action_t llc_reject_actions_3[] = { [0] = llc_conn_ac_send_rnr_xxx_x_set_0, [1] = llc_conn_ac_set_data_flag_2, [2] = NULL, @@ -2230,12 +2230,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_3 = { }; /* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */ -static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_4[] = { +static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_4[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_reject_actions_4[] = { +static const llc_conn_action_t llc_reject_actions_4[] = { [0] = llc_conn_ac_send_rnr_xxx_x_set_0, [1] = llc_conn_ac_set_data_flag_2, [2] = NULL, @@ -2249,7 +2249,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_4 = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */ -static llc_conn_action_t llc_reject_actions_5a[] = { +static const llc_conn_action_t llc_reject_actions_5a[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_p_flag, [2] = llc_conn_ac_clear_remote_busy_if_f_eq_1, @@ -2264,7 +2264,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_5a = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */ -static llc_conn_action_t llc_reject_actions_5b[] = { +static const llc_conn_action_t llc_reject_actions_5b[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_p_flag, [2] = llc_conn_ac_clear_remote_busy_if_f_eq_1, @@ -2279,12 +2279,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_5b = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */ -static 
llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_5c[] = { +static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_5c[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_reject_actions_5c[] = { +static const llc_conn_action_t llc_reject_actions_5c[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_p_flag, [2] = llc_conn_ac_clear_remote_busy_if_f_eq_1, @@ -2299,7 +2299,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_5c = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */ -static llc_conn_action_t llc_reject_actions_6[] = { +static const llc_conn_action_t llc_reject_actions_6[] = { [0] = llc_conn_ac_send_rr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = NULL, @@ -2313,12 +2313,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_6 = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X event */ -static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_7a[] = { +static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_7a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_f, [1] = NULL, }; -static llc_conn_action_t llc_reject_actions_7a[] = { +static const llc_conn_action_t llc_reject_actions_7a[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_upd_p_flag, @@ -2338,12 +2338,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_7a = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */ -static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_7b[] = { +static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_7b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = NULL, }; -static llc_conn_action_t llc_reject_actions_7b[] = { +static const llc_conn_action_t llc_reject_actions_7b[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_upd_p_flag, @@ -2362,12 +2362,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_7b = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */ -static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_8a[] = { +static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_8a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_reject_actions_8a[] = { +static const llc_conn_action_t llc_reject_actions_8a[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_send_ack_xxx_x_set_0, @@ -2384,12 +2384,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_8a = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */ -static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_8b[] = { +static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_8b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_reject_actions_8b[] = { +static const llc_conn_action_t llc_reject_actions_8b[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_send_ack_xxx_x_set_0, @@ -2406,7 +2406,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_8b = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_reject_actions_9[] = { +static const llc_conn_action_t llc_reject_actions_9[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_send_ack_rsp_f_set_1, @@ -2423,7 +2423,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_9 = { }; /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */ -static llc_conn_action_t llc_reject_actions_10a[] = { +static const llc_conn_action_t llc_reject_actions_10a[] = { [0] = 
llc_conn_ac_upd_p_flag, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_clear_remote_busy, @@ -2438,7 +2438,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_10a = { }; /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */ -static llc_conn_action_t llc_reject_actions_10b[] = { +static const llc_conn_action_t llc_reject_actions_10b[] = { [0] = llc_conn_ac_upd_p_flag, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_clear_remote_busy, @@ -2453,12 +2453,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_10b = { }; /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */ -static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_10c[] = { +static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_10c[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_reject_actions_10c[] = { +static const llc_conn_action_t llc_reject_actions_10c[] = { [0] = llc_conn_ac_upd_p_flag, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_clear_remote_busy, @@ -2473,7 +2473,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_10c = { }; /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_reject_actions_11[] = { +static const llc_conn_action_t llc_reject_actions_11[] = { [0] = llc_conn_ac_send_ack_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_clear_remote_busy, @@ -2488,7 +2488,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_11 = { }; /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */ -static llc_conn_action_t llc_reject_actions_12a[] = { +static const llc_conn_action_t llc_reject_actions_12a[] = { [0] = llc_conn_ac_upd_p_flag, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_set_remote_busy, @@ -2503,7 +2503,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_12a = { }; /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */ -static llc_conn_action_t llc_reject_actions_12b[] = { +static const llc_conn_action_t llc_reject_actions_12b[] = { [0] = llc_conn_ac_upd_p_flag, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_set_remote_busy, @@ -2518,12 +2518,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_12b = { }; /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */ -static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_12c[] = { +static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_12c[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_reject_actions_12c[] = { +static const llc_conn_action_t llc_reject_actions_12c[] = { [0] = llc_conn_ac_upd_p_flag, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_set_remote_busy, @@ -2538,7 +2538,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_12c = { }; /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_reject_actions_13[] = { +static const llc_conn_action_t llc_reject_actions_13[] = { [0] = llc_conn_ac_send_rr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_set_remote_busy, @@ -2553,12 +2553,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_13 = { }; /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */ -static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_14a[] = { +static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_14a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = NULL, }; -static llc_conn_action_t llc_reject_actions_14a[] = { +static const llc_conn_action_t llc_reject_actions_14a[] = { [0] = 
llc_conn_ac_set_vs_nr, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_p_flag, @@ -2575,12 +2575,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_14a = { }; /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_X event */ -static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_14b[] = { +static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_14b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_f, [1] = NULL, }; -static llc_conn_action_t llc_reject_actions_14b[] = { +static const llc_conn_action_t llc_reject_actions_14b[] = { [0] = llc_conn_ac_set_vs_nr, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_p_flag, @@ -2597,12 +2597,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_14b = { }; /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */ -static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_15a[] = { +static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_15a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_reject_actions_15a[] = { +static const llc_conn_action_t llc_reject_actions_15a[] = { [0] = llc_conn_ac_set_vs_nr, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_resend_i_xxx_x_set_0, @@ -2618,12 +2618,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_15a = { }; /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */ -static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_15b[] = { +static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_15b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_reject_actions_15b[] = { +static const llc_conn_action_t llc_reject_actions_15b[] = { [0] = llc_conn_ac_set_vs_nr, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_resend_i_xxx_x_set_0, @@ -2639,7 +2639,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_15b = { }; /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_reject_actions_16[] = { +static const llc_conn_action_t llc_reject_actions_16[] = { [0] = llc_conn_ac_set_vs_nr, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_resend_i_rsp_f_set_1, @@ -2655,12 +2655,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_16 = { }; /* State transitions for LLC_CONN_EV_INIT_P_F_CYCLE event */ -static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_17[] = { +static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_17[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = NULL, }; -static llc_conn_action_t llc_reject_actions_17[] = { +static const llc_conn_action_t llc_reject_actions_17[] = { [0] = llc_conn_ac_send_rr_cmd_p_set_1, [1] = llc_conn_ac_start_p_timer, [2] = NULL, @@ -2674,13 +2674,13 @@ static struct llc_conn_state_trans llc_reject_state_trans_17 = { }; /* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_18[] = { +static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_18[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [2] = NULL, }; -static llc_conn_action_t llc_reject_actions_18[] = { +static const llc_conn_action_t llc_reject_actions_18[] = { [0] = llc_conn_ac_send_rej_cmd_p_set_1, [1] = llc_conn_ac_start_p_timer, [2] = llc_conn_ac_start_rej_timer, @@ -2696,12 +2696,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_18 = { }; /* State transitions for LLC_CONN_EV_P_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_19[] = { +static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_19[] = { [0] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [1] = NULL, }; -static 
llc_conn_action_t llc_reject_actions_19[] = { +static const llc_conn_action_t llc_reject_actions_19[] = { [0] = llc_conn_ac_send_rr_cmd_p_set_1, [1] = llc_conn_ac_start_p_timer, [2] = llc_conn_ac_start_rej_timer, @@ -2718,13 +2718,13 @@ static struct llc_conn_state_trans llc_reject_state_trans_19 = { }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_20a[] = { +static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_20a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [2] = NULL, }; -static llc_conn_action_t llc_reject_actions_20a[] = { +static const llc_conn_action_t llc_reject_actions_20a[] = { [0] = llc_conn_ac_send_rr_cmd_p_set_1, [1] = llc_conn_ac_start_p_timer, [2] = llc_conn_ac_start_rej_timer, @@ -2741,13 +2741,13 @@ static struct llc_conn_state_trans llc_reject_state_trans_20a = { }; /* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_20b[] = { +static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_20b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [2] = NULL, }; -static llc_conn_action_t llc_reject_actions_20b[] = { +static const llc_conn_action_t llc_reject_actions_20b[] = { [0] = llc_conn_ac_send_rr_cmd_p_set_1, [1] = llc_conn_ac_start_p_timer, [2] = llc_conn_ac_start_rej_timer, @@ -2826,13 +2826,13 @@ static struct llc_conn_state_trans *llc_reject_state_transitions[] = { /* LLC_CONN_STATE_AWAIT transitions */ /* State transitions for LLC_CONN_EV_DATA_REQ event */ -static llc_conn_ev_qfyr_t llc_await_ev_qfyrs_1_0[] = { +static const llc_conn_ev_qfyr_t llc_await_ev_qfyrs_1_0[] = { [0] = llc_conn_ev_qlfy_set_status_refuse, [1] = NULL, }; /* just one member, NULL, .bss zeroes it */ -static llc_conn_action_t llc_await_actions_1_0[1]; +static const llc_conn_action_t llc_await_actions_1_0[1]; static struct llc_conn_state_trans llc_await_state_trans_1_0 = { .ev = llc_conn_ev_data_req, @@ -2842,7 +2842,7 @@ static struct llc_conn_state_trans llc_await_state_trans_1_0 = { }; /* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */ -static llc_conn_action_t llc_await_actions_1[] = { +static const llc_conn_action_t llc_await_actions_1[] = { [0] = llc_conn_ac_send_rnr_xxx_x_set_0, [1] = llc_conn_ac_set_data_flag_0, [2] = NULL, @@ -2856,7 +2856,7 @@ static struct llc_conn_state_trans llc_await_state_trans_1 = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */ -static llc_conn_action_t llc_await_actions_2[] = { +static const llc_conn_action_t llc_await_actions_2[] = { [0] = llc_conn_ac_send_rej_xxx_x_set_0, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, @@ -2875,7 +2875,7 @@ static struct llc_conn_state_trans llc_await_state_trans_2 = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */ -static llc_conn_action_t llc_await_actions_3a[] = { +static const llc_conn_action_t llc_await_actions_3a[] = { [0] = llc_conn_ac_send_rej_xxx_x_set_0, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, @@ -2891,7 +2891,7 @@ static struct llc_conn_state_trans llc_await_state_trans_3a = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */ -static llc_conn_action_t llc_await_actions_3b[] = { +static const llc_conn_action_t llc_await_actions_3b[] = { [0] = llc_conn_ac_send_rej_xxx_x_set_0, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, @@ -2907,7 +2907,7 @@ static struct llc_conn_state_trans 
llc_await_state_trans_3b = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */ -static llc_conn_action_t llc_await_actions_4[] = { +static const llc_conn_action_t llc_await_actions_4[] = { [0] = llc_conn_ac_send_rej_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, @@ -2924,7 +2924,7 @@ static struct llc_conn_state_trans llc_await_state_trans_4 = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1 event */ -static llc_conn_action_t llc_await_actions_5[] = { +static const llc_conn_action_t llc_await_actions_5[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_stop_p_timer, @@ -2943,7 +2943,7 @@ static struct llc_conn_state_trans llc_await_state_trans_5 = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */ -static llc_conn_action_t llc_await_actions_6a[] = { +static const llc_conn_action_t llc_await_actions_6a[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_send_rr_xxx_x_set_0, @@ -2960,7 +2960,7 @@ static struct llc_conn_state_trans llc_await_state_trans_6a = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */ -static llc_conn_action_t llc_await_actions_6b[] = { +static const llc_conn_action_t llc_await_actions_6b[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_send_rr_xxx_x_set_0, @@ -2977,7 +2977,7 @@ static struct llc_conn_state_trans llc_await_state_trans_6b = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_await_actions_7[] = { +static const llc_conn_action_t llc_await_actions_7[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_send_rr_rsp_f_set_1, @@ -2994,7 +2994,7 @@ static struct llc_conn_state_trans llc_await_state_trans_7 = { }; /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */ -static llc_conn_action_t llc_await_actions_8a[] = { +static const llc_conn_action_t llc_await_actions_8a[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_stop_p_timer, @@ -3011,7 +3011,7 @@ static struct llc_conn_state_trans llc_await_state_trans_8a = { }; /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_1 event */ -static llc_conn_action_t llc_await_actions_8b[] = { +static const llc_conn_action_t llc_await_actions_8b[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_stop_p_timer, @@ -3028,7 +3028,7 @@ static struct llc_conn_state_trans llc_await_state_trans_8b = { }; /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */ -static llc_conn_action_t llc_await_actions_9a[] = { +static const llc_conn_action_t llc_await_actions_9a[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_clear_remote_busy, @@ -3043,7 +3043,7 @@ static struct llc_conn_state_trans llc_await_state_trans_9a = { }; /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */ -static llc_conn_action_t llc_await_actions_9b[] = { +static const llc_conn_action_t llc_await_actions_9b[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_clear_remote_busy, @@ -3058,7 +3058,7 @@ static struct llc_conn_state_trans llc_await_state_trans_9b = { }; /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */ -static llc_conn_action_t llc_await_actions_9c[] = { +static const llc_conn_action_t llc_await_actions_9c[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] 
= llc_conn_ac_clear_remote_busy, @@ -3073,7 +3073,7 @@ static struct llc_conn_state_trans llc_await_state_trans_9c = { }; /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */ -static llc_conn_action_t llc_await_actions_9d[] = { +static const llc_conn_action_t llc_await_actions_9d[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_clear_remote_busy, @@ -3088,7 +3088,7 @@ static struct llc_conn_state_trans llc_await_state_trans_9d = { }; /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_await_actions_10a[] = { +static const llc_conn_action_t llc_await_actions_10a[] = { [0] = llc_conn_ac_send_rr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, @@ -3104,7 +3104,7 @@ static struct llc_conn_state_trans llc_await_state_trans_10a = { }; /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_await_actions_10b[] = { +static const llc_conn_action_t llc_await_actions_10b[] = { [0] = llc_conn_ac_send_rr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, @@ -3120,7 +3120,7 @@ static struct llc_conn_state_trans llc_await_state_trans_10b = { }; /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */ -static llc_conn_action_t llc_await_actions_11[] = { +static const llc_conn_action_t llc_await_actions_11[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_stop_p_timer, @@ -3136,7 +3136,7 @@ static struct llc_conn_state_trans llc_await_state_trans_11 = { }; /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */ -static llc_conn_action_t llc_await_actions_12a[] = { +static const llc_conn_action_t llc_await_actions_12a[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_set_remote_busy, @@ -3151,7 +3151,7 @@ static struct llc_conn_state_trans llc_await_state_trans_12a = { }; /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */ -static llc_conn_action_t llc_await_actions_12b[] = { +static const llc_conn_action_t llc_await_actions_12b[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_set_remote_busy, @@ -3166,7 +3166,7 @@ static struct llc_conn_state_trans llc_await_state_trans_12b = { }; /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_await_actions_13[] = { +static const llc_conn_action_t llc_await_actions_13[] = { [0] = llc_conn_ac_send_rr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, @@ -3182,12 +3182,12 @@ static struct llc_conn_state_trans llc_await_state_trans_13 = { }; /* State transitions for LLC_CONN_EV_P_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_await_ev_qfyrs_14[] = { +static const llc_conn_ev_qfyr_t llc_await_ev_qfyrs_14[] = { [0] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [1] = NULL, }; -static llc_conn_action_t llc_await_actions_14[] = { +static const llc_conn_action_t llc_await_actions_14[] = { [0] = llc_conn_ac_send_rr_cmd_p_set_1, [1] = llc_conn_ac_start_p_timer, [2] = llc_conn_ac_inc_retry_cnt_by_1, @@ -3255,13 +3255,13 @@ static struct llc_conn_state_trans *llc_await_state_transitions[] = { /* LLC_CONN_STATE_AWAIT_BUSY transitions */ /* State transitions for LLC_CONN_EV_DATA_CONN_REQ event */ -static llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_1_0[] = { +static const llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_1_0[] = { [0] = llc_conn_ev_qlfy_set_status_refuse, [1] = NULL, }; /* just one member, 
NULL, .bss zeroes it */ -static llc_conn_action_t llc_await_busy_actions_1_0[1]; +static const llc_conn_action_t llc_await_busy_actions_1_0[1]; static struct llc_conn_state_trans llc_await_busy_state_trans_1_0 = { .ev = llc_conn_ev_data_req, @@ -3271,12 +3271,12 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_1_0 = { }; /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */ -static llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_1[] = { +static const llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_1[] = { [0] = llc_conn_ev_qlfy_data_flag_eq_1, [1] = NULL, }; -static llc_conn_action_t llc_await_busy_actions_1[] = { +static const llc_conn_action_t llc_await_busy_actions_1[] = { [0] = llc_conn_ac_send_rej_xxx_x_set_0, [1] = llc_conn_ac_start_rej_timer, [2] = NULL, @@ -3290,12 +3290,12 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_1 = { }; /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */ -static llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_2[] = { +static const llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_2[] = { [0] = llc_conn_ev_qlfy_data_flag_eq_0, [1] = NULL, }; -static llc_conn_action_t llc_await_busy_actions_2[] = { +static const llc_conn_action_t llc_await_busy_actions_2[] = { [0] = llc_conn_ac_send_rr_xxx_x_set_0, [1] = NULL, }; @@ -3308,12 +3308,12 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_2 = { }; /* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */ -static llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_3[] = { +static const llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_3[] = { [0] = llc_conn_ev_qlfy_data_flag_eq_2, [1] = NULL, }; -static llc_conn_action_t llc_await_busy_actions_3[] = { +static const llc_conn_action_t llc_await_busy_actions_3[] = { [0] = llc_conn_ac_send_rr_xxx_x_set_0, [1] = NULL, }; @@ -3326,7 +3326,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_3 = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */ -static llc_conn_action_t llc_await_busy_actions_4[] = { +static const llc_conn_action_t llc_await_busy_actions_4[] = { [0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, @@ -3345,7 +3345,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_4 = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */ -static llc_conn_action_t llc_await_busy_actions_5a[] = { +static const llc_conn_action_t llc_await_busy_actions_5a[] = { [0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, @@ -3361,7 +3361,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_5a = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */ -static llc_conn_action_t llc_await_busy_actions_5b[] = { +static const llc_conn_action_t llc_await_busy_actions_5b[] = { [0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, @@ -3377,7 +3377,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_5b = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */ -static llc_conn_action_t llc_await_busy_actions_6[] = { +static const llc_conn_action_t llc_await_busy_actions_6[] = { [0] = llc_conn_ac_send_rnr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, @@ -3393,7 +3393,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_6 = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1 event */ 
-static llc_conn_action_t llc_await_busy_actions_7[] = { +static const llc_conn_action_t llc_await_busy_actions_7[] = { [0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, [1] = llc_conn_ac_inc_vr_by_1, [2] = llc_conn_ac_data_ind, @@ -3414,7 +3414,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_7 = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */ -static llc_conn_action_t llc_await_busy_actions_8a[] = { +static const llc_conn_action_t llc_await_busy_actions_8a[] = { [0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, [1] = llc_conn_ac_inc_vr_by_1, [2] = llc_conn_ac_data_ind, @@ -3432,7 +3432,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_8a = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */ -static llc_conn_action_t llc_await_busy_actions_8b[] = { +static const llc_conn_action_t llc_await_busy_actions_8b[] = { [0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, [1] = llc_conn_ac_inc_vr_by_1, [2] = llc_conn_ac_data_ind, @@ -3450,7 +3450,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_8b = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_await_busy_actions_9[] = { +static const llc_conn_action_t llc_await_busy_actions_9[] = { [0] = llc_conn_ac_send_rnr_rsp_f_set_1, [1] = llc_conn_ac_inc_vr_by_1, [2] = llc_conn_ac_data_ind, @@ -3468,7 +3468,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_9 = { }; /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */ -static llc_conn_action_t llc_await_busy_actions_10a[] = { +static const llc_conn_action_t llc_await_busy_actions_10a[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_stop_p_timer, @@ -3485,7 +3485,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_10a = { }; /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_1 event */ -static llc_conn_action_t llc_await_busy_actions_10b[] = { +static const llc_conn_action_t llc_await_busy_actions_10b[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_stop_p_timer, @@ -3502,7 +3502,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_10b = { }; /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */ -static llc_conn_action_t llc_await_busy_actions_11a[] = { +static const llc_conn_action_t llc_await_busy_actions_11a[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_clear_remote_busy, @@ -3517,7 +3517,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_11a = { }; /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */ -static llc_conn_action_t llc_await_busy_actions_11b[] = { +static const llc_conn_action_t llc_await_busy_actions_11b[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_clear_remote_busy, @@ -3532,7 +3532,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_11b = { }; /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */ -static llc_conn_action_t llc_await_busy_actions_11c[] = { +static const llc_conn_action_t llc_await_busy_actions_11c[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_clear_remote_busy, @@ -3547,7 +3547,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_11c = { }; /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */ -static llc_conn_action_t llc_await_busy_actions_11d[] = { +static const llc_conn_action_t 
llc_await_busy_actions_11d[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_clear_remote_busy, @@ -3562,7 +3562,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_11d = { }; /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_await_busy_actions_12a[] = { +static const llc_conn_action_t llc_await_busy_actions_12a[] = { [0] = llc_conn_ac_send_rnr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, @@ -3578,7 +3578,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_12a = { }; /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_await_busy_actions_12b[] = { +static const llc_conn_action_t llc_await_busy_actions_12b[] = { [0] = llc_conn_ac_send_rnr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, @@ -3594,7 +3594,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_12b = { }; /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */ -static llc_conn_action_t llc_await_busy_actions_13[] = { +static const llc_conn_action_t llc_await_busy_actions_13[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_stop_p_timer, @@ -3610,7 +3610,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_13 = { }; /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */ -static llc_conn_action_t llc_await_busy_actions_14a[] = { +static const llc_conn_action_t llc_await_busy_actions_14a[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_set_remote_busy, @@ -3625,7 +3625,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_14a = { }; /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */ -static llc_conn_action_t llc_await_busy_actions_14b[] = { +static const llc_conn_action_t llc_await_busy_actions_14b[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_set_remote_busy, @@ -3640,7 +3640,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_14b = { }; /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_await_busy_actions_15[] = { +static const llc_conn_action_t llc_await_busy_actions_15[] = { [0] = llc_conn_ac_send_rnr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, @@ -3656,12 +3656,12 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_15 = { }; /* State transitions for LLC_CONN_EV_P_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_16[] = { +static const llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_16[] = { [0] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [1] = NULL, }; -static llc_conn_action_t llc_await_busy_actions_16[] = { +static const llc_conn_action_t llc_await_busy_actions_16[] = { [0] = llc_conn_ac_send_rnr_cmd_p_set_1, [1] = llc_conn_ac_start_p_timer, [2] = llc_conn_ac_inc_retry_cnt_by_1, @@ -3731,13 +3731,13 @@ static struct llc_conn_state_trans *llc_await_busy_state_transitions[] = { /* ----------------- LLC_CONN_STATE_AWAIT_REJ transitions --------------- */ /* State transitions for LLC_CONN_EV_DATA_CONN_REQ event */ -static llc_conn_ev_qfyr_t llc_await_reject_ev_qfyrs_1_0[] = { +static const llc_conn_ev_qfyr_t llc_await_reject_ev_qfyrs_1_0[] = { [0] = llc_conn_ev_qlfy_set_status_refuse, [1] = NULL, }; /* just one member, NULL, .bss zeroes it */ -static llc_conn_action_t llc_await_reject_actions_1_0[1]; +static const 
llc_conn_action_t llc_await_reject_actions_1_0[1]; static struct llc_conn_state_trans llc_await_reject_state_trans_1_0 = { .ev = llc_conn_ev_data_req, @@ -3747,7 +3747,7 @@ static struct llc_conn_state_trans llc_await_reject_state_trans_1_0 = { }; /* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */ -static llc_conn_action_t llc_await_rejct_actions_1[] = { +static const llc_conn_action_t llc_await_rejct_actions_1[] = { [0] = llc_conn_ac_send_rnr_xxx_x_set_0, [1] = llc_conn_ac_set_data_flag_2, [2] = NULL @@ -3761,7 +3761,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_1 = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */ -static llc_conn_action_t llc_await_rejct_actions_2a[] = { +static const llc_conn_action_t llc_await_rejct_actions_2a[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = NULL @@ -3775,7 +3775,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_2a = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */ -static llc_conn_action_t llc_await_rejct_actions_2b[] = { +static const llc_conn_action_t llc_await_rejct_actions_2b[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = NULL @@ -3789,7 +3789,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_2b = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */ -static llc_conn_action_t llc_await_rejct_actions_3[] = { +static const llc_conn_action_t llc_await_rejct_actions_3[] = { [0] = llc_conn_ac_send_rr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, @@ -3804,7 +3804,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_3 = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1 event */ -static llc_conn_action_t llc_await_rejct_actions_4[] = { +static const llc_conn_action_t llc_await_rejct_actions_4[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_stop_p_timer, @@ -3824,7 +3824,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_4 = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */ -static llc_conn_action_t llc_await_rejct_actions_5a[] = { +static const llc_conn_action_t llc_await_rejct_actions_5a[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_send_rr_xxx_x_set_0, @@ -3842,7 +3842,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_5a = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */ -static llc_conn_action_t llc_await_rejct_actions_5b[] = { +static const llc_conn_action_t llc_await_rejct_actions_5b[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_send_rr_xxx_x_set_0, @@ -3860,7 +3860,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_5b = { }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_await_rejct_actions_6[] = { +static const llc_conn_action_t llc_await_rejct_actions_6[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_send_rr_rsp_f_set_1, @@ -3878,7 +3878,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_6 = { }; /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */ -static llc_conn_action_t llc_await_rejct_actions_7a[] = { +static const llc_conn_action_t llc_await_rejct_actions_7a[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_stop_p_timer, @@ 
-3895,7 +3895,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_7a = { }; /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_1 event */ -static llc_conn_action_t llc_await_rejct_actions_7b[] = { +static const llc_conn_action_t llc_await_rejct_actions_7b[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_stop_p_timer, @@ -3912,7 +3912,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_7b = { }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */ -static llc_conn_action_t llc_await_rejct_actions_7c[] = { +static const llc_conn_action_t llc_await_rejct_actions_7c[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_stop_p_timer, @@ -3929,7 +3929,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_7c = { }; /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */ -static llc_conn_action_t llc_await_rejct_actions_8a[] = { +static const llc_conn_action_t llc_await_rejct_actions_8a[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_clear_remote_busy, @@ -3944,7 +3944,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_8a = { }; /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */ -static llc_conn_action_t llc_await_rejct_actions_8b[] = { +static const llc_conn_action_t llc_await_rejct_actions_8b[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_clear_remote_busy, @@ -3959,7 +3959,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_8b = { }; /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */ -static llc_conn_action_t llc_await_rejct_actions_8c[] = { +static const llc_conn_action_t llc_await_rejct_actions_8c[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_clear_remote_busy, @@ -3974,7 +3974,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_8c = { }; /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */ -static llc_conn_action_t llc_await_rejct_actions_8d[] = { +static const llc_conn_action_t llc_await_rejct_actions_8d[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_clear_remote_busy, @@ -3989,7 +3989,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_8d = { }; /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_await_rejct_actions_9a[] = { +static const llc_conn_action_t llc_await_rejct_actions_9a[] = { [0] = llc_conn_ac_send_rr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, @@ -4005,7 +4005,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_9a = { }; /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_await_rejct_actions_9b[] = { +static const llc_conn_action_t llc_await_rejct_actions_9b[] = { [0] = llc_conn_ac_send_rr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, @@ -4021,7 +4021,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_9b = { }; /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */ -static llc_conn_action_t llc_await_rejct_actions_10[] = { +static const llc_conn_action_t llc_await_rejct_actions_10[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_stop_p_timer, @@ -4037,7 +4037,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_10 = { }; /* 
State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */ -static llc_conn_action_t llc_await_rejct_actions_11a[] = { +static const llc_conn_action_t llc_await_rejct_actions_11a[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_set_remote_busy, @@ -4052,7 +4052,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_11a = { }; /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */ -static llc_conn_action_t llc_await_rejct_actions_11b[] = { +static const llc_conn_action_t llc_await_rejct_actions_11b[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_set_remote_busy, @@ -4067,7 +4067,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_11b = { }; /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */ -static llc_conn_action_t llc_await_rejct_actions_12[] = { +static const llc_conn_action_t llc_await_rejct_actions_12[] = { [0] = llc_conn_ac_send_rr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, @@ -4083,12 +4083,12 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_12 = { }; /* State transitions for LLC_CONN_EV_P_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_await_rejct_ev_qfyrs_13[] = { +static const llc_conn_ev_qfyr_t llc_await_rejct_ev_qfyrs_13[] = { [0] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [1] = NULL, }; -static llc_conn_action_t llc_await_rejct_actions_13[] = { +static const llc_conn_action_t llc_await_rejct_actions_13[] = { [0] = llc_conn_ac_send_rej_cmd_p_set_1, [1] = llc_conn_ac_stop_p_timer, [2] = llc_conn_ac_inc_retry_cnt_by_1, @@ -4157,13 +4157,13 @@ static struct llc_conn_state_trans *llc_await_rejct_state_transitions[] = { /* LLC_CONN_STATE_D_CONN transitions */ /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event, * cause_flag = 1 */ -static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_1[] = { +static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_1[] = { [0] = llc_conn_ev_qlfy_cause_flag_eq_1, [1] = llc_conn_ev_qlfy_set_status_conflict, [2] = NULL, }; -static llc_conn_action_t llc_d_conn_actions_1[] = { +static const llc_conn_action_t llc_d_conn_actions_1[] = { [0] = llc_conn_ac_send_dm_rsp_f_set_p, [1] = llc_conn_ac_stop_ack_timer, [2] = llc_conn_ac_disc_confirm, @@ -4181,13 +4181,13 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_1 = { /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event, * cause_flag = 0 */ -static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_1_1[] = { +static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_1_1[] = { [0] = llc_conn_ev_qlfy_cause_flag_eq_0, [1] = llc_conn_ev_qlfy_set_status_conflict, [2] = NULL, }; -static llc_conn_action_t llc_d_conn_actions_1_1[] = { +static const llc_conn_action_t llc_d_conn_actions_1_1[] = { [0] = llc_conn_ac_send_dm_rsp_f_set_p, [1] = llc_conn_ac_stop_ack_timer, [2] = llc_conn_disc, @@ -4204,14 +4204,14 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_1_1 = { /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event, * cause_flag = 1 */ -static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_2[] = { +static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_2[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_f, [1] = llc_conn_ev_qlfy_cause_flag_eq_1, [2] = llc_conn_ev_qlfy_set_status_disc, [3] = NULL, }; -static llc_conn_action_t llc_d_conn_actions_2[] = { +static const llc_conn_action_t llc_d_conn_actions_2[] = { [0] = llc_conn_ac_stop_ack_timer, [1] = llc_conn_ac_disc_confirm, [2] = llc_conn_disc, @@ -4228,14 +4228,14 @@ static struct 
llc_conn_state_trans llc_d_conn_state_trans_2 = { /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event, * cause_flag = 0 */ -static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_2_1[] = { +static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_2_1[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_f, [1] = llc_conn_ev_qlfy_cause_flag_eq_0, [2] = llc_conn_ev_qlfy_set_status_disc, [3] = NULL, }; -static llc_conn_action_t llc_d_conn_actions_2_1[] = { +static const llc_conn_action_t llc_d_conn_actions_2_1[] = { [0] = llc_conn_ac_stop_ack_timer, [1] = llc_conn_disc, [2] = NULL, @@ -4249,7 +4249,7 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_2_1 = { }; /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */ -static llc_conn_action_t llc_d_conn_actions_3[] = { +static const llc_conn_action_t llc_d_conn_actions_3[] = { [0] = llc_conn_ac_send_ua_rsp_f_set_p, [1] = NULL, }; @@ -4264,13 +4264,13 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_3 = { /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event, * cause_flag = 1 */ -static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_4[] = { +static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_4[] = { [0] = llc_conn_ev_qlfy_cause_flag_eq_1, [1] = llc_conn_ev_qlfy_set_status_disc, [2] = NULL, }; -static llc_conn_action_t llc_d_conn_actions_4[] = { +static const llc_conn_action_t llc_d_conn_actions_4[] = { [0] = llc_conn_ac_stop_ack_timer, [1] = llc_conn_ac_disc_confirm, [2] = llc_conn_disc, @@ -4287,13 +4287,13 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_4 = { /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event, * cause_flag = 0 */ -static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_4_1[] = { +static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_4_1[] = { [0] = llc_conn_ev_qlfy_cause_flag_eq_0, [1] = llc_conn_ev_qlfy_set_status_disc, [2] = NULL, }; -static llc_conn_action_t llc_d_conn_actions_4_1[] = { +static const llc_conn_action_t llc_d_conn_actions_4_1[] = { [0] = llc_conn_ac_stop_ack_timer, [1] = llc_conn_disc, [2] = NULL, @@ -4310,13 +4310,13 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_4_1 = { * State transition for * LLC_CONN_EV_DATA_CONN_REQ event */ -static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_5[] = { +static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_5[] = { [0] = llc_conn_ev_qlfy_set_status_refuse, [1] = NULL, }; /* just one member, NULL, .bss zeroes it */ -static llc_conn_action_t llc_d_conn_actions_5[1]; +static const llc_conn_action_t llc_d_conn_actions_5[1]; static struct llc_conn_state_trans llc_d_conn_state_trans_5 = { .ev = llc_conn_ev_data_req, @@ -4326,12 +4326,12 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_5 = { }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_6[] = { +static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_6[] = { [0] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [1] = NULL, }; -static llc_conn_action_t llc_d_conn_actions_6[] = { +static const llc_conn_action_t llc_d_conn_actions_6[] = { [0] = llc_conn_ac_send_disc_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_inc_retry_cnt_by_1, @@ -4346,14 +4346,14 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_6 = { }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event, cause_flag = 1 */ -static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_7[] = { +static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_7[] = { [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, [1] = llc_conn_ev_qlfy_cause_flag_eq_1, [2] = 
llc_conn_ev_qlfy_set_status_failed, [3] = NULL, }; -static llc_conn_action_t llc_d_conn_actions_7[] = { +static const llc_conn_action_t llc_d_conn_actions_7[] = { [0] = llc_conn_ac_disc_confirm, [1] = llc_conn_disc, [2] = NULL, @@ -4367,14 +4367,14 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_7 = { }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event, cause_flag = 0 */ -static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_8[] = { +static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_8[] = { [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, [1] = llc_conn_ev_qlfy_cause_flag_eq_0, [2] = llc_conn_ev_qlfy_set_status_failed, [3] = NULL, }; -static llc_conn_action_t llc_d_conn_actions_8[] = { +static const llc_conn_action_t llc_d_conn_actions_8[] = { [0] = llc_conn_disc, [1] = NULL, }; @@ -4411,7 +4411,7 @@ static struct llc_conn_state_trans *llc_d_conn_state_transitions[] = { /* LLC_CONN_STATE_RESET transitions */ /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */ -static llc_conn_action_t llc_rst_actions_1[] = { +static const llc_conn_action_t llc_rst_actions_1[] = { [0] = llc_conn_ac_set_vs_0, [1] = llc_conn_ac_set_vr_0, [2] = llc_conn_ac_set_s_flag_1, @@ -4429,14 +4429,14 @@ static struct llc_conn_state_trans llc_rst_state_trans_1 = { /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event, * cause_flag = 1 */ -static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_2[] = { +static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_2[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_f, [1] = llc_conn_ev_qlfy_cause_flag_eq_1, [2] = llc_conn_ev_qlfy_set_status_conn, [3] = NULL, }; -static llc_conn_action_t llc_rst_actions_2[] = { +static const llc_conn_action_t llc_rst_actions_2[] = { [0] = llc_conn_ac_stop_ack_timer, [1] = llc_conn_ac_set_vs_0, [2] = llc_conn_ac_set_vr_0, @@ -4457,14 +4457,14 @@ static struct llc_conn_state_trans llc_rst_state_trans_2 = { /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event, * cause_flag = 0 */ -static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_2_1[] = { +static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_2_1[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_f, [1] = llc_conn_ev_qlfy_cause_flag_eq_0, [2] = llc_conn_ev_qlfy_set_status_rst_done, [3] = NULL, }; -static llc_conn_action_t llc_rst_actions_2_1[] = { +static const llc_conn_action_t llc_rst_actions_2_1[] = { [0] = llc_conn_ac_stop_ack_timer, [1] = llc_conn_ac_set_vs_0, [2] = llc_conn_ac_set_vr_0, @@ -4483,13 +4483,13 @@ static struct llc_conn_state_trans llc_rst_state_trans_2_1 = { }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_3[] = { +static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_3[] = { [0] = llc_conn_ev_qlfy_s_flag_eq_1, [1] = llc_conn_ev_qlfy_set_status_rst_done, [2] = NULL, }; -static llc_conn_action_t llc_rst_actions_3[] = { +static const llc_conn_action_t llc_rst_actions_3[] = { [0] = llc_conn_ac_set_p_flag_0, [1] = llc_conn_ac_set_remote_busy_0, [2] = NULL, @@ -4505,12 +4505,12 @@ static struct llc_conn_state_trans llc_rst_state_trans_3 = { /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event, * cause_flag = 1 */ -static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_4[] = { +static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_4[] = { [0] = llc_conn_ev_qlfy_cause_flag_eq_1, [1] = llc_conn_ev_qlfy_set_status_disc, [2] = NULL, }; -static llc_conn_action_t llc_rst_actions_4[] = { +static const llc_conn_action_t llc_rst_actions_4[] = { [0] = llc_conn_ac_send_dm_rsp_f_set_p, [1] = llc_conn_ac_disc_ind, [2] = llc_conn_ac_stop_ack_timer, @@ -4528,13 
+4528,13 @@ static struct llc_conn_state_trans llc_rst_state_trans_4 = { /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event, * cause_flag = 0 */ -static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_4_1[] = { +static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_4_1[] = { [0] = llc_conn_ev_qlfy_cause_flag_eq_0, [1] = llc_conn_ev_qlfy_set_status_refuse, [2] = NULL, }; -static llc_conn_action_t llc_rst_actions_4_1[] = { +static const llc_conn_action_t llc_rst_actions_4_1[] = { [0] = llc_conn_ac_send_dm_rsp_f_set_p, [1] = llc_conn_ac_stop_ack_timer, [2] = llc_conn_disc, @@ -4551,13 +4551,13 @@ static struct llc_conn_state_trans llc_rst_state_trans_4_1 = { /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event, * cause_flag = 1 */ -static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_5[] = { +static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_5[] = { [0] = llc_conn_ev_qlfy_cause_flag_eq_1, [1] = llc_conn_ev_qlfy_set_status_disc, [2] = NULL, }; -static llc_conn_action_t llc_rst_actions_5[] = { +static const llc_conn_action_t llc_rst_actions_5[] = { [0] = llc_conn_ac_disc_ind, [1] = llc_conn_ac_stop_ack_timer, [2] = llc_conn_disc, @@ -4574,13 +4574,13 @@ static struct llc_conn_state_trans llc_rst_state_trans_5 = { /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event, * cause_flag = 0 */ -static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_5_1[] = { +static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_5_1[] = { [0] = llc_conn_ev_qlfy_cause_flag_eq_0, [1] = llc_conn_ev_qlfy_set_status_refuse, [2] = NULL, }; -static llc_conn_action_t llc_rst_actions_5_1[] = { +static const llc_conn_action_t llc_rst_actions_5_1[] = { [0] = llc_conn_ac_stop_ack_timer, [1] = llc_conn_disc, [2] = NULL, @@ -4594,13 +4594,13 @@ static struct llc_conn_state_trans llc_rst_state_trans_5_1 = { }; /* State transitions for DATA_CONN_REQ event */ -static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_6[] = { +static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_6[] = { [0] = llc_conn_ev_qlfy_set_status_refuse, [1] = NULL, }; /* just one member, NULL, .bss zeroes it */ -static llc_conn_action_t llc_rst_actions_6[1]; +static const llc_conn_action_t llc_rst_actions_6[1]; static struct llc_conn_state_trans llc_rst_state_trans_6 = { .ev = llc_conn_ev_data_req, @@ -4610,13 +4610,13 @@ static struct llc_conn_state_trans llc_rst_state_trans_6 = { }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_7[] = { +static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_7[] = { [0] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [1] = llc_conn_ev_qlfy_s_flag_eq_0, [2] = NULL, }; -static llc_conn_action_t llc_rst_actions_7[] = { +static const llc_conn_action_t llc_rst_actions_7[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_inc_retry_cnt_by_1, @@ -4631,14 +4631,14 @@ static struct llc_conn_state_trans llc_rst_state_trans_7 = { }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_8[] = { +static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_8[] = { [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, [1] = llc_conn_ev_qlfy_s_flag_eq_0, [2] = llc_conn_ev_qlfy_cause_flag_eq_1, [3] = llc_conn_ev_qlfy_set_status_failed, [4] = NULL, }; -static llc_conn_action_t llc_rst_actions_8[] = { +static const llc_conn_action_t llc_rst_actions_8[] = { [0] = llc_conn_ac_disc_ind, [1] = llc_conn_disc, [2] = NULL, @@ -4652,14 +4652,14 @@ static struct llc_conn_state_trans llc_rst_state_trans_8 = { }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ 
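(The LLC_CONN_EV_ACK_TMR_EXP tables announced by the comment above continue right after this aside.) The whole of llc_c_st.c is being converted with one idiom: each transition's qualifier and action lists are NULL-terminated arrays of function pointers that are only ever read, so qualifying them const lets them be placed in read-only memory and turns accidental writes into compile-time errors. A minimal, self-contained sketch of the idiom, with illustrative names rather than the kernel's:

#include <stdio.h>

typedef int (*demo_action_t)(int event);

static int demo_ac_log(int event)  { printf("log %d\n", event);  return 0; }
static int demo_ac_stop(int event) { printf("stop %d\n", event); return 0; }

/* const: the table can live in .rodata, mirroring what the
 * llc_*_actions_* and llc_*_ev_qfyrs_* arrays gain in this patch.
 */
static const demo_action_t demo_actions[] = {
	demo_ac_log,
	demo_ac_stop,
	NULL,			/* terminator, like the trailing NULL slots */
};

/* The walk mirrors llc_exec_conn_trans_actions() further down: note
 * the iterator must itself be pointer-to-const to match the table.
 */
static int demo_run_actions(const demo_action_t *next, int event)
{
	int rc = 0;

	for (; next && *next; next++)
		rc |= (*next)(event);
	return rc;
}

int main(void)
{
	return demo_run_actions(demo_actions, 42);
}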
-static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_8_1[] = { +static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_8_1[] = { [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, [1] = llc_conn_ev_qlfy_s_flag_eq_0, [2] = llc_conn_ev_qlfy_cause_flag_eq_0, [3] = llc_conn_ev_qlfy_set_status_failed, [4] = NULL, }; -static llc_conn_action_t llc_rst_actions_8_1[] = { +static const llc_conn_action_t llc_rst_actions_8_1[] = { [0] = llc_conn_ac_disc_ind, [1] = llc_conn_disc, [2] = NULL, @@ -4698,7 +4698,7 @@ static struct llc_conn_state_trans *llc_rst_state_transitions[] = { /* LLC_CONN_STATE_ERROR transitions */ /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */ -static llc_conn_action_t llc_error_actions_1[] = { +static const llc_conn_action_t llc_error_actions_1[] = { [0] = llc_conn_ac_set_vs_0, [1] = llc_conn_ac_set_vr_0, [2] = llc_conn_ac_send_ua_rsp_f_set_p, @@ -4718,7 +4718,7 @@ static struct llc_conn_state_trans llc_error_state_trans_1 = { }; /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */ -static llc_conn_action_t llc_error_actions_2[] = { +static const llc_conn_action_t llc_error_actions_2[] = { [0] = llc_conn_ac_send_ua_rsp_f_set_p, [1] = llc_conn_ac_disc_ind, [2] = llc_conn_ac_stop_ack_timer, @@ -4734,7 +4734,7 @@ static struct llc_conn_state_trans llc_error_state_trans_2 = { }; /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event */ -static llc_conn_action_t llc_error_actions_3[] = { +static const llc_conn_action_t llc_error_actions_3[] = { [0] = llc_conn_ac_disc_ind, [1] = llc_conn_ac_stop_ack_timer, [2] = llc_conn_disc, @@ -4749,7 +4749,7 @@ static struct llc_conn_state_trans llc_error_state_trans_3 = { }; /* State transitions for LLC_CONN_EV_RX_FRMR_RSP_Fbit_SET_X event */ -static llc_conn_action_t llc_error_actions_4[] = { +static const llc_conn_action_t llc_error_actions_4[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_set_retry_cnt_0, @@ -4765,7 +4765,7 @@ static struct llc_conn_state_trans llc_error_state_trans_4 = { }; /* State transitions for LLC_CONN_EV_RX_XXX_CMD_Pbit_SET_X event */ -static llc_conn_action_t llc_error_actions_5[] = { +static const llc_conn_action_t llc_error_actions_5[] = { [0] = llc_conn_ac_resend_frmr_rsp_f_set_p, [1] = NULL, }; @@ -4786,12 +4786,12 @@ static struct llc_conn_state_trans llc_error_state_trans_6 = { }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_error_ev_qfyrs_7[] = { +static const llc_conn_ev_qfyr_t llc_error_ev_qfyrs_7[] = { [0] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [1] = NULL, }; -static llc_conn_action_t llc_error_actions_7[] = { +static const llc_conn_action_t llc_error_actions_7[] = { [0] = llc_conn_ac_resend_frmr_rsp_f_set_0, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_inc_retry_cnt_by_1, @@ -4806,12 +4806,12 @@ static struct llc_conn_state_trans llc_error_state_trans_7 = { }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ -static llc_conn_ev_qfyr_t llc_error_ev_qfyrs_8[] = { +static const llc_conn_ev_qfyr_t llc_error_ev_qfyrs_8[] = { [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, [1] = NULL, }; -static llc_conn_action_t llc_error_actions_8[] = { +static const llc_conn_action_t llc_error_actions_8[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = llc_conn_ac_set_s_flag_0, [2] = llc_conn_ac_start_ack_timer, @@ -4828,13 +4828,13 @@ static struct llc_conn_state_trans llc_error_state_trans_8 = { }; /* State transitions for LLC_CONN_EV_DATA_CONN_REQ event */ -static llc_conn_ev_qfyr_t 
llc_error_ev_qfyrs_9[] = { +static const llc_conn_ev_qfyr_t llc_error_ev_qfyrs_9[] = { [0] = llc_conn_ev_qlfy_set_status_refuse, [1] = NULL, }; /* just one member, NULL, .bss zeroes it */ -static llc_conn_action_t llc_error_actions_9[1]; +static const llc_conn_action_t llc_error_actions_9[1]; static struct llc_conn_state_trans llc_error_state_trans_9 = { .ev = llc_conn_ev_data_req, @@ -4866,7 +4866,7 @@ static struct llc_conn_state_trans *llc_error_state_transitions[] = { /* LLC_CONN_STATE_TEMP transitions */ /* State transitions for LLC_CONN_EV_DISC_REQ event */ -static llc_conn_action_t llc_temp_actions_1[] = { +static const llc_conn_action_t llc_temp_actions_1[] = { [0] = llc_conn_ac_stop_all_timers, [1] = llc_conn_ac_send_disc_cmd_p_set_x, [2] = llc_conn_disc, diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index 42dc2e45c921853bedea8987078b088a724e6fb5..81a61fce3afbfee10bd4e20ffecb4f861364c121 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c @@ -406,7 +406,7 @@ static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk, struct sk_buff *skb) { struct llc_conn_state_trans **next_trans; - llc_conn_ev_qfyr_t *next_qualifier; + const llc_conn_ev_qfyr_t *next_qualifier; struct llc_conn_state_ev *ev = llc_conn_ev(skb); struct llc_sock *llc = llc_sk(sk); struct llc_conn_state *curr_state = @@ -454,7 +454,7 @@ static int llc_exec_conn_trans_actions(struct sock *sk, struct sk_buff *skb) { int rc = 0; - llc_conn_action_t *next_action; + const llc_conn_action_t *next_action; for (next_action = trans->ev_actions; next_action && *next_action; next_action++) { diff --git a/net/llc/llc_if.c b/net/llc/llc_if.c index 25c31c0a3fdbeab3bbf2e2cb6604594bfa758298..6daf391b3e847db42396caf9fbbfe7dca6440006 100644 --- a/net/llc/llc_if.c +++ b/net/llc/llc_if.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/net/llc/llc_s_st.c b/net/llc/llc_s_st.c index 135f7d80069e93e12145c5419bef51151a15dcae..308c616883a40437173029f369d241787727059a 100644 --- a/net/llc/llc_s_st.c +++ b/net/llc/llc_s_st.c @@ -29,7 +29,7 @@ static struct llc_sap_state_trans llc_sap_state_trans_end; /* state LLC_SAP_STATE_INACTIVE transition for * LLC_SAP_EV_ACTIVATION_REQ event */ -static llc_sap_action_t llc_sap_inactive_state_actions_1[] = { +static const llc_sap_action_t llc_sap_inactive_state_actions_1[] = { [0] = llc_sap_action_report_status, [1] = NULL, }; @@ -47,7 +47,7 @@ static struct llc_sap_state_trans *llc_sap_inactive_state_transitions[] = { }; /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_UI event */ -static llc_sap_action_t llc_sap_active_state_actions_1[] = { +static const llc_sap_action_t llc_sap_active_state_actions_1[] = { [0] = llc_sap_action_unitdata_ind, [1] = NULL, }; @@ -59,7 +59,7 @@ static struct llc_sap_state_trans llc_sap_active_state_trans_1 = { }; /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_UNITDATA_REQ event */ -static llc_sap_action_t llc_sap_active_state_actions_2[] = { +static const llc_sap_action_t llc_sap_active_state_actions_2[] = { [0] = llc_sap_action_send_ui, [1] = NULL, }; @@ -71,7 +71,7 @@ static struct llc_sap_state_trans llc_sap_active_state_trans_2 = { }; /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_XID_REQ event */ -static llc_sap_action_t llc_sap_active_state_actions_3[] = { +static const llc_sap_action_t llc_sap_active_state_actions_3[] = { [0] = llc_sap_action_send_xid_c, [1] = NULL, }; @@ -83,7 +83,7 @@ static struct llc_sap_state_trans llc_sap_active_state_trans_3 = { }; /* 
state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_XID_C event */ -static llc_sap_action_t llc_sap_active_state_actions_4[] = { +static const llc_sap_action_t llc_sap_active_state_actions_4[] = { [0] = llc_sap_action_send_xid_r, [1] = NULL, }; @@ -95,7 +95,7 @@ static struct llc_sap_state_trans llc_sap_active_state_trans_4 = { }; /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_XID_R event */ -static llc_sap_action_t llc_sap_active_state_actions_5[] = { +static const llc_sap_action_t llc_sap_active_state_actions_5[] = { [0] = llc_sap_action_xid_ind, [1] = NULL, }; @@ -107,7 +107,7 @@ static struct llc_sap_state_trans llc_sap_active_state_trans_5 = { }; /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_TEST_REQ event */ -static llc_sap_action_t llc_sap_active_state_actions_6[] = { +static const llc_sap_action_t llc_sap_active_state_actions_6[] = { [0] = llc_sap_action_send_test_c, [1] = NULL, }; @@ -119,7 +119,7 @@ static struct llc_sap_state_trans llc_sap_active_state_trans_6 = { }; /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_TEST_C event */ -static llc_sap_action_t llc_sap_active_state_actions_7[] = { +static const llc_sap_action_t llc_sap_active_state_actions_7[] = { [0] = llc_sap_action_send_test_r, [1] = NULL, }; @@ -131,7 +131,7 @@ static struct llc_sap_state_trans llc_sap_active_state_trans_7 = { }; /* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_TEST_R event */ -static llc_sap_action_t llc_sap_active_state_actions_8[] = { +static const llc_sap_action_t llc_sap_active_state_actions_8[] = { [0] = llc_sap_action_test_ind, [1] = NULL, }; @@ -145,7 +145,7 @@ static struct llc_sap_state_trans llc_sap_active_state_trans_8 = { /* state LLC_SAP_STATE_ACTIVE transition for * LLC_SAP_EV_DEACTIVATION_REQ event */ -static llc_sap_action_t llc_sap_active_state_actions_9[] = { +static const llc_sap_action_t llc_sap_active_state_actions_9[] = { [0] = llc_sap_action_report_status, [1] = NULL, }; diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c index 06033f6c845ff5d2ab73a98a41d08767b88e59fd..d0e1e804ebd73dcebcf2f930b921233a49b0f454 100644 --- a/net/llc/llc_sap.c +++ b/net/llc/llc_sap.c @@ -146,7 +146,7 @@ static int llc_exec_sap_trans_actions(struct llc_sap *sap, struct sk_buff *skb) { int rc = 0; - llc_sap_action_t *next_action = trans->ev_actions; + const llc_sap_action_t *next_action = trans->ev_actions; for (; next_action && *next_action; next_action++) if ((*next_action)(sap, skb)) diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index aeb6a483b3bc881e4b45c6dad7aacc9ae57bde37..75cc6801a4316dbeb25958192f018b8808681096 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig @@ -33,6 +33,13 @@ config MAC80211_RC_MINSTREL_HT ---help--- This option enables the 'minstrel_ht' TX rate control algorithm +config MAC80211_RC_MINSTREL_VHT + bool "Minstrel 802.11ac support" if EXPERT + depends on MAC80211_RC_MINSTREL_HT + default n + ---help--- + This option enables VHT in the 'minstrel_ht' TX rate control algorithm + choice prompt "Default rate control algorithm" depends on MAC80211_HAS_RC @@ -169,6 +176,17 @@ config MAC80211_HT_DEBUG Do not select this option. +config MAC80211_OCB_DEBUG + bool "Verbose OCB debugging" + depends on MAC80211_DEBUG_MENU + ---help--- + Selecting this option causes mac80211 to print out + very verbose OCB debugging messages. It should not + be selected on production systems as those messages + are remotely triggerable. + + Do not select this option. 
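The two Kconfig entries above follow mac80211's usual debug-option pattern: a bool option becomes an always-defined 0/1 macro (see the MAC80211_OCB_DEBUG definition added to debug.h further down in this patch), so debug statements stay type-checked in every build but cost nothing when disabled. A hedged, standalone sketch of that idiom, with hypothetical names:

#include <stdio.h>

/* Kbuild defines CONFIG_* only when the bool option is enabled;
 * the header converts that into an always-defined 0/1 constant.
 */
#ifdef CONFIG_DEMO_OCB_DEBUG
#define DEMO_OCB_DEBUG 1
#else
#define DEMO_OCB_DEBUG 0
#endif

/* With DEMO_OCB_DEBUG at 0 the compiler constant-folds the branch
 * away entirely, yet the format string and arguments are still
 * checked in both configurations -- unlike #ifdef at each call site.
 */
#define demo_ocb_dbg(fmt, ...)					\
do {								\
	if (DEMO_OCB_DEBUG)					\
		printf("ocb: " fmt "\n", ##__VA_ARGS__);	\
} while (0)

int main(void)
{
	demo_ocb_dbg("joined OCB network on freq %d MHz", 5890);
	return 0;
}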
+ config MAC80211_IBSS_DEBUG bool "Verbose IBSS debugging" depends on MAC80211_DEBUG_MENU diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile index 7273d2796dd1a79e4a529bb38a69a7722a7dec3e..e53671b1105e039ad95fadc8c3f96fd090550cc3 100644 --- a/net/mac80211/Makefile +++ b/net/mac80211/Makefile @@ -27,7 +27,8 @@ mac80211-y := \ event.o \ chan.o \ trace.o mlme.o \ - tdls.o + tdls.o \ + ocb.o mac80211-$(CONFIG_MAC80211_LEDS) += led.o mac80211-$(CONFIG_MAC80211_DEBUGFS) += \ diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index d6986f3aa5c469fa15dfbc585d72ec0b5cc53af4..a360c15cc978b0a6c396e0a18a07cdfd5e3154e5 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -149,11 +149,6 @@ void ieee80211_assign_tid_tx(struct sta_info *sta, int tid, rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx); } -static inline int ieee80211_ac_from_tid(int tid) -{ - return ieee802_1d_to_ac[tid & 7]; -} - /* * When multiple aggregation sessions on multiple stations * are being created/destroyed simultaneously, we need to @@ -514,6 +509,10 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, struct tid_ampdu_tx *tid_tx; int ret = 0; + if (WARN(sta->reserved_tid == tid, + "Requested to start BA session on reserved tid=%d", tid)) + return -EINVAL; + trace_api_start_tx_ba_session(pubsta, tid); if (WARN_ON_ONCE(!local->ops->ampdu_action)) @@ -770,6 +769,9 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) goto unlock; } + WARN(sta->reserved_tid == tid, + "Requested to stop BA session on reserved tid=%d", tid); + if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { /* already in progress stopping it */ ret = 0; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 343da1e35025199bbef7d6680e0770fbdc3eadd2..e75d5c53e97ba9cbd9640de2d4a3798d33711609 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -20,6 +20,7 @@ #include "cfg.h" #include "rate.h" #include "mesh.h" +#include "wme.h" static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy, const char *name, @@ -190,7 +191,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, * receive the key. When wpa_supplicant has roamed * using FT, it attempts to set the key before * association has completed, this rejects that attempt - * so it will set the key again after assocation. + * so it will set the key again after association. * * TODO: accept the key if we have a station entry and * add it to the device after the station. 
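The ieee80211_ac_from_tid() inline deleted from agg-tx.c above is not lost: later in this patch it reappears in ieee80211_i.h next to an extern declaration of ieee802_1d_to_ac[], because the new WMM-AC TSPEC code needs the same TID-to-access-category mapping. For orientation, a self-contained sketch of that mapping; the table mirrors the IEEE 802.1D user-priority conventions, but treat the names and values here as illustrative:

#include <stdio.h>

enum demo_ac { DEMO_AC_VO, DEMO_AC_VI, DEMO_AC_BE, DEMO_AC_BK };

/* 802.1D user priority (TID 0..7) -> WMM access category */
static const int demo_1d_to_ac[8] = {
	DEMO_AC_BE,	/* UP 0: best effort */
	DEMO_AC_BK,	/* UP 1: background  */
	DEMO_AC_BK,	/* UP 2              */
	DEMO_AC_BE,	/* UP 3              */
	DEMO_AC_VI,	/* UP 4: video       */
	DEMO_AC_VI,	/* UP 5              */
	DEMO_AC_VO,	/* UP 6: voice       */
	DEMO_AC_VO,	/* UP 7              */
};

static int demo_ac_from_tid(int tid)
{
	return demo_1d_to_ac[tid & 7];	/* mask, as the kernel inline does */
}

int main(void)
{
	int tid;

	for (tid = 0; tid < 8; tid++)
		printf("TID %d -> AC %d\n", tid, demo_ac_from_tid(tid));
	return 0;
}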
@@ -229,6 +230,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, case NUM_NL80211_IFTYPES: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: + case NL80211_IFTYPE_OCB: /* shouldn't happen */ WARN_ON_ONCE(1); break; @@ -1040,6 +1042,13 @@ static int sta_apply_parameters(struct ieee80211_local *local, clear_sta_flag(sta, WLAN_STA_TDLS_PEER); } + /* mark TDLS channel switch support, if the AP allows it */ + if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && + !sdata->u.mgd.tdls_chan_switch_prohibited && + params->ext_capab_len >= 4 && + params->ext_capab[3] & WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH) + set_sta_flag(sta, WLAN_STA_TDLS_CHAN_SWITCH); + if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD) { sta->sta.uapsd_queues = params->uapsd_queues; sta->sta.max_sp = params->max_sp; @@ -1225,14 +1234,14 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, } static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev, - const u8 *mac) + struct station_del_parameters *params) { struct ieee80211_sub_if_data *sdata; sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (mac) - return sta_info_destroy_addr_bss(sdata, mac); + if (params->mac) + return sta_info_destroy_addr_bss(sdata, params->mac); sta_info_flush(sdata); return 0; @@ -1516,6 +1525,57 @@ static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev, return 0; } +static void mpp_set_pinfo(struct mesh_path *mpath, u8 *mpp, + struct mpath_info *pinfo) +{ + memset(pinfo, 0, sizeof(*pinfo)); + memcpy(mpp, mpath->mpp, ETH_ALEN); + + pinfo->generation = mpp_paths_generation; +} + +static int ieee80211_get_mpp(struct wiphy *wiphy, struct net_device *dev, + u8 *dst, u8 *mpp, struct mpath_info *pinfo) + +{ + struct ieee80211_sub_if_data *sdata; + struct mesh_path *mpath; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + rcu_read_lock(); + mpath = mpp_path_lookup(sdata, dst); + if (!mpath) { + rcu_read_unlock(); + return -ENOENT; + } + memcpy(dst, mpath->dst, ETH_ALEN); + mpp_set_pinfo(mpath, mpp, pinfo); + rcu_read_unlock(); + return 0; +} + +static int ieee80211_dump_mpp(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *dst, u8 *mpp, + struct mpath_info *pinfo) +{ + struct ieee80211_sub_if_data *sdata; + struct mesh_path *mpath; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + rcu_read_lock(); + mpath = mpp_path_lookup_by_idx(sdata, idx); + if (!mpath) { + rcu_read_unlock(); + return -ENOENT; + } + memcpy(dst, mpath->dst, ETH_ALEN); + mpp_set_pinfo(mpath, mpp, pinfo); + rcu_read_unlock(); + return 0; +} + static int ieee80211_get_mesh_config(struct wiphy *wiphy, struct net_device *dev, struct mesh_config *conf) @@ -1966,6 +2026,17 @@ static int ieee80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) return ieee80211_ibss_leave(IEEE80211_DEV_TO_SUB_IF(dev)); } +static int ieee80211_join_ocb(struct wiphy *wiphy, struct net_device *dev, + struct ocb_setup *setup) +{ + return ieee80211_ocb_join(IEEE80211_DEV_TO_SUB_IF(dev), setup); +} + +static int ieee80211_leave_ocb(struct wiphy *wiphy, struct net_device *dev) +{ + return ieee80211_ocb_leave(IEEE80211_DEV_TO_SUB_IF(dev)); +} + static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev, int rate[IEEE80211_NUM_BANDS]) { @@ -2081,6 +2152,9 @@ static int ieee80211_get_tx_power(struct wiphy *wiphy, struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + if (local->ops->get_txpower) + return drv_get_txpower(local, sdata, dbm); 
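The drv_get_txpower() call added at the end of the previous hunk follows the wrapper convention used all through driver-ops.h: the op is optional, so the wrapper reports -EOPNOTSUPP when it is absent and the cfg80211 handler falls back to mac80211's software notion of the value (the use_chanctx branch that continues below). A reduced sketch of that call-then-fallback structure; every name here is hypothetical, and the real wrappers also emit tracepoints:

#include <errno.h>
#include <stdio.h>

struct demo_ops {
	int (*get_txpower)(void *priv, int *dbm);	/* optional op */
};

struct demo_local {
	const struct demo_ops *ops;
	void *priv;
	int sw_power_level;	/* the stack's own configured level */
};

static int demo_drv_get_txpower(struct demo_local *local, int *dbm)
{
	if (!local->ops->get_txpower)
		return -EOPNOTSUPP;
	return local->ops->get_txpower(local->priv, dbm); /* device's answer */
}

static int demo_get_tx_power(struct demo_local *local, int *dbm)
{
	/* prefer the device's report, as the hunk above now does */
	if (local->ops->get_txpower)
		return demo_drv_get_txpower(local, dbm);

	*dbm = local->sw_power_level;	/* software fallback */
	return 0;
}

int main(void)
{
	static const struct demo_ops no_ops = { .get_txpower = NULL };
	struct demo_local local = { &no_ops, NULL, 20 };
	int dbm;

	demo_get_tx_power(&local, &dbm);
	printf("txpower: %d dBm\n", dbm);
	return 0;
}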
+ if (!local->use_chanctx) *dbm = local->hw.conf.power_level; else @@ -2850,11 +2924,7 @@ static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata) if (sdata->reserved_ready) return 0; - err = ieee80211_vif_use_reserved_context(sdata); - if (err) - return err; - - return 0; + return ieee80211_vif_use_reserved_context(sdata); } if (!cfg80211_chandef_identical(&sdata->vif.bss_conf.chandef, @@ -2868,7 +2938,6 @@ static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata) return err; ieee80211_bss_info_change_notify(sdata, changed); - cfg80211_ch_switch_notify(sdata->dev, &sdata->csa_chandef); if (sdata->csa_block_tx) { ieee80211_wake_vif_queues(local, sdata, @@ -2876,6 +2945,12 @@ static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata) sdata->csa_block_tx = false; } + err = drv_post_channel_switch(sdata); + if (err) + return err; + + cfg80211_ch_switch_notify(sdata->dev, &sdata->csa_chandef); + return 0; } @@ -3053,9 +3128,11 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; + struct ieee80211_channel_switch ch_switch; struct ieee80211_chanctx_conf *conf; struct ieee80211_chanctx *chanctx; - int err, changed = 0; + u32 changed = 0; + int err; sdata_assert_lock(sdata); lockdep_assert_held(&local->mtx); @@ -3088,6 +3165,16 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, goto out; } + ch_switch.timestamp = 0; + ch_switch.device_timestamp = 0; + ch_switch.block_tx = params->block_tx; + ch_switch.chandef = params->chandef; + ch_switch.count = params->count; + + err = drv_pre_channel_switch(sdata, &ch_switch); + if (err) + goto out; + err = ieee80211_vif_reserve_chanctx(sdata, &params->chandef, chanctx->mode, params->radar_required); @@ -3115,6 +3202,9 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, ieee80211_stop_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_CSA); + cfg80211_ch_switch_started_notify(sdata->dev, &sdata->csa_chandef, + params->count); + if (changed) { ieee80211_bss_info_change_notify(sdata, changed); drv_channel_switch_beacon(sdata, &params->chandef); @@ -3431,6 +3521,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_INTFL_NL80211_FRAME_TX; + info->band = band; skb_set_queue_mapping(skb, IEEE80211_AC_VO); skb->priority = 7; @@ -3438,7 +3529,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, nullfunc->qos_ctrl = cpu_to_le16(7); local_bh_disable(); - ieee80211_xmit(sdata, skb, band); + ieee80211_xmit(sdata, skb); local_bh_enable(); rcu_read_unlock(); @@ -3521,6 +3612,76 @@ static int ieee80211_set_ap_chanwidth(struct wiphy *wiphy, return ret; } +static int ieee80211_add_tx_ts(struct wiphy *wiphy, struct net_device *dev, + u8 tsid, const u8 *peer, u8 up, + u16 admitted_time) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + int ac = ieee802_1d_to_ac[up]; + + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return -EOPNOTSUPP; + + if (!(sdata->wmm_acm & BIT(up))) + return -EINVAL; + + if (ifmgd->tx_tspec[ac].admitted_time) + return -EBUSY; + + if (admitted_time) { + ifmgd->tx_tspec[ac].admitted_time = 32 * admitted_time; + ifmgd->tx_tspec[ac].tsid = tsid; + ifmgd->tx_tspec[ac].up = up; + } + + return 0; +} + +static int ieee80211_del_tx_ts(struct 
wiphy *wiphy, struct net_device *dev, + u8 tsid, const u8 *peer) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_local *local = wiphy_priv(wiphy); + int ac; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + struct ieee80211_sta_tx_tspec *tx_tspec = &ifmgd->tx_tspec[ac]; + + /* skip unused entries */ + if (!tx_tspec->admitted_time) + continue; + + if (tx_tspec->tsid != tsid) + continue; + + /* due to this, new packets will be reassigned to non-ACM ACs */ + tx_tspec->up = -1; + + /* Make sure that all packets have been sent to avoid + * restoring the QoS params on packets that are still on the + * queues. + */ + synchronize_net(); + ieee80211_flush_queues(local, sdata); + + /* restore the normal QoS parameters + * (unconditionally to avoid races) + */ + tx_tspec->action = TX_TSPEC_ACTION_STOP_DOWNGRADE; + tx_tspec->downgraded = false; + ieee80211_sta_handle_tspec_ac_params(sdata); + + /* finally clear all the data */ + memset(tx_tspec, 0, sizeof(*tx_tspec)); + + return 0; + } + + return -ENOENT; +} + const struct cfg80211_ops mac80211_config_ops = { .add_virtual_intf = ieee80211_add_iface, .del_virtual_intf = ieee80211_del_iface, @@ -3547,11 +3708,15 @@ const struct cfg80211_ops mac80211_config_ops = { .change_mpath = ieee80211_change_mpath, .get_mpath = ieee80211_get_mpath, .dump_mpath = ieee80211_dump_mpath, + .get_mpp = ieee80211_get_mpp, + .dump_mpp = ieee80211_dump_mpp, .update_mesh_config = ieee80211_update_mesh_config, .get_mesh_config = ieee80211_get_mesh_config, .join_mesh = ieee80211_join_mesh, .leave_mesh = ieee80211_leave_mesh, #endif + .join_ocb = ieee80211_join_ocb, + .leave_ocb = ieee80211_leave_ocb, .change_bss = ieee80211_change_bss, .set_txq_params = ieee80211_set_txq_params, .set_monitor_channel = ieee80211_set_monitor_channel, @@ -3587,6 +3752,8 @@ const struct cfg80211_ops mac80211_config_ops = { .set_rekey_data = ieee80211_set_rekey_data, .tdls_oper = ieee80211_tdls_oper, .tdls_mgmt = ieee80211_tdls_mgmt, + .tdls_channel_switch = ieee80211_tdls_channel_switch, + .tdls_cancel_channel_switch = ieee80211_tdls_cancel_channel_switch, .probe_client = ieee80211_probe_client, .set_noack_map = ieee80211_set_noack_map, #ifdef CONFIG_PM @@ -3597,4 +3764,6 @@ const struct cfg80211_ops mac80211_config_ops = { .channel_switch = ieee80211_channel_switch, .set_qos_map = ieee80211_set_qos_map, .set_ap_chanwidth = ieee80211_set_ap_chanwidth, + .add_tx_ts = ieee80211_add_tx_ts, + .del_tx_ts = ieee80211_del_tx_ts, }; diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 4c74e8da64b909cc36befe4c5ecb29af5ff7629e..5d6dae9e4aac099379e6ad8ed54d5ef40eac0f88 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -270,6 +270,7 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local, case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_OCB: width = vif->bss_conf.chandef.width; break; case NL80211_IFTYPE_UNSPECIFIED: @@ -674,6 +675,7 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_OCB: break; default: WARN_ON_ONCE(1); @@ -909,6 +911,7 @@ ieee80211_vif_chanctx_reservation_complete(struct ieee80211_sub_if_data *sdata) case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_OCB: ieee80211_queue_work(&sdata->local->hw, &sdata->csa_finalize_work); break;
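One detail of the ieee80211_add_tx_ts() handler above worth spelling out: the TSPEC medium time is carried over the air in units of 32 microseconds, while mac80211 accounts consumed TX time in plain microseconds (see the admitted_time field comment added to ieee80211_i.h below), hence the one-time 32 * admitted_time scaling at admission. A small worked sketch of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* value as carried in the TSPEC element: units of 32 us */
	uint16_t ota_admitted = 1000;

	/* stored form, directly comparable with per-packet TX times in us */
	uint32_t admitted_us = 32 * (uint32_t)ota_admitted;

	/* 1000 * 32 us = 32000 us, i.e. 32 ms of admitted air time per
	 * measurement interval
	 */
	printf("admitted: %u us (%u ms)\n", admitted_us, admitted_us / 1000);
	return 0;
}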
@@ -929,6 +932,21 @@ ieee80211_vif_chanctx_reservation_complete(struct ieee80211_sub_if_data *sdata) } } +static void +ieee80211_vif_update_chandef(struct ieee80211_sub_if_data *sdata, + const struct cfg80211_chan_def *chandef) +{ + struct ieee80211_sub_if_data *vlan; + + sdata->vif.bss_conf.chandef = *chandef; + + if (sdata->vif.type != NL80211_IFTYPE_AP) + return; + + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) + vlan->vif.bss_conf.chandef = *chandef; +} + static int ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata) { @@ -991,7 +1009,7 @@ ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata) if (sdata->vif.bss_conf.chandef.width != sdata->reserved_chandef.width) changed = BSS_CHANGED_BANDWIDTH; - sdata->vif.bss_conf.chandef = sdata->reserved_chandef; + ieee80211_vif_update_chandef(sdata, &sdata->reserved_chandef); if (changed) ieee80211_bss_info_change_notify(sdata, changed); @@ -1333,7 +1351,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) sdata->reserved_chandef.width) changed = BSS_CHANGED_BANDWIDTH; - sdata->vif.bss_conf.chandef = sdata->reserved_chandef; + ieee80211_vif_update_chandef(sdata, &sdata->reserved_chandef); if (changed) ieee80211_bss_info_change_notify(sdata, changed); @@ -1504,7 +1522,7 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata, goto out; } - sdata->vif.bss_conf.chandef = *chandef; + ieee80211_vif_update_chandef(sdata, chandef); ret = ieee80211_assign_vif_chanctx(sdata, ctx); if (ret) { @@ -1634,7 +1652,7 @@ int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata, } break; case IEEE80211_CHANCTX_WILL_BE_REPLACED: - /* TODO: Perhaps the bandwith change could be treated as a + /* TODO: Perhaps the bandwidth change could be treated as a * reservation itself? */ ret = -EBUSY; goto out; @@ -1646,7 +1664,7 @@ int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata, break; } - sdata->vif.bss_conf.chandef = *chandef; + ieee80211_vif_update_chandef(sdata, chandef); ieee80211_recalc_chanctx_chantype(local, ctx); diff --git a/net/mac80211/debug.h b/net/mac80211/debug.h index 493d68061f0c2717d8828809e8ec27d87d6d08b6..1956b3115dd5916283a613701cf799108292b7c7 100644 --- a/net/mac80211/debug.h +++ b/net/mac80211/debug.h @@ -2,6 +2,12 @@ #define __MAC80211_DEBUG_H #include +#ifdef CONFIG_MAC80211_OCB_DEBUG +#define MAC80211_OCB_DEBUG 1 +#else +#define MAC80211_OCB_DEBUG 0 +#endif + #ifdef CONFIG_MAC80211_IBSS_DEBUG #define MAC80211_IBSS_DEBUG 1 #else @@ -131,6 +137,10 @@ do { \ _sdata_dbg(MAC80211_HT_DEBUG && net_ratelimit(), \ sdata, fmt, ##__VA_ARGS__) +#define ocb_dbg(sdata, fmt, ...) \ + _sdata_dbg(MAC80211_OCB_DEBUG, \ + sdata, fmt, ##__VA_ARGS__) + #define ibss_dbg(sdata, fmt, ...) 
\ _sdata_dbg(MAC80211_IBSS_DEBUG, \ sdata, fmt, ##__VA_ARGS__) diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c index 1521cabad3d6d0f246acfc37b7512a489f14965a..5523b94c7c908f89e7d489903f486a99d738e7d3 100644 --- a/net/mac80211/debugfs_key.c +++ b/net/mac80211/debugfs_key.c @@ -300,10 +300,8 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata) lockdep_assert_held(&sdata->local->key_mtx); - if (sdata->debugfs.default_unicast_key) { - debugfs_remove(sdata->debugfs.default_unicast_key); - sdata->debugfs.default_unicast_key = NULL; - } + debugfs_remove(sdata->debugfs.default_unicast_key); + sdata->debugfs.default_unicast_key = NULL; if (sdata->default_unicast_key) { key = key_mtx_dereference(sdata->local, @@ -314,10 +312,8 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata) sdata->vif.debugfs_dir, buf); } - if (sdata->debugfs.default_multicast_key) { - debugfs_remove(sdata->debugfs.default_multicast_key); - sdata->debugfs.default_multicast_key = NULL; - } + debugfs_remove(sdata->debugfs.default_multicast_key); + sdata->debugfs.default_multicast_key = NULL; if (sdata->default_multicast_key) { key = key_mtx_dereference(sdata->local, diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index bafe489162298ec2d2df67b359985da691785258..94c70091bbd7e067b5bf1cc313e28d6a0842530e 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -74,7 +74,7 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf, test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : "" int res = scnprintf(buf, sizeof(buf), - "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", + "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", TEST(AUTH), TEST(ASSOC), TEST(PS_STA), TEST(PS_DRIVER), TEST(AUTHORIZED), TEST(SHORT_PREAMBLE), @@ -82,10 +82,11 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf, TEST(WDS), TEST(CLEAR_PS_FILT), TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL), TEST(UAPSD), TEST(SP), TEST(TDLS_PEER), - TEST(TDLS_PEER_AUTH), TEST(4ADDR_EVENT), - TEST(INSERTED), TEST(RATE_CONTROL), - TEST(TOFFSET_KNOWN), TEST(MPSP_OWNER), - TEST(MPSP_RECIPIENT)); + TEST(TDLS_PEER_AUTH), TEST(TDLS_INITIATOR), + TEST(TDLS_CHAN_SWITCH), TEST(TDLS_OFF_CHANNEL), + TEST(4ADDR_EVENT), TEST(INSERTED), + TEST(RATE_CONTROL), TEST(TOFFSET_KNOWN), + TEST(MPSP_OWNER), TEST(MPSP_RECIPIENT)); #undef TEST return simple_read_from_buffer(userbuf, count, ppos, buf, res); } diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 196d48c68134a08b644fc97f8f1a1fc740ec7bdf..2ebc9ead9695fe4bae46cb30719f59848df1b9c1 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -214,7 +214,8 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local, BSS_CHANGED_BEACON_ENABLED) && sdata->vif.type != NL80211_IFTYPE_AP && sdata->vif.type != NL80211_IFTYPE_ADHOC && - sdata->vif.type != NL80211_IFTYPE_MESH_POINT)) + sdata->vif.type != NL80211_IFTYPE_MESH_POINT && + sdata->vif.type != NL80211_IFTYPE_OCB)) return; if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE || @@ -379,23 +380,26 @@ static inline int drv_sched_scan_stop(struct ieee80211_local *local, return ret; } -static inline void drv_sw_scan_start(struct ieee80211_local *local) +static inline void drv_sw_scan_start(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + const u8 *mac_addr) { might_sleep(); - trace_drv_sw_scan_start(local); + trace_drv_sw_scan_start(local, sdata, mac_addr); if 
(local->ops->sw_scan_start) - local->ops->sw_scan_start(&local->hw); + local->ops->sw_scan_start(&local->hw, &sdata->vif, mac_addr); trace_drv_return_void(local); } -static inline void drv_sw_scan_complete(struct ieee80211_local *local) +static inline void drv_sw_scan_complete(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) { might_sleep(); - trace_drv_sw_scan_complete(local); + trace_drv_sw_scan_complete(local, sdata); if (local->ops->sw_scan_complete) - local->ops->sw_scan_complete(&local->hw); + local->ops->sw_scan_complete(&local->hw, &sdata->vif); trace_drv_return_void(local); } @@ -620,6 +624,21 @@ static inline void drv_sta_rc_update(struct ieee80211_local *local, trace_drv_return_void(local); } +static inline void drv_sta_rate_tbl_update(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta) +{ + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return; + + trace_drv_sta_rate_tbl_update(local, sdata, sta); + if (local->ops->sta_rate_tbl_update) + local->ops->sta_rate_tbl_update(&local->hw, &sdata->vif, sta); + + trace_drv_return_void(local); +} + static inline int drv_conf_tx(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, u16 ac, const struct ieee80211_tx_queue_params *params) @@ -631,6 +650,12 @@ static inline int drv_conf_tx(struct ieee80211_local *local, if (!check_sdata_in_driver(sdata)) return -EIO; + if (WARN_ONCE(params->cw_min == 0 || + params->cw_min > params->cw_max, + "%s: invalid CW_min/CW_max: %d/%d\n", + sdata->name, params->cw_min, params->cw_max)) + return -EINVAL; + trace_drv_conf_tx(local, sdata, ac, params); if (local->ops->conf_tx) ret = local->ops->conf_tx(&local->hw, &sdata->vif, @@ -764,12 +789,13 @@ static inline void drv_flush(struct ieee80211_local *local, } static inline void drv_channel_switch(struct ieee80211_local *local, - struct ieee80211_channel_switch *ch_switch) + struct ieee80211_sub_if_data *sdata, + struct ieee80211_channel_switch *ch_switch) { might_sleep(); - trace_drv_channel_switch(local, ch_switch); - local->ops->channel_switch(&local->hw, ch_switch); + trace_drv_channel_switch(local, sdata, ch_switch); + local->ops->channel_switch(&local->hw, &sdata->vif, ch_switch); trace_drv_return_void(local); } @@ -1144,13 +1170,15 @@ static inline void drv_stop_ap(struct ieee80211_local *local, trace_drv_return_void(local); } -static inline void drv_restart_complete(struct ieee80211_local *local) +static inline void +drv_reconfig_complete(struct ieee80211_local *local, + enum ieee80211_reconfig_type reconfig_type) { might_sleep(); - trace_drv_restart_complete(local); - if (local->ops->restart_complete) - local->ops->restart_complete(&local->hw); + trace_drv_reconfig_complete(local, reconfig_type); + if (local->ops->reconfig_complete) + local->ops->reconfig_complete(&local->hw, reconfig_type); trace_drv_return_void(local); } @@ -1196,6 +1224,40 @@ drv_channel_switch_beacon(struct ieee80211_sub_if_data *sdata, } } +static inline int +drv_pre_channel_switch(struct ieee80211_sub_if_data *sdata, + struct ieee80211_channel_switch *ch_switch) +{ + struct ieee80211_local *local = sdata->local; + int ret = 0; + + if (!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_pre_channel_switch(local, sdata, ch_switch); + if (local->ops->pre_channel_switch) + ret = local->ops->pre_channel_switch(&local->hw, &sdata->vif, + ch_switch); + trace_drv_return_int(local, ret); + return ret; +} + +static inline int +drv_post_channel_switch(struct 
ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + int ret = 0; + + if (!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_post_channel_switch(local, sdata); + if (local->ops->post_channel_switch) + ret = local->ops->post_channel_switch(&local->hw, &sdata->vif); + trace_drv_return_int(local, ret); + return ret; +} + static inline int drv_join_ibss(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { @@ -1238,4 +1300,71 @@ static inline u32 drv_get_expected_throughput(struct ieee80211_local *local, return ret; } +static inline int drv_get_txpower(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, int *dbm) +{ + int ret; + + if (!local->ops->get_txpower) + return -EOPNOTSUPP; + + ret = local->ops->get_txpower(&local->hw, &sdata->vif, dbm); + trace_drv_get_txpower(local, sdata, *dbm, ret); + + return ret; +} + +static inline int +drv_tdls_channel_switch(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, u8 oper_class, + struct cfg80211_chan_def *chandef, + struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie) +{ + int ret; + + might_sleep(); + if (!check_sdata_in_driver(sdata)) + return -EIO; + + if (!local->ops->tdls_channel_switch) + return -EOPNOTSUPP; + + trace_drv_tdls_channel_switch(local, sdata, sta, oper_class, chandef); + ret = local->ops->tdls_channel_switch(&local->hw, &sdata->vif, sta, + oper_class, chandef, tmpl_skb, + ch_sw_tm_ie); + trace_drv_return_int(local, ret); + return ret; +} + +static inline void +drv_tdls_cancel_channel_switch(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta) +{ + might_sleep(); + if (!check_sdata_in_driver(sdata)) + return; + + if (!local->ops->tdls_cancel_channel_switch) + return; + + trace_drv_tdls_cancel_channel_switch(local, sdata, sta); + local->ops->tdls_cancel_channel_switch(&local->hw, &sdata->vif, sta); + trace_drv_return_void(local); +} + +static inline void +drv_tdls_recv_channel_switch(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_tdls_ch_sw_params *params) +{ + trace_drv_tdls_recv_channel_switch(local, sdata, params); + if (local->ops->tdls_recv_channel_switch) + local->ops->tdls_recv_channel_switch(&local->hw, &sdata->vif, + params); + trace_drv_return_void(local); +} + #endif /* __MAC80211_DRIVER_OPS */ diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 8c68da30595df7793545200c9ce102ae2d5e612d..cc6e964d98370e8d07b640f6d8c01ddfbce2be1b 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -131,7 +131,7 @@ enum ieee80211_bss_corrupt_data_flags { * * These are bss flags that are attached to a bss in the * @valid_data field of &struct ieee80211_bss. They show which parts - * of the data structure were recieved as a result of an un-corrupted + * of the data structure were received as a result of an un-corrupted * beacon/probe response. 
*/ enum ieee80211_bss_valid_data_flags { @@ -399,6 +399,24 @@ struct ieee80211_mgd_assoc_data { u8 ie[]; }; +struct ieee80211_sta_tx_tspec { + /* timestamp of the first packet in the time slice */ + unsigned long time_slice_start; + + u32 admitted_time; /* in usecs, unlike over the air */ + u8 tsid; + s8 up; /* signed to be able to invalidate with -1 during teardown */ + + /* consumed TX time in microseconds in the time slice */ + u32 consumed_tx_time; + enum { + TX_TSPEC_ACTION_NONE = 0, + TX_TSPEC_ACTION_DOWNGRADE, + TX_TSPEC_ACTION_STOP_DOWNGRADE, + } action; + bool downgraded; +}; + struct ieee80211_if_managed { struct timer_list timer; struct timer_list conn_mon_timer; @@ -434,6 +452,8 @@ struct ieee80211_if_managed { unsigned int flags; + bool csa_waiting_bcn; + bool beacon_crc_valid; u32 beacon_crc; @@ -505,8 +525,23 @@ struct ieee80211_if_managed { struct ieee80211_vht_cap vht_capa; /* configured VHT overrides */ struct ieee80211_vht_cap vht_capa_mask; /* Valid parts of vht_capa */ + /* TDLS support */ u8 tdls_peer[ETH_ALEN] __aligned(2); struct delayed_work tdls_peer_del_work; + struct sk_buff *orig_teardown_skb; /* The original teardown skb */ + struct sk_buff *teardown_skb; /* A copy to send through the AP */ + spinlock_t teardown_lock; /* To lock changing teardown_skb */ + bool tdls_chan_switch_prohibited; + + /* WMM-AC TSPEC support */ + struct ieee80211_sta_tx_tspec tx_tspec[IEEE80211_NUM_ACS]; + /* Use a separate work struct so that we can do something here + * while the sdata->work is flushing the queues, for example. + * otherwise, in scenarios where we hardly get any traffic out + * on the BE queue, but there's a lot of VO traffic, we might + * get stuck in a downgraded situation and flush takes forever. + */ + struct delayed_work tx_tspec_wk; }; struct ieee80211_if_ibss { @@ -546,6 +581,25 @@ struct ieee80211_if_ibss { } state; }; +/** + * struct ieee80211_if_ocb - OCB mode state + * + * @housekeeping_timer: timer for periodic invocation of a housekeeping task + * @wrkq_flags: OCB deferred task action + * @incomplete_lock: delayed STA insertion lock + * @incomplete_stations: list of STAs waiting for delayed insertion + * @joined: indication if the interface is connected to an OCB network + */ +struct ieee80211_if_ocb { + struct timer_list housekeeping_timer; + unsigned long wrkq_flags; + + spinlock_t incomplete_lock; + struct list_head incomplete_stations; + + bool joined; +}; + /** * struct ieee80211_mesh_sync_ops - Extensible synchronization framework interface * @@ -839,6 +893,7 @@ struct ieee80211_sub_if_data { struct ieee80211_if_managed mgd; struct ieee80211_if_ibss ibss; struct ieee80211_if_mesh mesh; + struct ieee80211_if_ocb ocb; u32 mntr_flags; } u; @@ -938,6 +993,7 @@ enum sdata_queue_type { IEEE80211_SDATA_QUEUE_AGG_STOP = 2, IEEE80211_SDATA_QUEUE_RX_AGG_START = 3, IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4, + IEEE80211_SDATA_QUEUE_TDLS_CHSW = 5, }; enum { @@ -955,6 +1011,7 @@ enum queue_stop_reason { IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL, IEEE80211_QUEUE_STOP_REASON_FLUSH, IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN, + IEEE80211_QUEUE_STOP_REASON_RESERVE_TID, IEEE80211_QUEUE_STOP_REASONS, }; @@ -1181,7 +1238,7 @@ struct ieee80211_local { unsigned long scanning; struct cfg80211_ssid scan_ssid; struct cfg80211_scan_request *int_scan_req; - struct cfg80211_scan_request *scan_req; + struct cfg80211_scan_request __rcu *scan_req; struct ieee80211_scan_request *hw_scan_req; struct cfg80211_chan_def scan_chandef; enum ieee80211_band hw_scan_band; @@ -1191,7 +1248,8 @@ 
struct ieee80211_local { struct work_struct sched_scan_stopped_work; struct ieee80211_sub_if_data __rcu *sched_scan_sdata; - struct cfg80211_sched_scan_request *sched_scan_req; + struct cfg80211_sched_scan_request __rcu *sched_scan_req; + u8 scan_addr[ETH_ALEN]; unsigned long leave_oper_channel_time; enum mac80211_scan_state next_scan_state; @@ -1307,6 +1365,9 @@ struct ieee80211_local { /* virtual monitor interface */ struct ieee80211_sub_if_data __rcu *monitor_sdata; struct cfg80211_chan_def monitor_chandef; + + /* extended capabilities provided by mac80211 */ + u8 ext_capa[8]; }; static inline struct ieee80211_sub_if_data * @@ -1342,6 +1403,9 @@ struct ieee802_11_elems { size_t total_len; /* pointers to IEs */ + const struct ieee80211_tdls_lnkie *lnk_id; + const struct ieee80211_ch_switch_timing *ch_sw_timing; + const u8 *ext_capab; const u8 *ssid; const u8 *supp_rates; const u8 *ds_params; @@ -1376,6 +1440,7 @@ struct ieee802_11_elems { const struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie; /* length of them, respectively */ + u8 ext_capab_len; u8 ssid_len; u8 supp_rates_len; u8 tim_len; @@ -1454,6 +1519,7 @@ void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata, __le16 fc, bool acked); void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata); +void ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata); /* IBSS code */ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); @@ -1471,6 +1537,15 @@ int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata, int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata); void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata); +/* OCB code */ +void ieee80211_ocb_work(struct ieee80211_sub_if_data *sdata); +void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata, + const u8 *bssid, const u8 *addr, u32 supp_rates); +void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata); +int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata, + struct ocb_setup *setup); +int ieee80211_ocb_leave(struct ieee80211_sub_if_data *sdata); + /* mesh code */ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata); void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, @@ -1562,8 +1637,14 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev); netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); +void __ieee80211_subif_start_xmit(struct sk_buff *skb, + struct net_device *dev, + u32 info_flags); void ieee80211_purge_tx_queue(struct ieee80211_hw *hw, struct sk_buff_head *skbs); +struct sk_buff * +ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u32 info_flags); /* HT */ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, @@ -1690,8 +1771,7 @@ void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int ke gfp_t gfp); void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, bool bss_notify); -void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, - enum ieee80211_band band); +void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid, @@ -1757,6 +1837,13 @@ static inline bool ieee80211_rx_reorder_ready(struct sk_buff_head *frames) return true; } +extern const int 
ieee802_1d_to_ac[8]; + +static inline int ieee80211_ac_from_tid(int tid) +{ + return ieee802_1d_to_ac[tid & 7]; +} + void ieee80211_dynamic_ps_enable_work(struct work_struct *work); void ieee80211_dynamic_ps_disable_work(struct work_struct *work); void ieee80211_dynamic_ps_timer(unsigned long data); @@ -1766,7 +1853,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local, void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, struct ieee80211_hdr *hdr); void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata, - struct ieee80211_hdr *hdr, bool ack); + struct ieee80211_hdr *hdr, bool ack, u16 tx_time); void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, unsigned long queues, @@ -1795,6 +1882,9 @@ void ieee80211_add_pending_skbs(struct ieee80211_local *local, struct sk_buff_head *skbs); void ieee80211_flush_queues(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); +void __ieee80211_flush_queues(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + unsigned int queues); void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, u16 transaction, u16 auth_alg, u16 status, @@ -1811,12 +1901,14 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, u8 bands_used, u32 *rate_masks, struct cfg80211_chan_def *chandef); struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, - u8 *dst, u32 ratemask, + const u8 *src, const u8 *dst, + u32 ratemask, struct ieee80211_channel *chan, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, bool directed); -void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, +void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, + const u8 *src, const u8 *dst, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, u32 ratemask, bool directed, u32 tx_flags, @@ -1832,8 +1924,10 @@ int __ieee80211_request_smps_ap(struct ieee80211_sub_if_data *sdata, void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata); void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata); -size_t ieee80211_ie_split(const u8 *ies, size_t ielen, - const u8 *ids, int n_ids, size_t offset); +size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen, + const u8 *ids, int n_ids, + const u8 *after_ric, int n_after_ric, + size_t offset); size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset); u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, u16 cap); @@ -1920,6 +2014,14 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, enum nl80211_tdls_operation oper); void ieee80211_tdls_peer_del_work(struct work_struct *wk); +int ieee80211_tdls_channel_switch(struct wiphy *wiphy, struct net_device *dev, + const u8 *addr, u8 oper_class, + struct cfg80211_chan_def *chandef); +void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy, + struct net_device *dev, + const u8 *addr); +void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); extern const struct ethtool_ops ieee80211_ethtool_ops; diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 653f5eb07a27f4432429a8fbbc4b32e7200cf4c3..41735539087380c8e84735ef52efd1f2d265bbed 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -258,6 +258,15 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata, /* we hold the RTNL here so can safely walk the 
list */ list_for_each_entry(nsdata, &local->interfaces, list) { if (nsdata != sdata && ieee80211_sdata_running(nsdata)) { + /* + * Only OCB and monitor mode may coexist + */ + if ((sdata->vif.type == NL80211_IFTYPE_OCB && + nsdata->vif.type != NL80211_IFTYPE_MONITOR) || + (sdata->vif.type != NL80211_IFTYPE_MONITOR && + nsdata->vif.type == NL80211_IFTYPE_OCB)) + return -EBUSY; + /* * Allow only a single IBSS interface to be up at any * time. This is restricted because beacon distribution @@ -511,6 +520,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) sdata->vif.cab_queue = master->vif.cab_queue; memcpy(sdata->vif.hw_queue, master->vif.hw_queue, sizeof(sdata->vif.hw_queue)); + sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef; break; } case NL80211_IFTYPE_AP: @@ -521,6 +531,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_P2P_DEVICE: + case NL80211_IFTYPE_OCB: /* no special treatment */ break; case NL80211_IFTYPE_UNSPECIFIED: @@ -631,6 +642,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_OCB: netif_carrier_off(dev); break; case NL80211_IFTYPE_WDS: @@ -844,6 +856,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, sdata_lock(sdata); mutex_lock(&local->mtx); sdata->vif.csa_active = false; + if (sdata->vif.type == NL80211_IFTYPE_STATION) + sdata->u.mgd.csa_waiting_bcn = false; if (sdata->csa_block_tx) { ieee80211_wake_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_CSA); @@ -1195,6 +1209,8 @@ static void ieee80211_iface_work(struct work_struct *work) WLAN_BACK_RECIPIENT, 0, false); mutex_unlock(&local->sta_mtx); + } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_TDLS_CHSW) { + ieee80211_process_tdls_channel_switch(sdata, skb); } else if (ieee80211_is_action(mgmt->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_BACK) { int len = skb->len; @@ -1285,6 +1301,9 @@ static void ieee80211_iface_work(struct work_struct *work) break; ieee80211_mesh_work(sdata); break; + case NL80211_IFTYPE_OCB: + ieee80211_ocb_work(sdata); + break; default: break; } @@ -1304,6 +1323,9 @@ static void ieee80211_recalc_smps_work(struct work_struct *work) static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type) { + static const u8 bssid_wildcard[ETH_ALEN] = {0xff, 0xff, 0xff, + 0xff, 0xff, 0xff}; + /* clear type-dependent union */ memset(&sdata->u, 0, sizeof(sdata->u)); @@ -1355,6 +1377,10 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, sdata->vif.bss_conf.bssid = sdata->u.mgd.bssid; ieee80211_sta_setup_sdata(sdata); break; + case NL80211_IFTYPE_OCB: + sdata->vif.bss_conf.bssid = bssid_wildcard; + ieee80211_ocb_setup_sdata(sdata); + break; case NL80211_IFTYPE_ADHOC: sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid; ieee80211_ibss_setup_sdata(sdata); @@ -1402,6 +1428,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, case NL80211_IFTYPE_AP: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_OCB: /* * Could maybe also all others here? 
* Just not sure how that interacts @@ -1417,6 +1444,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, case NL80211_IFTYPE_AP: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_OCB: /* * Could probably support everything * but WDS here (WDS do_open can fail @@ -1675,7 +1703,10 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, } ieee80211_assign_perm_addr(local, ndev->perm_addr, type); - memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN); + if (params && is_valid_ether_addr(params->macaddr)) + memcpy(ndev->dev_addr, params->macaddr, ETH_ALEN); + else + memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN); SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); /* don't use IEEE80211_DEV_TO_SUB_IF -- it checks too much */ diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 4712150dc2109df0b6f17162da8ec5119a94ec0a..434a91ad12c88dabcc2674413e86d08386c3b37d 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -94,8 +94,17 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) might_sleep(); - if (key->flags & KEY_FLAG_TAINTED) + if (key->flags & KEY_FLAG_TAINTED) { + /* If we get here, it's during resume and the key is + * tainted so shouldn't be used/programmed any more. + * However, its flags may still indicate that it was + * programmed into the device (since we're in resume) + * so clear that flag now to avoid trying to remove + * it again later. + */ + key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; return -EINVAL; + } if (!key->local->ops->set_key) goto out_unsupported; diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 0de7c93bf62b5a3352de711ab9deeaa21649d8a1..6ab99da38db92f72c5bec089e8e6ec34267d62b0 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -478,13 +478,9 @@ static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = { }, }; -static const u8 extended_capabilities[] = { - 0, 0, 0, 0, 0, 0, 0, - WLAN_EXT_CAPA8_OPMODE_NOTIF, -}; - -struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, - const struct ieee80211_ops *ops) +struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, + const struct ieee80211_ops *ops, + const char *requested_name) { struct ieee80211_local *local; int priv_size, i; @@ -524,7 +520,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, */ priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len; - wiphy = wiphy_new(&mac80211_config_ops, priv_size); + wiphy = wiphy_new_nm(&mac80211_config_ops, priv_size, requested_name); if (!wiphy) return NULL; @@ -539,10 +535,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, WIPHY_FLAG_REPORTS_OBSS | WIPHY_FLAG_OFFCHAN_TX; - wiphy->extended_capabilities = extended_capabilities; - wiphy->extended_capabilities_mask = extended_capabilities; - wiphy->extended_capabilities_len = ARRAY_SIZE(extended_capabilities); - if (ops->remain_on_channel) wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; @@ -550,6 +542,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, NL80211_FEATURE_SAE | NL80211_FEATURE_HT_IBSS | NL80211_FEATURE_VIF_TXPOWER | + NL80211_FEATURE_MAC_ON_CREATE | NL80211_FEATURE_USERSPACE_MPM; if (!ops->hw_scan) @@ -591,6 +584,13 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask; wiphy->vht_capa_mod_mask = &mac80211_vht_capa_mod_mask; + local->ext_capa[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF; + + wiphy->extended_capabilities = local->ext_capa; + 
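/* Why this hunk moves the block from a file-scope 'static const u8
 * extended_capabilities[]' into a per-device local->ext_capa buffer:
 * a shared const array cannot be changed for one wiphy without
 * affecting every other one. A hedged, self-contained sketch of the
 * pattern (the struct and function names here are invented stand-ins,
 * not mac80211 API; bit values are illustrative):
 */
#include <stdint.h>
#include <string.h>

struct example_dev {
	uint8_t ext_capa[8];	/* mutable, one copy per device */
};

static void example_alloc(struct example_dev *dev)
{
	memset(dev->ext_capa, 0, sizeof(dev->ext_capa));
	dev->ext_capa[7] = 0x40;	/* always-set bit, e.g. opmode notification */
}

static void example_register(struct example_dev *dev, int sta_csa_ok)
{
	/* registration can now flip bits for just this device, as done
	 * later in this patch for WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING */
	if (sta_csa_ok)
		dev->ext_capa[0] |= 0x04;	/* assumed bit value */
}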
wiphy->extended_capabilities_mask = local->ext_capa; + wiphy->extended_capabilities_len = + ARRAY_SIZE(local->ext_capa); + INIT_LIST_HEAD(&local->interfaces); __hw_addr_init(&local->mc_list); @@ -651,7 +651,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, return &local->hw; } -EXPORT_SYMBOL(ieee80211_alloc_hw); +EXPORT_SYMBOL(ieee80211_alloc_hw_nm); static int ieee80211_init_cipher_suites(struct ieee80211_local *local) { @@ -764,6 +764,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) local->hw.offchannel_tx_hw_queue >= local->hw.queues)) return -EINVAL; + if ((hw->wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH) && + (!local->ops->tdls_channel_switch || + !local->ops->tdls_cancel_channel_switch || + !local->ops->tdls_recv_channel_switch)) + return -EOPNOTSUPP; + #ifdef CONFIG_PM if (hw->wiphy->wowlan && (!local->ops->suspend || !local->ops->resume)) return -EINVAL; @@ -787,13 +793,14 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_WDS)) return -EINVAL; - /* DFS currently not supported with channel context drivers */ + /* DFS is not supported with multi-channel combinations yet */ for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) { const struct ieee80211_iface_combination *comb; comb = &local->hw.wiphy->iface_combinations[i]; - if (comb->radar_detect_widths) + if (comb->radar_detect_widths && + comb->num_different_channels > 1) return -EINVAL; } } @@ -958,6 +965,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) if (local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP; + /* mac80211 supports eCSA, if the driver supports STA CSA at all */ + if (local->hw.flags & IEEE80211_HW_CHANCTX_STA_CSA) + local->ext_capa[0] |= WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING; + local->hw.wiphy->max_num_csa_counters = IEEE80211_MAX_CSA_COUNTERS_NUM; result = wiphy_register(local->hw.wiphy); @@ -1019,7 +1030,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) } /* add one default STA interface if supported */ - if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION)) { + if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION) && + !(hw->flags & IEEE80211_HW_NO_AUTO_VIF)) { result = ieee80211_if_add(local, "wlan%d", NULL, NL80211_IFTYPE_STATION, NULL); if (result) diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index f39a19f9090fe0d91181b8cb4487dd928232d24b..50c8473cf9dc5a8c75c7d963001f51390a68e247 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -270,6 +270,8 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst, const u8 *mpp); struct mesh_path * mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx); +struct mesh_path * +mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx); void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop); void mesh_path_expire(struct ieee80211_sub_if_data *sdata); void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, @@ -317,6 +319,7 @@ void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata); bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt); extern int mesh_paths_generation; +extern int mpp_paths_generation; #ifdef CONFIG_MAC80211_MESH static inline diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index a6699dceae7c8aeeff4419d8cc2815f9d86adf5e..b890e225a8f1b98681c53c764fc5252e5f2d5c8d 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ 
-44,6 +44,7 @@ static struct mesh_table __rcu *mesh_paths; static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */ int mesh_paths_generation; +int mpp_paths_generation; /* This lock will have the grow table function as writer and add / delete nodes * as readers. RCU provides sufficient protection only when reading the table @@ -409,6 +410,33 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) return NULL; } +/** + * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index + * @idx: index + * @sdata: local subif, or NULL for all entries + * + * Returns: pointer to the proxy path structure, or NULL if not found. + * + * Locking: must be called within a read rcu section. + */ +struct mesh_path * +mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) +{ + struct mesh_table *tbl = rcu_dereference(mpp_paths); + struct mpath_node *node; + int i; + int j = 0; + + for_each_mesh_entry(tbl, node, i) { + if (sdata && node->mpath->sdata != sdata) + continue; + if (j++ == idx) + return node->mpath; + } + + return NULL; +} + /** * mesh_path_add_gate - add the given mpath to a mesh gate to our path table * @mpath: gate path to add to table @@ -691,6 +719,9 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, spin_unlock(&tbl->hashwlock[hash_idx]); read_unlock_bh(&pathtbl_resize_lock); + + mpp_paths_generation++; + if (grow) { set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); ieee80211_queue_work(&local->hw, &sdata->work); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 93af0f1c9d991a52d82cbc58bcd842415b22a04a..75a9bf50207ecd6cdf5ac41cae7101d03ac58a11 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -552,13 +552,17 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata, cap = vht_cap.cap; if (sdata->u.mgd.flags & IEEE80211_STA_DISABLE_80P80MHZ) { - cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ; - cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; + u32 bw = cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; + + cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; + if (bw == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ || + bw == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) + cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; } if (sdata->u.mgd.flags & IEEE80211_STA_DISABLE_160MHZ) { cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160; - cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; + cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; } /* @@ -775,11 +779,30 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) WLAN_EID_QOS_CAPA, WLAN_EID_RRM_ENABLED_CAPABILITIES, WLAN_EID_MOBILITY_DOMAIN, + WLAN_EID_FAST_BSS_TRANSITION, /* reassoc only */ + WLAN_EID_RIC_DATA, /* reassoc only */ WLAN_EID_SUPPORTED_REGULATORY_CLASSES, }; - noffset = ieee80211_ie_split(assoc_data->ie, assoc_data->ie_len, - before_ht, ARRAY_SIZE(before_ht), - offset); + static const u8 after_ric[] = { + WLAN_EID_SUPPORTED_REGULATORY_CLASSES, + WLAN_EID_HT_CAPABILITY, + WLAN_EID_BSS_COEX_2040, + WLAN_EID_EXT_CAPABILITY, + WLAN_EID_QOS_TRAFFIC_CAPA, + WLAN_EID_TIM_BCAST_REQ, + WLAN_EID_INTERWORKING, + /* 60GHz doesn't happen right now */ + WLAN_EID_VHT_CAPABILITY, + WLAN_EID_OPMODE_NOTIF, + }; + + noffset = ieee80211_ie_split_ric(assoc_data->ie, + assoc_data->ie_len, + before_ht, + ARRAY_SIZE(before_ht), + after_ric, + ARRAY_SIZE(after_ric), + offset); pos = skb_put(skb, noffset - offset); memcpy(pos, assoc_data->ie + offset, noffset - offset); offset = noffset; @@ -813,6 +836,8 @@ static void 
ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) WLAN_EID_TIM_BCAST_REQ, WLAN_EID_INTERWORKING, }; + + /* RIC already taken above, so no need to handle here anymore */ noffset = ieee80211_ie_split(assoc_data->ie, assoc_data->ie_len, before_vht, ARRAY_SIZE(before_vht), offset); @@ -1001,14 +1026,7 @@ static void ieee80211_chswitch_work(struct work_struct *work) /* XXX: shouldn't really modify cfg80211-owned data! */ ifmgd->associated->channel = sdata->csa_chandef.chan; - sdata->vif.csa_active = false; - - /* XXX: wait for a beacon first? */ - if (sdata->csa_block_tx) { - ieee80211_wake_vif_queues(local, sdata, - IEEE80211_QUEUE_STOP_REASON_CSA); - sdata->csa_block_tx = false; - } + ifmgd->csa_waiting_bcn = true; ieee80211_sta_reset_beacon_monitor(sdata); ieee80211_sta_reset_conn_monitor(sdata); @@ -1019,6 +1037,37 @@ out: sdata_unlock(sdata); } +static void ieee80211_chswitch_post_beacon(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + int ret; + + sdata_assert_lock(sdata); + + WARN_ON(!sdata->vif.csa_active); + + if (sdata->csa_block_tx) { + ieee80211_wake_vif_queues(local, sdata, + IEEE80211_QUEUE_STOP_REASON_CSA); + sdata->csa_block_tx = false; + } + + cfg80211_ch_switch_notify(sdata->dev, &sdata->reserved_chandef); + + sdata->vif.csa_active = false; + ifmgd->csa_waiting_bcn = false; + + ret = drv_post_channel_switch(sdata); + if (ret) { + sdata_info(sdata, + "driver post channel switch failed, disconnecting\n"); + ieee80211_queue_work(&local->hw, + &ifmgd->csa_connection_drop_work); + return; + } +} + void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); @@ -1046,7 +1095,8 @@ static void ieee80211_chswitch_timer(unsigned long data) static void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, - u64 timestamp, struct ieee802_11_elems *elems, + u64 timestamp, u32 device_timestamp, + struct ieee802_11_elems *elems, bool beacon) { struct ieee80211_local *local = sdata->local; @@ -1056,6 +1106,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, struct ieee80211_chanctx *chanctx; enum ieee80211_band current_band; struct ieee80211_csa_ie csa_ie; + struct ieee80211_channel_switch ch_switch; int res; sdata_assert_lock(sdata); @@ -1110,21 +1161,31 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, chanctx = container_of(conf, struct ieee80211_chanctx, conf); - if (local->use_chanctx) { - u32 num_chanctx = 0; - list_for_each_entry(chanctx, &local->chanctx_list, list) - num_chanctx++; + if (local->use_chanctx && + !(local->hw.flags & IEEE80211_HW_CHANCTX_STA_CSA)) { + sdata_info(sdata, + "driver doesn't support chan-switch with channel contexts\n"); + ieee80211_queue_work(&local->hw, + &ifmgd->csa_connection_drop_work); + mutex_unlock(&local->chanctx_mtx); + mutex_unlock(&local->mtx); + return; + } + + ch_switch.timestamp = timestamp; + ch_switch.device_timestamp = device_timestamp; + ch_switch.block_tx = csa_ie.mode; + ch_switch.chandef = csa_ie.chandef; + ch_switch.count = csa_ie.count; - if (num_chanctx > 1 || - !(local->hw.flags & IEEE80211_HW_CHANCTX_STA_CSA)) { - sdata_info(sdata, - "not handling chan-switch with channel contexts\n"); - ieee80211_queue_work(&local->hw, - &ifmgd->csa_connection_drop_work); - mutex_unlock(&local->chanctx_mtx); - mutex_unlock(&local->mtx); - return; - } + if (drv_pre_channel_switch(sdata, &ch_switch)) { + sdata_info(sdata, 
+ "preparing for channel switch failed, disconnecting\n"); + ieee80211_queue_work(&local->hw, + &ifmgd->csa_connection_drop_work); + mutex_unlock(&local->chanctx_mtx); + mutex_unlock(&local->mtx); + return; } res = ieee80211_vif_reserve_chanctx(sdata, &csa_ie.chandef, @@ -1150,16 +1211,12 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, IEEE80211_QUEUE_STOP_REASON_CSA); mutex_unlock(&local->mtx); + cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chandef, + csa_ie.count); + if (local->ops->channel_switch) { /* use driver's channel switch callback */ - struct ieee80211_channel_switch ch_switch = { - .timestamp = timestamp, - .block_tx = csa_ie.mode, - .chandef = csa_ie.chandef, - .count = csa_ie.count, - }; - - drv_channel_switch(local, &ch_switch); + drv_channel_switch(local, sdata, &ch_switch); return; } @@ -1580,6 +1637,95 @@ void ieee80211_dfs_cac_timer_work(struct work_struct *work) mutex_unlock(&sdata->local->mtx); } +static bool +__ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + bool ret; + int ac; + + if (local->hw.queues < IEEE80211_NUM_ACS) + return false; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + struct ieee80211_sta_tx_tspec *tx_tspec = &ifmgd->tx_tspec[ac]; + int non_acm_ac; + unsigned long now = jiffies; + + if (tx_tspec->action == TX_TSPEC_ACTION_NONE && + tx_tspec->admitted_time && + time_after(now, tx_tspec->time_slice_start + HZ)) { + tx_tspec->consumed_tx_time = 0; + tx_tspec->time_slice_start = now; + + if (tx_tspec->downgraded) + tx_tspec->action = + TX_TSPEC_ACTION_STOP_DOWNGRADE; + } + + switch (tx_tspec->action) { + case TX_TSPEC_ACTION_STOP_DOWNGRADE: + /* take the original parameters */ + if (drv_conf_tx(local, sdata, ac, &sdata->tx_conf[ac])) + sdata_err(sdata, + "failed to set TX queue parameters for queue %d\n", + ac); + tx_tspec->action = TX_TSPEC_ACTION_NONE; + tx_tspec->downgraded = false; + ret = true; + break; + case TX_TSPEC_ACTION_DOWNGRADE: + if (time_after(now, tx_tspec->time_slice_start + HZ)) { + tx_tspec->action = TX_TSPEC_ACTION_NONE; + ret = true; + break; + } + /* downgrade next lower non-ACM AC */ + for (non_acm_ac = ac + 1; + non_acm_ac < IEEE80211_NUM_ACS; + non_acm_ac++) + if (!(sdata->wmm_acm & BIT(7 - 2 * non_acm_ac))) + break; + /* The loop will result in using BK even if it requires + * admission control, such configuration makes no sense + * and we have to transmit somehow - the AC selection + * does the same thing. 
+ */ + if (drv_conf_tx(local, sdata, ac, + &sdata->tx_conf[non_acm_ac])) + sdata_err(sdata, + "failed to set TX queue parameters for queue %d\n", + ac); + tx_tspec->action = TX_TSPEC_ACTION_NONE; + tx_tspec->downgraded = false; + ret = true; + break; + case TX_TSPEC_ACTION_DOWNGRADE: + if (time_after(now, tx_tspec->time_slice_start + HZ)) { + tx_tspec->action = TX_TSPEC_ACTION_NONE; + ret = true; + break; + } + /* downgrade next lower non-ACM AC */ + for (non_acm_ac = ac + 1; + non_acm_ac < IEEE80211_NUM_ACS; + non_acm_ac++) + if (!(sdata->wmm_acm & BIT(7 - 2 * non_acm_ac))) + break; + /* The loop will result in using BK even if it requires + * admission control, such configuration makes no sense + * and we have to transmit somehow - the AC selection + * does the same thing. + */ + if (drv_conf_tx(local, sdata, ac, + &sdata->tx_conf[non_acm_ac])) + sdata_err(sdata, + "failed to set TX queue parameters for queue %d\n", + ac); + tx_tspec->action = TX_TSPEC_ACTION_NONE; + ret = true; + schedule_delayed_work(&ifmgd->tx_tspec_wk, + tx_tspec->time_slice_start + HZ - now + 1); + break; + case TX_TSPEC_ACTION_NONE: + /* nothing now */ + break; + } + } + + return ret; +} + +void ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata) +{ + if (__ieee80211_sta_handle_tspec_ac_params(sdata)) + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_QOS); +} + +static void ieee80211_sta_handle_tspec_ac_params_wk(struct work_struct *work) +{ + struct ieee80211_sub_if_data *sdata; + + sdata = container_of(work, struct ieee80211_sub_if_data, + u.mgd.tx_tspec_wk.work); + ieee80211_sta_handle_tspec_ac_params(sdata); +} + /* MLME */ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, @@ -1664,12 +1810,14 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local, params.uapsd = uapsd; mlme_dbg(sdata, - "WMM queue=%d aci=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d\n", + "WMM queue=%d aci=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n", queue, aci, acm, params.aifs, params.cw_min, params.cw_max, - params.txop, params.uapsd); + params.txop, params.uapsd, + ifmgd->tx_tspec[queue].downgraded); sdata->tx_conf[queue] = params; - if (drv_conf_tx(local, sdata, queue, &params)) + if (!ifmgd->tx_tspec[queue].downgraded && + drv_conf_tx(local, sdata, queue, &params)) sdata_err(sdata, "failed to set TX queue parameters for queue %d\n", queue); @@ -1924,6 +2072,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, ieee80211_vif_release_channel(sdata); sdata->vif.csa_active = false; + ifmgd->csa_waiting_bcn = false; if (sdata->csa_block_tx) { ieee80211_wake_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_CSA); @@ -1931,6 +2080,10 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, } mutex_unlock(&local->mtx); + /* existing TX TSPEC sessions no longer exist */ + memset(ifmgd->tx_tspec, 0, sizeof(ifmgd->tx_tspec)); + cancel_delayed_work_sync(&ifmgd->tx_tspec_wk); + sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM; } @@ -1983,9 +2136,46 @@ out: mutex_unlock(&local->mtx); } +static void ieee80211_sta_tx_wmm_ac_notify(struct ieee80211_sub_if_data *sdata, + struct ieee80211_hdr *hdr, + u16 tx_time) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + u16 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + int ac = ieee80211_ac_from_tid(tid); + struct ieee80211_sta_tx_tspec *tx_tspec = &ifmgd->tx_tspec[ac]; + unsigned long now = jiffies; + + if (likely(!tx_tspec->admitted_time)) + return; + + if (time_after(now, tx_tspec->time_slice_start + HZ)) { + tx_tspec->consumed_tx_time = 0; + tx_tspec->time_slice_start = now; + + if (tx_tspec->downgraded) { + tx_tspec->action = TX_TSPEC_ACTION_STOP_DOWNGRADE; + schedule_delayed_work(&ifmgd->tx_tspec_wk, 0); + } + } + + if (tx_tspec->downgraded) + return; + + tx_tspec->consumed_tx_time += tx_time; + + if (tx_tspec->consumed_tx_time >= tx_tspec->admitted_time) { + tx_tspec->downgraded = true; + tx_tspec->action = TX_TSPEC_ACTION_DOWNGRADE; + schedule_delayed_work(&ifmgd->tx_tspec_wk, 0); + } +} + void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata, - struct ieee80211_hdr *hdr, bool ack) + struct ieee80211_hdr *hdr, bool ack, u16 tx_time) { + 
ieee80211_sta_tx_wmm_ac_notify(sdata, hdr, tx_time); + if (!ieee80211_is_data(hdr->frame_control)) return; @@ -2040,7 +2230,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) else ssid_len = ssid[1]; - ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL, + ieee80211_send_probe_req(sdata, sdata->vif.addr, NULL, + ssid + 2, ssid_len, NULL, 0, (u32) -1, true, 0, ifmgd->associated->channel, false); rcu_read_unlock(); @@ -2048,8 +2239,6 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); run_again(sdata, ifmgd->probe_timeout); - if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) - ieee80211_flush_queues(sdata->local, sdata); } static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, @@ -2078,9 +2267,7 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, "detected beacon loss from AP (missed %d beacons) - probing\n", beacon_loss_count); - ieee80211_cqm_rssi_notify(&sdata->vif, - NL80211_CQM_RSSI_BEACON_LOSS_EVENT, - GFP_KERNEL); + ieee80211_cqm_beacon_loss_notify(&sdata->vif, GFP_KERNEL); } /* @@ -2145,7 +2332,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, else ssid_len = ssid[1]; - skb = ieee80211_build_probe_req(sdata, cbss->bssid, + skb = ieee80211_build_probe_req(sdata, sdata->vif.addr, cbss->bssid, (u32) -1, cbss->channel, ssid + 2, ssid_len, NULL, 0, true); @@ -2172,6 +2359,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) true, frame_buf); mutex_lock(&local->mtx); sdata->vif.csa_active = false; + ifmgd->csa_waiting_bcn = false; if (sdata->csa_block_tx) { ieee80211_wake_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_CSA); @@ -2618,6 +2806,9 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, } ifmgd->aid = aid; + ifmgd->tdls_chan_switch_prohibited = + elems.ext_capab && elems.ext_capab_len >= 5 && + (elems.ext_capab[4] & WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED); /* * Some APs are erroneously not including some information in their @@ -3196,6 +3387,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, } } + if (ifmgd->csa_waiting_bcn) + ieee80211_chswitch_post_beacon(sdata); + if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid) return; ifmgd->beacon_crc = ncrc; @@ -3204,6 +3398,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems); ieee80211_sta_process_chanswitch(sdata, rx_status->mactime, + rx_status->device_timestamp, &elems, true); if (!(ifmgd->flags & IEEE80211_STA_DISABLE_WMM) && @@ -3335,8 +3530,9 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, break; ieee80211_sta_process_chanswitch(sdata, - rx_status->mactime, - &elems, false); + rx_status->mactime, + rx_status->device_timestamp, + &elems, false); } else if (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC) { ies_len = skb->len - offsetof(struct ieee80211_mgmt, @@ -3357,8 +3553,9 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, &mgmt->u.action.u.ext_chan_switch.data; ieee80211_sta_process_chanswitch(sdata, - rx_status->mactime, - &elems, false); + rx_status->mactime, + rx_status->device_timestamp, + &elems, false); } break; } @@ -3456,7 +3653,8 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) * Direct probe is sent to broadcast address as some APs * will not answer to direct packet in 
unassociated state. */ - ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1], + ieee80211_send_probe_req(sdata, sdata->vif.addr, NULL, + ssidie + 2, ssidie[1], NULL, 0, (u32) -1, true, 0, auth_data->bss->channel, false); rcu_read_unlock(); @@ -3665,11 +3863,12 @@ static void ieee80211_sta_bcn_mon_timer(unsigned long data) struct ieee80211_sub_if_data *sdata = (struct ieee80211_sub_if_data *) data; struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; if (local->quiescing) return; - if (sdata->vif.csa_active) + if (sdata->vif.csa_active && !ifmgd->csa_waiting_bcn) return; sdata->u.mgd.connection_loss = false; @@ -3687,7 +3886,7 @@ static void ieee80211_sta_conn_mon_timer(unsigned long data) if (local->quiescing) return; - if (sdata->vif.csa_active) + if (sdata->vif.csa_active && !ifmgd->csa_waiting_bcn) return; ieee80211_queue_work(&local->hw, &ifmgd->monitor_work); @@ -3799,6 +3998,8 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) (unsigned long) sdata); setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer, (unsigned long) sdata); + INIT_DELAYED_WORK(&ifmgd->tx_tspec_wk, + ieee80211_sta_handle_tspec_ac_params_wk); ifmgd->flags = 0; ifmgd->powersave = sdata->wdev.ps; @@ -3810,6 +4011,11 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) ifmgd->req_smps = IEEE80211_SMPS_AUTOMATIC; else ifmgd->req_smps = IEEE80211_SMPS_OFF; + + /* Setup TDLS data */ + spin_lock_init(&ifmgd->teardown_lock); + ifmgd->teardown_skb = NULL; + ifmgd->orig_teardown_skb = NULL; } /* scan finished notification */ @@ -4672,6 +4878,13 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata) } if (ifmgd->auth_data) ieee80211_destroy_auth_data(sdata, false); + spin_lock_bh(&ifmgd->teardown_lock); + if (ifmgd->teardown_skb) { + kfree_skb(ifmgd->teardown_skb); + ifmgd->teardown_skb = NULL; + ifmgd->orig_teardown_skb = NULL; + } + spin_unlock_bh(&ifmgd->teardown_lock); del_timer_sync(&ifmgd->timer); sdata_unlock(sdata); } @@ -4687,3 +4900,13 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, gfp); } EXPORT_SYMBOL(ieee80211_cqm_rssi_notify); + +void ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + + trace_api_cqm_beacon_loss_notify(sdata->local, sdata); + + cfg80211_cqm_beacon_loss_notify(sdata->dev, gfp); +} +EXPORT_SYMBOL(ieee80211_cqm_beacon_loss_notify); diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c new file mode 100644 index 0000000000000000000000000000000000000000..358d5f9d820788acef2f439dee6ac88250724edc --- /dev/null +++ b/net/mac80211/ocb.c @@ -0,0 +1,250 @@ +/* + * OCB mode implementation + * + * Copyright: (c) 2014 Czech Technical University in Prague + * (c) 2014 Volkswagen Group Research + * Author: Rostislav Lisovy + * Funded by: Volkswagen Group Research + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/delay.h> +#include <linux/if_ether.h> +#include <linux/skbuff.h> +#include <linux/if_arp.h> +#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <net/mac80211.h> +#include <asm/unaligned.h> + +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "rate.h" + +#define IEEE80211_OCB_HOUSEKEEPING_INTERVAL (60 * HZ) +#define IEEE80211_OCB_PEER_INACTIVITY_LIMIT (240 * HZ) +#define IEEE80211_OCB_MAX_STA_ENTRIES 128 + +/** + * enum ocb_deferred_task_flags - mac80211 OCB deferred tasks + * @OCB_WORK_HOUSEKEEPING: run the periodic OCB housekeeping tasks + * + * These flags are used in @wrkq_flags field of &struct ieee80211_if_ocb + */ +enum ocb_deferred_task_flags { + OCB_WORK_HOUSEKEEPING, +}; + +void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata, + const u8 *bssid, const u8 *addr, + u32 supp_rates) +{ + struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_supported_band *sband; + enum nl80211_bss_scan_width scan_width; + struct sta_info *sta; + int band; + + /* XXX: Consider removing the least recently used entry and + * allow new one to be added. + */ + if (local->num_sta >= IEEE80211_OCB_MAX_STA_ENTRIES) { + net_info_ratelimited("%s: No room for a new OCB STA entry %pM\n", + sdata->name, addr); + return; + } + + ocb_dbg(sdata, "Adding new OCB station %pM\n", addr); + + rcu_read_lock(); + chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); + if (WARN_ON_ONCE(!chanctx_conf)) { + rcu_read_unlock(); + return; + } + band = chanctx_conf->def.chan->band; + scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def); + rcu_read_unlock(); + + sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); + if (!sta) + return; + + sta->last_rx = jiffies; + + /* Add only mandatory rates for now */ + sband = local->hw.wiphy->bands[band]; + sta->sta.supp_rates[band] = + ieee80211_mandatory_rates(sband, scan_width); + + spin_lock(&ifocb->incomplete_lock); + list_add(&sta->list, &ifocb->incomplete_stations); + spin_unlock(&ifocb->incomplete_lock); + ieee80211_queue_work(&local->hw, &sdata->work); +} + +static struct sta_info *ieee80211_ocb_finish_sta(struct sta_info *sta) + __acquires(RCU) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + u8 addr[ETH_ALEN]; + + memcpy(addr, sta->sta.addr, ETH_ALEN); + + ocb_dbg(sdata, "Adding new IBSS station %pM (dev=%s)\n", + addr, sdata->name); + + sta_info_move_state(sta, IEEE80211_STA_AUTH); + sta_info_move_state(sta, IEEE80211_STA_ASSOC); + sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); + + rate_control_rate_init(sta); + + /* If it fails, maybe we raced another insertion? 
*/ + if (sta_info_insert_rcu(sta)) + return sta_info_get(sdata, addr); + return sta; +} + +static void ieee80211_ocb_housekeeping(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; + + ocb_dbg(sdata, "Running ocb housekeeping\n"); + + ieee80211_sta_expire(sdata, IEEE80211_OCB_PEER_INACTIVITY_LIMIT); + + mod_timer(&ifocb->housekeeping_timer, + round_jiffies(jiffies + IEEE80211_OCB_HOUSEKEEPING_INTERVAL)); +} + +void ieee80211_ocb_work(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; + struct sta_info *sta; + + if (ifocb->joined != true) + return; + + sdata_lock(sdata); + + spin_lock_bh(&ifocb->incomplete_lock); + while (!list_empty(&ifocb->incomplete_stations)) { + sta = list_first_entry(&ifocb->incomplete_stations, + struct sta_info, list); + list_del(&sta->list); + spin_unlock_bh(&ifocb->incomplete_lock); + + ieee80211_ocb_finish_sta(sta); + rcu_read_unlock(); + spin_lock_bh(&ifocb->incomplete_lock); + } + spin_unlock_bh(&ifocb->incomplete_lock); + + if (test_and_clear_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags)) + ieee80211_ocb_housekeeping(sdata); + + sdata_unlock(sdata); +} + +static void ieee80211_ocb_housekeeping_timer(unsigned long data) +{ + struct ieee80211_sub_if_data *sdata = (void *)data; + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; + + set_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags); + + ieee80211_queue_work(&local->hw, &sdata->work); +} + +void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; + + setup_timer(&ifocb->housekeeping_timer, + ieee80211_ocb_housekeeping_timer, + (unsigned long)sdata); + INIT_LIST_HEAD(&ifocb->incomplete_stations); + spin_lock_init(&ifocb->incomplete_lock); +} + +int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata, + struct ocb_setup *setup) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; + u32 changed = BSS_CHANGED_OCB; + int err; + + if (ifocb->joined == true) + return -EINVAL; + + sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; + sdata->smps_mode = IEEE80211_SMPS_OFF; + sdata->needed_rx_chains = sdata->local->rx_chains; + + mutex_lock(&sdata->local->mtx); + err = ieee80211_vif_use_channel(sdata, &setup->chandef, + IEEE80211_CHANCTX_SHARED); + mutex_unlock(&sdata->local->mtx); + if (err) + return err; + + ieee80211_bss_info_change_notify(sdata, changed); + + ifocb->joined = true; + + set_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags); + ieee80211_queue_work(&local->hw, &sdata->work); + + netif_carrier_on(sdata->dev); + return 0; +} + +int ieee80211_ocb_leave(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + + ifocb->joined = false; + sta_info_flush(sdata); + + spin_lock_bh(&ifocb->incomplete_lock); + while (!list_empty(&ifocb->incomplete_stations)) { + sta = list_first_entry(&ifocb->incomplete_stations, + struct sta_info, list); + list_del(&sta->list); + spin_unlock_bh(&ifocb->incomplete_lock); + + sta_info_free(local, sta); + spin_lock_bh(&ifocb->incomplete_lock); + } + spin_unlock_bh(&ifocb->incomplete_lock); + + netif_carrier_off(sdata->dev); + clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state); + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_OCB); + + mutex_lock(&sdata->local->mtx); + ieee80211_vif_release_channel(sdata); + mutex_unlock(&sdata->local->mtx); + + 
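/* Usage context for the OCB join/leave implementation here, as a
 * hedged sketch: a mac80211 driver only needs to advertise the new
 * interface type at wiphy setup time; the OCB join/leave requests
 * coming from nl80211 then end up in ieee80211_ocb_join() and
 * ieee80211_ocb_leave(). The function below is an invented example,
 * not part of this patch set.
 */
static void example_enable_ocb(struct wiphy *wiphy)
{
	wiphy->interface_modes |= BIT(NL80211_IFTYPE_OCB);
}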
skb_queue_purge(&sdata->skb_queue); + + del_timer_sync(&sdata->u.ocb.housekeeping_timer); + /* If the timer fired while we waited for it, it will have + * requeued the work. Now the work will be running again + * but will not rearm the timer again because it checks + * whether we are connected to the network or not -- at this + * point we shouldn't be anymore. + */ + + return 0; +} diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 6081329784dd4475ae95317dde31910efd760525..d53355b011f5cf6407367e3c5f1a3e513d1cc08e 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -385,7 +385,7 @@ static void rate_idx_match_mask(struct ieee80211_tx_rate *rate, *rate = alt_rate; return; } - } else { + } else if (!(rate->flags & IEEE80211_TX_RC_VHT_MCS)) { /* handle legacy rates */ if (rate_idx_match_legacy_mask(rate, sband->n_bitrates, mask)) return; @@ -446,7 +446,8 @@ static void rate_fixup_ratelist(struct ieee80211_vif *vif, * * XXX: Should this check all retry rates? */ - if (!(rates[0].flags & IEEE80211_TX_RC_MCS)) { + if (!(rates[0].flags & + (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS))) { u32 basic_rates = vif->bss_conf.basic_rates; s8 baserate = basic_rates ? ffs(basic_rates) - 1 : 0; @@ -696,6 +697,7 @@ int rate_control_set_rates(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, struct ieee80211_sta_rates *rates) { + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); struct ieee80211_sta_rates *old; /* @@ -709,6 +711,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw, if (old) kfree_rcu(old, rcu_head); + drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta); + return 0; } EXPORT_SYMBOL(rate_control_set_rates); diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h index 18babe30283212c18ddb113f3295d211a310d014..38652f09feaf250ec0517e23c8f147c8921f7499 100644 --- a/net/mac80211/rate.h +++ b/net/mac80211/rate.h @@ -37,13 +37,35 @@ static inline void rate_control_tx_status(struct ieee80211_local *local, struct rate_control_ref *ref = local->rate_ctrl; struct ieee80211_sta *ista = &sta->sta; void *priv_sta = sta->rate_ctrl_priv; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) return; - ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb); + if (ref->ops->tx_status) + ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb); + else + ref->ops->tx_status_noskb(ref->priv, sband, ista, priv_sta, info); } +static inline void +rate_control_tx_status_noskb(struct ieee80211_local *local, + struct ieee80211_supported_band *sband, + struct sta_info *sta, + struct ieee80211_tx_info *info) +{ + struct rate_control_ref *ref = local->rate_ctrl; + struct ieee80211_sta *ista = &sta->sta; + void *priv_sta = sta->rate_ctrl_priv; + + if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) + return; + + if (WARN_ON_ONCE(!ref->ops->tx_status_noskb)) + return; + + ref->ops->tx_status_noskb(ref->priv, sband, ista, priv_sta, info); +} static inline void rate_control_rate_init(struct sta_info *sta) { diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 2baa7ed8789dce2752a3f69d5cc3627bbbd47c76..d51f6b1c549b8c24929e46b57b96df639d5939ba 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -191,7 +191,7 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi) * (1) if any success probabilitiy >= 95%, out of those rates * choose the maximum throughput rate as max_prob_rate * (2) if all success probabilities < 95%, 
the rate with - * highest success probability is choosen as max_prob_rate */ + * highest success probability is chosen as max_prob_rate */ if (mrs->probability >= MINSTREL_FRAC(95, 100)) { if (mrs->cur_tp >= mi->r[tmp_prob_rate].stats.cur_tp) tmp_prob_rate = i; @@ -223,11 +223,10 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi) static void minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta, - struct sk_buff *skb) + struct ieee80211_tx_info *info) { struct minstrel_priv *mp = priv; struct minstrel_sta_info *mi = priv_sta; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_rate *ar = info->status.rates; int i, ndx; int success; @@ -674,7 +673,7 @@ static u32 minstrel_get_expected_throughput(void *priv_sta) const struct rate_control_ops mac80211_minstrel = { .name = "minstrel", - .tx_status = minstrel_tx_status, + .tx_status_noskb = minstrel_tx_status, .get_rate = minstrel_get_rate, .rate_init = minstrel_rate_init, .alloc = minstrel_alloc, diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 408fd8ab4eef7eaf5f39d643644969a3943c1b3f..80452cfd2dc59f0e9891d30ea2549fc9e958babd 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -10,6 +10,7 @@ #include <linux/skbuff.h> #include <linux/debugfs.h> #include <linux/random.h> +#include <linux/moduleparam.h> #include <linux/ieee80211.h> #include <net/mac80211.h> #include "rate.h" @@ -34,12 +35,17 @@ /* Transmit duration for the raw data part of an average sized packet */ #define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps))) +#define BW_20 0 +#define BW_40 1 +#define BW_80 2 + /* * Define group sort order: HT40 -> SGI -> #streams */ #define GROUP_IDX(_streams, _sgi, _ht40) \ + MINSTREL_HT_GROUP_0 + \ MINSTREL_MAX_STREAMS * 2 * _ht40 + \ - MINSTREL_MAX_STREAMS * _sgi + \ + MINSTREL_MAX_STREAMS * _sgi + \ _streams - 1 /* MCS rate information for an MCS group */ @@ -47,6 +53,7 @@ [GROUP_IDX(_streams, _sgi, _ht40)] = { \ .streams = _streams, \ .flags = \ + IEEE80211_TX_RC_MCS | \ (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \ (_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \ .duration = { \ @@ -61,6 +68,47 @@ } \ } +#define VHT_GROUP_IDX(_streams, _sgi, _bw) \ + (MINSTREL_VHT_GROUP_0 + \ + MINSTREL_MAX_STREAMS * 2 * (_bw) + \ + MINSTREL_MAX_STREAMS * (_sgi) + \ + (_streams) - 1) + +#define BW2VBPS(_bw, r3, r2, r1) \ + (_bw == BW_80 ? r3 : _bw == BW_40 ? r2 : r1) + +#define VHT_GROUP(_streams, _sgi, _bw) \ + [VHT_GROUP_IDX(_streams, _sgi, _bw)] = { \ + .streams = _streams, \ + .flags = \ + IEEE80211_TX_RC_VHT_MCS | \ + (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \ + (_bw == BW_80 ? IEEE80211_TX_RC_80_MHZ_WIDTH : \ + _bw == BW_40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \ + .duration = { \ + MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 117, 54, 26)), \ + MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 234, 108, 52)), \ + MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 351, 162, 78)), \ + MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 468, 216, 104)), \ + MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 702, 324, 156)), \ + MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 936, 432, 208)), \ + MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 1053, 486, 234)), \ + MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 1170, 540, 260)), \ + MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 1404, 648, 312)), \ + MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 1560, 720, 346)) \ + } \ +} + #define CCK_DURATION(_bitrate, _short, _len) \ (1000 * (10 /* SIFS */ + \ (_short ? 
72 + 24 : 144 + 48) + \ @@ -76,70 +124,161 @@ CCK_ACK_DURATION(55, _short), \ CCK_ACK_DURATION(110, _short) -#define CCK_GROUP \ - [MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS] = { \ - .streams = 0, \ - .duration = { \ - CCK_DURATION_LIST(false), \ - CCK_DURATION_LIST(true) \ - } \ +#define CCK_GROUP \ + [MINSTREL_CCK_GROUP] = { \ + .streams = 0, \ + .flags = 0, \ + .duration = { \ + CCK_DURATION_LIST(false), \ + CCK_DURATION_LIST(true) \ + } \ } +#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT +static bool minstrel_vht_only = true; +module_param(minstrel_vht_only, bool, 0644); +MODULE_PARM_DESC(minstrel_vht_only, + "Use only VHT rates when VHT is supported by sta."); +#endif + /* * To enable sufficiently targeted rate sampling, MCS rates are divided into * groups, based on the number of streams and flags (HT40, SGI) that they * use. * * Sortorder has to be fixed for GROUP_IDX macro to be applicable: - * HT40 -> SGI -> #streams + * BW -> SGI -> #streams */ const struct mcs_group minstrel_mcs_groups[] = { - MCS_GROUP(1, 0, 0), - MCS_GROUP(2, 0, 0), + MCS_GROUP(1, 0, BW_20), + MCS_GROUP(2, 0, BW_20), #if MINSTREL_MAX_STREAMS >= 3 - MCS_GROUP(3, 0, 0), + MCS_GROUP(3, 0, BW_20), #endif - MCS_GROUP(1, 1, 0), - MCS_GROUP(2, 1, 0), + MCS_GROUP(1, 1, BW_20), + MCS_GROUP(2, 1, BW_20), #if MINSTREL_MAX_STREAMS >= 3 - MCS_GROUP(3, 1, 0), + MCS_GROUP(3, 1, BW_20), #endif - MCS_GROUP(1, 0, 1), - MCS_GROUP(2, 0, 1), + MCS_GROUP(1, 0, BW_40), + MCS_GROUP(2, 0, BW_40), #if MINSTREL_MAX_STREAMS >= 3 - MCS_GROUP(3, 0, 1), + MCS_GROUP(3, 0, BW_40), #endif - MCS_GROUP(1, 1, 1), - MCS_GROUP(2, 1, 1), + MCS_GROUP(1, 1, BW_40), + MCS_GROUP(2, 1, BW_40), #if MINSTREL_MAX_STREAMS >= 3 - MCS_GROUP(3, 1, 1), + MCS_GROUP(3, 1, BW_40), #endif - /* must be last */ - CCK_GROUP -}; + CCK_GROUP, + +#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT + VHT_GROUP(1, 0, BW_20), + VHT_GROUP(2, 0, BW_20), +#if MINSTREL_MAX_STREAMS >= 3 + VHT_GROUP(3, 0, BW_20), +#endif -#define MINSTREL_CCK_GROUP (ARRAY_SIZE(minstrel_mcs_groups) - 1) + VHT_GROUP(1, 1, BW_20), + VHT_GROUP(2, 1, BW_20), +#if MINSTREL_MAX_STREAMS >= 3 + VHT_GROUP(3, 1, BW_20), +#endif + + VHT_GROUP(1, 0, BW_40), + VHT_GROUP(2, 0, BW_40), +#if MINSTREL_MAX_STREAMS >= 3 + VHT_GROUP(3, 0, BW_40), +#endif + + VHT_GROUP(1, 1, BW_40), + VHT_GROUP(2, 1, BW_40), +#if MINSTREL_MAX_STREAMS >= 3 + VHT_GROUP(3, 1, BW_40), +#endif + + VHT_GROUP(1, 0, BW_80), + VHT_GROUP(2, 0, BW_80), +#if MINSTREL_MAX_STREAMS >= 3 + VHT_GROUP(3, 0, BW_80), +#endif + + VHT_GROUP(1, 1, BW_80), + VHT_GROUP(2, 1, BW_80), +#if MINSTREL_MAX_STREAMS >= 3 + VHT_GROUP(3, 1, BW_80), +#endif +#endif +}; static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly; static void minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi); +/* + * Some VHT MCSes are invalid (when Ndbps / Nes is not an integer) + * e.g for MCS9@20MHzx1Nss: Ndbps=8x52*(5/6) Nes=1 + * + * Returns the valid mcs map for struct minstrel_mcs_group_data.supported + */ +static u16 +minstrel_get_valid_vht_rates(int bw, int nss, __le16 mcs_map) +{ + u16 mask = 0; + + if (bw == BW_20) { + if (nss != 3 && nss != 6) + mask = BIT(9); + } else if (bw == BW_80) { + if (nss == 3 || nss == 7) + mask = BIT(6); + else if (nss == 6) + mask = BIT(9); + } else { + WARN_ON(bw != BW_40); + } + + switch ((le16_to_cpu(mcs_map) >> (2 * (nss - 1))) & 3) { + case IEEE80211_VHT_MCS_SUPPORT_0_7: + mask |= 0x300; + break; + case IEEE80211_VHT_MCS_SUPPORT_0_8: + mask |= 0x200; + break; + case IEEE80211_VHT_MCS_SUPPORT_0_9: + break; + default: + mask 
= 0x3ff; + } + + return 0x3ff & ~mask; +} + /* * Look up an MCS group index based on mac80211 rate information */ static int minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate) { - return GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1, + return GROUP_IDX((rate->idx / 8) + 1, !!(rate->flags & IEEE80211_TX_RC_SHORT_GI), !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)); } +static int +minstrel_vht_get_group_idx(struct ieee80211_tx_rate *rate) +{ + return VHT_GROUP_IDX(ieee80211_rate_get_vht_nss(rate), + !!(rate->flags & IEEE80211_TX_RC_SHORT_GI), + !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + + 2*!!(rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)); +} + static struct minstrel_rate_stats * minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, struct ieee80211_tx_rate *rate) @@ -149,6 +288,9 @@ minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, if (rate->flags & IEEE80211_TX_RC_MCS) { group = minstrel_ht_get_group_idx(rate); idx = rate->idx % 8; + } else if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { + group = minstrel_vht_get_group_idx(rate); + idx = ieee80211_rate_get_vht_mcs(rate); } else { group = MINSTREL_CCK_GROUP; @@ -240,8 +382,8 @@ minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate) * MCS groups, CCK rates do not provide aggregation and are therefore at last. */ static void -minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u8 index, - u8 *tp_list) +minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u16 index, + u16 *tp_list) { int cur_group, cur_idx, cur_thr, cur_prob; int tmp_group, tmp_idx, tmp_thr, tmp_prob; @@ -275,7 +417,7 @@ minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u8 index, * Find and set the topmost probability rate per sta and per group */ static void -minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u8 index) +minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index) { struct minstrel_mcs_group_data *mg; struct minstrel_rate_stats *mr; @@ -318,8 +460,8 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u8 index) */ static void minstrel_ht_assign_best_tp_rates(struct minstrel_ht_sta *mi, - u8 tmp_mcs_tp_rate[MAX_THR_RATES], - u8 tmp_cck_tp_rate[MAX_THR_RATES]) + u16 tmp_mcs_tp_rate[MAX_THR_RATES], + u16 tmp_cck_tp_rate[MAX_THR_RATES]) { unsigned int tmp_group, tmp_idx, tmp_cck_tp, tmp_mcs_tp; int i; @@ -383,8 +525,8 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) struct minstrel_mcs_group_data *mg; struct minstrel_rate_stats *mr; int group, i, j; - u8 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES]; - u8 tmp_cck_tp_rate[MAX_THR_RATES], index; + u16 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES]; + u16 tmp_cck_tp_rate[MAX_THR_RATES], index; if (mi->ampdu_packets > 0) { mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len, @@ -482,7 +624,8 @@ minstrel_ht_txstat_valid(struct minstrel_priv *mp, struct ieee80211_tx_rate *rat if (!rate->count) return false; - if (rate->flags & IEEE80211_TX_RC_MCS) + if (rate->flags & IEEE80211_TX_RC_MCS || + rate->flags & IEEE80211_TX_RC_VHT_MCS) return true; return rate->idx == mp->cck_rates[0] || @@ -514,7 +657,7 @@ minstrel_next_sample_idx(struct minstrel_ht_sta *mi) } static void -minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u8 *idx, bool primary) +minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u16 *idx, bool primary) { int group, orig_group; @@ -544,6 +687,9 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb) 
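/* To make the mask arithmetic in minstrel_get_valid_vht_rates() above
 * concrete, here is a worked standalone version (same logic; kernel
 * types swapped for stdint, and the IEEE80211_VHT_MCS_SUPPORT_0_7/0_8/0_9
 * map encodings written out as 0/1/2, with 3 meaning unsupported):
 */
#include <stdint.h>
#include <stdio.h>

#define EX_BW_20 0
#define EX_BW_40 1
#define EX_BW_80 2

/* Returns a 10-bit map of usable VHT MCS 0..9 for one stream count. */
static uint16_t ex_valid_vht_rates(int bw, int nss, uint16_t mcs_map)
{
	uint16_t mask = 0;

	if (bw == EX_BW_20) {
		if (nss != 3 && nss != 6)
			mask = 1u << 9;		/* MCS9 invalid at 20 MHz */
	} else if (bw == EX_BW_80) {
		if (nss == 3 || nss == 7)
			mask = 1u << 6;		/* MCS6 invalid at 80 MHz */
		else if (nss == 6)
			mask = 1u << 9;
	}

	switch ((mcs_map >> (2 * (nss - 1))) & 3) {
	case 0: mask |= 0x300; break;	/* peer supports MCS 0-7 only */
	case 1: mask |= 0x200; break;	/* peer supports MCS 0-8 only */
	case 2: break;			/* peer supports MCS 0-9 */
	default: mask = 0x3ff;		/* this NSS not supported */
	}

	return 0x3ff & ~mask;
}

int main(void)
{
	/* 1 stream, 20 MHz, peer advertises MCS 0-9 for NSS 1 (map = 2):
	 * MCS9 is still invalid at 20MHz x 1SS, so this prints 0x1ff. */
	printf("0x%03x\n", ex_valid_vht_rates(EX_BW_20, 1, 0x0002));
	return 0;
}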
struct sta_info *sta = container_of(pubsta, struct sta_info, sta); u16 tid; + if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO) + return; + if (unlikely(!ieee80211_is_data_qos(hdr->frame_control))) return; @@ -554,20 +700,16 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb) if (likely(sta->ampdu_mlme.tid_tx[tid])) return; - if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO) - return; - ieee80211_start_tx_ba_session(pubsta, tid, 5000); } static void minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta, - struct sk_buff *skb) + struct ieee80211_tx_info *info) { struct minstrel_ht_sta_priv *msp = priv_sta; struct minstrel_ht_sta *mi = &msp->ht; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_rate *ar = info->status.rates; struct minstrel_rate_stats *rate, *rate2; struct minstrel_priv *mp = priv; @@ -575,7 +717,8 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, int i; if (!msp->is_ht) - return mac80211_minstrel.tx_status(priv, sband, sta, &msp->legacy, skb); + return mac80211_minstrel.tx_status_noskb(priv, sband, sta, + &msp->legacy, info); /* This packet was aggregated but doesn't carry status info */ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && @@ -636,9 +779,6 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) { update = true; minstrel_ht_update_stats(mp, mi); - if (!(info->flags & IEEE80211_TX_CTL_AMPDU) && - mi->max_prob_rate / MCS_GROUP_RATES != MINSTREL_CCK_GROUP) - minstrel_aggr_check(sta, skb); } if (update) @@ -711,7 +851,7 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; struct minstrel_rate_stats *mr; u8 idx; - u16 flags; + u16 flags = group->flags; mr = minstrel_get_ratestats(mi, index); if (!mr->retry_updated) @@ -727,13 +867,13 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, ratetbl->rate[offset].count_rts = mr->retry_count_rtscts; } - if (index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) { + if (index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) idx = mp->cck_rates[index % ARRAY_SIZE(mp->cck_rates)]; - flags = 0; - } else { + else if (flags & IEEE80211_TX_RC_VHT_MCS) + idx = ((group->streams - 1) << 4) | + ((index % MCS_GROUP_RATES) & 0xF); + else idx = index % MCS_GROUP_RATES + (group->streams - 1) * 8; - flags = IEEE80211_TX_RC_MCS | group->flags; - } if (offset > 0) { ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts; @@ -880,6 +1020,10 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, if (!msp->is_ht) return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc); + if (!(info->flags & IEEE80211_TX_CTL_AMPDU) && + mi->max_prob_rate / MCS_GROUP_RATES != MINSTREL_CCK_GROUP) + minstrel_aggr_check(sta, txrc->skb); + info->flags |= mi->tx_flags; minstrel_ht_check_cck_shortpreamble(mp, mi, txrc->short_preamble); @@ -913,13 +1057,15 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, if (sample_idx / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) { int idx = sample_idx % ARRAY_SIZE(mp->cck_rates); rate->idx = mp->cck_rates[idx]; - rate->flags = 0; - return; + } else if (sample_group->flags & IEEE80211_TX_RC_VHT_MCS) { + ieee80211_rate_set_vht(rate, sample_idx % MCS_GROUP_RATES, + sample_group->streams); + } else { + rate->idx = sample_idx % 
MCS_GROUP_RATES + + (sample_group->streams - 1) * 8; } - rate->idx = sample_idx % MCS_GROUP_RATES + - (sample_group->streams - 1) * 8; - rate->flags = IEEE80211_TX_RC_MCS | sample_group->flags; + rate->flags = sample_group->flags; } static void @@ -959,6 +1105,8 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, struct minstrel_ht_sta *mi = &msp->ht; struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs; u16 sta_cap = sta->ht_cap.cap; + struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + int use_vht; int n_supported = 0; int ack_dur; int stbc; @@ -968,8 +1116,14 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, if (!sta->ht_cap.ht_supported) goto use_legacy; - BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != - MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS + 1); + BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != MINSTREL_GROUPS_NB); + +#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT + if (vht_cap->vht_supported) + use_vht = vht_cap->vht_mcs.tx_mcs_map != cpu_to_le16(~0); + else +#endif + use_vht = 0; msp->is_ht = true; memset(mi, 0, sizeof(*mi)); @@ -994,22 +1148,28 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, } mi->sample_tries = 4; - stbc = (sta_cap & IEEE80211_HT_CAP_RX_STBC) >> - IEEE80211_HT_CAP_RX_STBC_SHIFT; - mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT; + /* TODO tx_flags for vht - ATM the RC API is not fine-grained enough */ + if (!use_vht) { + stbc = (sta_cap & IEEE80211_HT_CAP_RX_STBC) >> + IEEE80211_HT_CAP_RX_STBC_SHIFT; + mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT; - if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING) - mi->tx_flags |= IEEE80211_TX_CTL_LDPC; + if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING) + mi->tx_flags |= IEEE80211_TX_CTL_LDPC; + } for (i = 0; i < ARRAY_SIZE(mi->groups); i++) { + u32 gflags = minstrel_mcs_groups[i].flags; + int bw, nss; + mi->groups[i].supported = 0; if (i == MINSTREL_CCK_GROUP) { minstrel_ht_update_cck(mp, mi, sband, sta); continue; } - if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI) { - if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) { + if (gflags & IEEE80211_TX_RC_SHORT_GI) { + if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH) { if (!(sta_cap & IEEE80211_HT_CAP_SGI_40)) continue; } else { @@ -1018,17 +1178,51 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, } } - if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH && + if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH && sta->bandwidth < IEEE80211_STA_RX_BW_40) continue; + nss = minstrel_mcs_groups[i].streams; + /* Mark MCS > 7 as unsupported if STA is in static SMPS mode */ - if (sta->smps_mode == IEEE80211_SMPS_STATIC && - minstrel_mcs_groups[i].streams > 1) + if (sta->smps_mode == IEEE80211_SMPS_STATIC && nss > 1) + continue; + + /* HT rate */ + if (gflags & IEEE80211_TX_RC_MCS) { +#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT + if (use_vht && minstrel_vht_only) + continue; +#endif + mi->groups[i].supported = mcs->rx_mask[nss - 1]; + if (mi->groups[i].supported) + n_supported++; continue; + } + + /* VHT rate */ + if (!vht_cap->vht_supported || + WARN_ON(!(gflags & IEEE80211_TX_RC_VHT_MCS)) || + WARN_ON(gflags & IEEE80211_TX_RC_160_MHZ_WIDTH)) + continue; + + if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH) { + if (sta->bandwidth < IEEE80211_STA_RX_BW_80 || + ((gflags & IEEE80211_TX_RC_SHORT_GI) && + !(vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80))) { + continue; + } + } + + if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH) + bw = BW_40; + else if (gflags & 
IEEE80211_TX_RC_80_MHZ_WIDTH) + bw = BW_80; + else + bw = BW_20; - mi->groups[i].supported = - mcs->rx_mask[minstrel_mcs_groups[i].streams - 1]; + mi->groups[i].supported = minstrel_get_valid_vht_rates(bw, nss, + vht_cap->vht_mcs.tx_mcs_map); if (mi->groups[i].supported) n_supported++; @@ -1146,7 +1340,7 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta) static const struct rate_control_ops mac80211_minstrel_ht = { .name = "minstrel_ht", - .tx_status = minstrel_ht_tx_status, + .tx_status_noskb = minstrel_ht_tx_status, .get_rate = minstrel_ht_get_rate, .rate_init = minstrel_ht_rate_init, .rate_update = minstrel_ht_rate_update, diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h index 01570e0e014b0995beef18504825b848ec00cbad..f2217d6aa0c2ee28b158616711b99895b250df44 100644 --- a/net/mac80211/rc80211_minstrel_ht.h +++ b/net/mac80211/rc80211_minstrel_ht.h @@ -13,10 +13,32 @@ * The number of streams can be changed to 2 to reduce code * size and memory footprint. */ -#define MINSTREL_MAX_STREAMS 3 -#define MINSTREL_STREAM_GROUPS 4 +#define MINSTREL_MAX_STREAMS 3 +#define MINSTREL_HT_STREAM_GROUPS 4 /* BW(=2) * SGI(=2) */ +#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT +#define MINSTREL_VHT_STREAM_GROUPS 6 /* BW(=3) * SGI(=2) */ +#else +#define MINSTREL_VHT_STREAM_GROUPS 0 +#endif -#define MCS_GROUP_RATES 8 +#define MINSTREL_HT_GROUPS_NB (MINSTREL_MAX_STREAMS * \ + MINSTREL_HT_STREAM_GROUPS) +#define MINSTREL_VHT_GROUPS_NB (MINSTREL_MAX_STREAMS * \ + MINSTREL_VHT_STREAM_GROUPS) +#define MINSTREL_CCK_GROUPS_NB 1 +#define MINSTREL_GROUPS_NB (MINSTREL_HT_GROUPS_NB + \ + MINSTREL_VHT_GROUPS_NB + \ + MINSTREL_CCK_GROUPS_NB) + +#define MINSTREL_HT_GROUP_0 0 +#define MINSTREL_CCK_GROUP (MINSTREL_HT_GROUP_0 + MINSTREL_HT_GROUPS_NB) +#define MINSTREL_VHT_GROUP_0 (MINSTREL_CCK_GROUP + 1) + +#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT +#define MCS_GROUP_RATES 10 +#else +#define MCS_GROUP_RATES 8 +#endif struct mcs_group { u32 flags; @@ -31,11 +53,11 @@ struct minstrel_mcs_group_data { u8 column; /* bitfield of supported MCS rates of this group */ - u8 supported; + u16 supported; /* sorted rate set within a MCS group*/ - u8 max_group_tp_rate[MAX_THR_RATES]; - u8 max_group_prob_rate; + u16 max_group_tp_rate[MAX_THR_RATES]; + u16 max_group_prob_rate; /* MCS rate statistics */ struct minstrel_rate_stats rates[MCS_GROUP_RATES]; @@ -52,8 +74,8 @@ struct minstrel_ht_sta { unsigned int avg_ampdu_len; /* overall sorted rate set */ - u8 max_tp_rate[MAX_THR_RATES]; - u8 max_prob_rate; + u16 max_tp_rate[MAX_THR_RATES]; + u16 max_prob_rate; /* time of last status update */ unsigned long stats_update; @@ -80,7 +102,7 @@ struct minstrel_ht_sta { u8 cck_supported_short; /* MCS rate group info and statistics */ - struct minstrel_mcs_group_data groups[MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS + 1]; + struct minstrel_mcs_group_data groups[MINSTREL_GROUPS_NB]; }; struct minstrel_ht_sta_priv { diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c index d537bec9375463eb115b0f26260f8f3d03759596..20c676b8e5b68fa366ed670640e9091c39ebfde3 100644 --- a/net/mac80211/rc80211_minstrel_ht_debugfs.c +++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c @@ -18,19 +18,23 @@ static char * minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) { - unsigned int max_mcs = MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS; const struct mcs_group *mg; unsigned int j, tp, prob, eprob; char htmode = '2'; char gimode = 'L'; + u32 gflags; if 
(!mi->groups[i].supported) return p; mg = &minstrel_mcs_groups[i]; - if (mg->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + gflags = mg->flags; + + if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH) htmode = '4'; - if (mg->flags & IEEE80211_TX_RC_SHORT_GI) + else if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH) + htmode = '8'; + if (gflags & IEEE80211_TX_RC_SHORT_GI) gimode = 'S'; for (j = 0; j < MCS_GROUP_RATES; j++) { @@ -41,10 +45,12 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) if (!(mi->groups[i].supported & BIT(j))) continue; - if (i == max_mcs) - p += sprintf(p, "CCK/%cP ", j < 4 ? 'L' : 'S'); + if (gflags & IEEE80211_TX_RC_MCS) + p += sprintf(p, " HT%c0/%cGI ", htmode, gimode); + else if (gflags & IEEE80211_TX_RC_VHT_MCS) + p += sprintf(p, "VHT%c0/%cGI ", htmode, gimode); else - p += sprintf(p, "HT%c0/%cGI ", htmode, gimode); + p += sprintf(p, " CCK/%cP ", j < 4 ? 'L' : 'S'); *(p++) = (idx == mi->max_tp_rate[0]) ? 'A' : ' '; *(p++) = (idx == mi->max_tp_rate[1]) ? 'B' : ' '; @@ -52,11 +58,14 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) *(p++) = (idx == mi->max_tp_rate[3]) ? 'D' : ' '; *(p++) = (idx == mi->max_prob_rate) ? 'P' : ' '; - if (i == max_mcs) { - int r = bitrates[j % 4]; - p += sprintf(p, " %2u.%1uM", r / 10, r % 10); + if (gflags & IEEE80211_TX_RC_MCS) { + p += sprintf(p, " MCS%-2u ", (mg->streams - 1) * 8 + j); + } else if (gflags & IEEE80211_TX_RC_VHT_MCS) { + p += sprintf(p, " MCS%-1u/%1u", j, mg->streams); } else { - p += sprintf(p, " MCS%-2u", (mg->streams - 1) * 8 + j); + int r = bitrates[j % 4]; + + p += sprintf(p, " %2u.%1uM ", r / 10, r % 10); } tp = mr->cur_tp / 10; @@ -85,7 +94,6 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file) struct minstrel_ht_sta *mi = &msp->ht; struct minstrel_debugfs_info *ms; unsigned int i; - unsigned int max_mcs = MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS; char *p; int ret; @@ -96,18 +104,19 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file) return ret; } - ms = kmalloc(8192, GFP_KERNEL); + ms = kmalloc(32768, GFP_KERNEL); if (!ms) return -ENOMEM; file->private_data = ms; p = ms->buf; - p += sprintf(p, "type rate tpt eprob *prob " + p += sprintf(p, " type rate tpt eprob *prob " "ret *ok(*cum) ok( cum)\n"); - - p = minstrel_ht_stats_dump(mi, max_mcs, p); - for (i = 0; i < max_mcs; i++) + p = minstrel_ht_stats_dump(mi, MINSTREL_CCK_GROUP, p); + for (i = 0; i < MINSTREL_CCK_GROUP; i++) + p = minstrel_ht_stats_dump(mi, i, p); + for (i++; i < ARRAY_SIZE(mi->groups); i++) p = minstrel_ht_stats_dump(mi, i, p); p += sprintf(p, "\nTotal packet count:: ideal %d " @@ -119,7 +128,7 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file) MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10); ms->len = p - ms->buf; - WARN_ON(ms->len + sizeof(*ms) > 8192); + WARN_ON(ms->len + sizeof(*ms) > 32768); return nonseekable_open(inode, file); } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index a37f9af634cb6d3c7bb73ada859473e0e085402d..49c23bdf08bb2f65786269ccb11ccf8b520a6d9a 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -39,7 +39,8 @@ * only useful for monitoring. 
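+ * (Descriptive note on the change below: with RX_FLAG_RADIOTAP_VENDOR_DATA,
+ * a struct ieee80211_vendor_radiotap blob of sizeof(*rtap) + rtap->len +
+ * rtap->pad bytes may sit at the head of the skb; the rtap_vendor_space
+ * argument added here lets this helper pull that blob off in addition to
+ * trimming the FCS.)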
*/ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local, - struct sk_buff *skb) + struct sk_buff *skb, + unsigned int rtap_vendor_space) { if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) { if (likely(skb->len > FCS_LEN)) @@ -52,20 +53,25 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local, } } + __pskb_pull(skb, rtap_vendor_space); + return skb; } -static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len) +static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len, + unsigned int rtap_vendor_space) { struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); - struct ieee80211_hdr *hdr = (void *)skb->data; + struct ieee80211_hdr *hdr; + + hdr = (void *)(skb->data + rtap_vendor_space); if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC | RX_FLAG_AMPDU_IS_ZEROLEN)) return true; - if (unlikely(skb->len < 16 + present_fcs_len)) + if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space)) return true; if (ieee80211_is_ctl(hdr->frame_control) && @@ -77,8 +83,9 @@ static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len) } static int -ieee80211_rx_radiotap_space(struct ieee80211_local *local, - struct ieee80211_rx_status *status) +ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local, + struct ieee80211_rx_status *status, + struct sk_buff *skb) { int len; @@ -121,6 +128,21 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local, len += 2 * hweight8(status->chains); } + if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { + struct ieee80211_vendor_radiotap *rtap = (void *)skb->data; + + /* vendor presence bitmap */ + len += 4; + /* alignment for fixed 6-byte vendor data header */ + len = ALIGN(len, 2); + /* vendor data header */ + len += 6; + if (WARN_ON(rtap->align == 0)) + rtap->align = 1; + len = ALIGN(len, rtap->align); + len += rtap->len + rtap->pad; + } + return len; } @@ -144,13 +166,20 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, u16 channel_flags = 0; int mpdulen, chain; unsigned long chains = status->chains; + struct ieee80211_vendor_radiotap rtap = {}; + + if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { + rtap = *(struct ieee80211_vendor_radiotap *)skb->data; + /* rtap.len and rtap.pad are undone immediately */ + skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad); + } mpdulen = skb->len; if (!(has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS))) mpdulen += FCS_LEN; rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len); - memset(rthdr, 0, rtap_len); + memset(rthdr, 0, rtap_len - rtap.len - rtap.pad); it_present = &rthdr->it_present; /* radiotap header, set always present flags */ @@ -172,6 +201,14 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL); } + if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { + it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) | + BIT(IEEE80211_RADIOTAP_EXT); + put_unaligned_le32(it_present_val, it_present); + it_present++; + it_present_val = rtap.present; + } + put_unaligned_le32(it_present_val, it_present); pos = (void *)(it_present + 1); @@ -366,6 +403,22 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, *pos++ = status->chain_signal[chain]; *pos++ = chain; } + + if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { + /* ensure 2 byte alignment for the vendor field as required */ + if ((pos - (u8 *)rthdr) & 1) + *pos++ = 0; + *pos++ = rtap.oui[0]; + *pos++ = rtap.oui[1]; + *pos++ = 
rtap.oui[2]; + *pos++ = rtap.subns; + put_unaligned_le16(rtap.len, pos); + pos += 2; + /* align the actual payload as requested */ + while ((pos - (u8 *)rthdr) & (rtap.align - 1)) + *pos++ = 0; + /* data (and possible padding) already follows */ + } } /* @@ -379,10 +432,17 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, { struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb); struct ieee80211_sub_if_data *sdata; - int needed_headroom; + int rt_hdrlen, needed_headroom; struct sk_buff *skb, *skb2; struct net_device *prev_dev = NULL; int present_fcs_len = 0; + unsigned int rtap_vendor_space = 0; + + if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) { + struct ieee80211_vendor_radiotap *rtap = (void *)origskb->data; + + rtap_vendor_space = sizeof(*rtap) + rtap->len + rtap->pad; + } /* * First, we may need to make a copy of the skb because @@ -396,25 +456,27 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) present_fcs_len = FCS_LEN; - /* ensure hdr->frame_control is in skb head */ - if (!pskb_may_pull(origskb, 2)) { + /* ensure hdr->frame_control and vendor radiotap data are in skb head */ + if (!pskb_may_pull(origskb, 2 + rtap_vendor_space)) { dev_kfree_skb(origskb); return NULL; } if (!local->monitors) { - if (should_drop_frame(origskb, present_fcs_len)) { + if (should_drop_frame(origskb, present_fcs_len, + rtap_vendor_space)) { dev_kfree_skb(origskb); return NULL; } - return remove_monitor_info(local, origskb); + return remove_monitor_info(local, origskb, rtap_vendor_space); } /* room for the radiotap header based on driver features */ - needed_headroom = ieee80211_rx_radiotap_space(local, status); + rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb); + needed_headroom = rt_hdrlen - rtap_vendor_space; - if (should_drop_frame(origskb, present_fcs_len)) { + if (should_drop_frame(origskb, present_fcs_len, rtap_vendor_space)) { /* only need to expand headroom if necessary */ skb = origskb; origskb = NULL; @@ -438,15 +500,15 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, */ skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC); - origskb = remove_monitor_info(local, origskb); + origskb = remove_monitor_info(local, origskb, + rtap_vendor_space); if (!skb) return origskb; } /* prepend radiotap information */ - ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, - true); + ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true); skb_reset_mac_header(skb); skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -985,7 +1047,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, } static ieee80211_rx_result debug_noinline -ieee80211_rx_h_check(struct ieee80211_rx_data *rx) +ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); @@ -994,10 +1056,16 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx) * Drop duplicate 802.11 retransmissions * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery") */ - if (rx->skb->len >= 24 && rx->sta && - !ieee80211_is_ctl(hdr->frame_control) && - !ieee80211_is_qos_nullfunc(hdr->frame_control) && - !is_multicast_ether_addr(hdr->addr1)) { + + if (rx->skb->len < 24) + return RX_CONTINUE; + + if (ieee80211_is_ctl(hdr->frame_control) || + ieee80211_is_qos_nullfunc(hdr->frame_control) || + 
is_multicast_ether_addr(hdr->addr1)) + return RX_CONTINUE; + + if (rx->sta) { if (unlikely(ieee80211_has_retry(hdr->frame_control) && rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) { @@ -1011,6 +1079,14 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx) } } + return RX_CONTINUE; +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_check(struct ieee80211_rx_data *rx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; + if (unlikely(rx->skb->len < 16)) { I802_DEBUG_INC(rx->local->rx_handlers_drop_short); return RX_DROP_MONITOR; @@ -1032,6 +1108,7 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx) ieee80211_is_pspoll(hdr->frame_control)) && rx->sdata->vif.type != NL80211_IFTYPE_ADHOC && rx->sdata->vif.type != NL80211_IFTYPE_WDS && + rx->sdata->vif.type != NL80211_IFTYPE_OCB && (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) { /* * accept port control frames from the AP even when it's not @@ -1272,6 +1349,12 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) sta->last_rx_rate_vht_nss = status->vht_nss; } } + } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) { + u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, + NL80211_IFTYPE_OCB); + /* OCB uses wild-card BSSID */ + if (is_broadcast_ether_addr(bssid)) + sta->last_rx = jiffies; } else if (!is_multicast_ether_addr(hdr->addr1)) { /* * Mesh beacons will update last_rx when if they are found to @@ -2250,6 +2333,27 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx) if (!ieee80211_frame_allowed(rx, fc)) return RX_DROP_MONITOR; + /* directly handle TDLS channel switch requests/responses */ + if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto == + cpu_to_be16(ETH_P_TDLS))) { + struct ieee80211_tdls_data *tf = (void *)rx->skb->data; + + if (pskb_may_pull(rx->skb, + offsetof(struct ieee80211_tdls_data, u)) && + tf->payload_type == WLAN_TDLS_SNAP_RFTYPE && + tf->category == WLAN_CATEGORY_TDLS && + (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST || + tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) { + rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TDLS_CHSW; + skb_queue_tail(&sdata->skb_queue, rx->skb); + ieee80211_queue_work(&rx->local->hw, &sdata->work); + if (rx->sta) + rx->sta->rx_packets++; + + return RX_QUEUED; + } + } + if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && unlikely(port_control) && sdata->bss) { sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, @@ -2820,6 +2924,7 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) if (!ieee80211_vif_is_mesh(&sdata->vif) && sdata->vif.type != NL80211_IFTYPE_ADHOC && + sdata->vif.type != NL80211_IFTYPE_OCB && sdata->vif.type != NL80211_IFTYPE_STATION) return RX_DROP_MONITOR; @@ -2884,8 +2989,10 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, if (!local->cooked_mntrs) goto out_free_skb; + /* vendor data is long removed here */ + status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA; /* room for the radiotap header based on driver features */ - needed_headroom = ieee80211_rx_radiotap_space(local, status); + needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb); if (skb_headroom(skb) < needed_headroom && pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) @@ -3038,6 +3145,7 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) goto rxh_next; \ } while (0); + CALL_RXH(ieee80211_rx_h_check_dup) CALL_RXH(ieee80211_rx_h_check) ieee80211_rx_reorder_ampdu(rx, &reorder_release); @@ -3130,6 +3238,33 @@ static bool prepare_for_handlers(struct ieee80211_rx_data *rx, 
BIT(rate_idx)); } break; + case NL80211_IFTYPE_OCB: + if (!bssid) + return false; + if (ieee80211_is_beacon(hdr->frame_control)) { + return false; + } else if (!is_broadcast_ether_addr(bssid)) { + ocb_dbg(sdata, "BSSID mismatch in OCB mode!\n"); + return false; + } else if (!multicast && + !ether_addr_equal(sdata->dev->dev_addr, + hdr->addr1)) { + /* if we are in promisc mode we also accept + * packets not destined for us + */ + if (!(sdata->dev->flags & IFF_PROMISC)) + return false; + rx->flags &= ~IEEE80211_RX_RA_MATCH; + } else if (!rx->sta) { + int rate_idx; + if (status->flag & RX_FLAG_HT) + rate_idx = 0; /* TODO: HT rates */ + else + rate_idx = status->rate_idx; + ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2, + BIT(rate_idx)); + } + break; case NL80211_IFTYPE_MESH_POINT: if (!multicast && !ether_addr_equal(sdata->vif.addr, hdr->addr1)) { diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index af0d094b2f2f0ecc6939edd15b2c2d253763e10b..ae842678b629ac09dab4348385d6961e04376069 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -184,9 +184,21 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb) return; if (ieee80211_is_probe_resp(mgmt->frame_control)) { - /* ignore ProbeResp to foreign address */ - if ((!sdata1 || !ether_addr_equal(mgmt->da, sdata1->vif.addr)) && - (!sdata2 || !ether_addr_equal(mgmt->da, sdata2->vif.addr))) + struct cfg80211_scan_request *scan_req; + struct cfg80211_sched_scan_request *sched_scan_req; + + scan_req = rcu_dereference(local->scan_req); + sched_scan_req = rcu_dereference(local->sched_scan_req); + + /* ignore ProbeResp to foreign address unless scanning + * with randomised address + */ + if (!(sdata1 && + (ether_addr_equal(mgmt->da, sdata1->vif.addr) || + scan_req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)) && + !(sdata2 && + (ether_addr_equal(mgmt->da, sdata2->vif.addr) || + sched_scan_req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR))) return; elements = mgmt->u.probe_resp.variable; @@ -234,11 +246,14 @@ ieee80211_prepare_scan_chandef(struct cfg80211_chan_def *chandef, /* return false if no more work */ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local) { - struct cfg80211_scan_request *req = local->scan_req; + struct cfg80211_scan_request *req; struct cfg80211_chan_def chandef; u8 bands_used = 0; int i, ielen, n_chans; + req = rcu_dereference_protected(local->scan_req, + lockdep_is_held(&local->mtx)); + if (test_bit(SCAN_HW_CANCELLED, &local->scanning)) return false; @@ -281,6 +296,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local) bands_used, req->rates, &chandef); local->hw_scan_req->req.ie_len = ielen; local->hw_scan_req->req.no_cck = req->no_cck; + ether_addr_copy(local->hw_scan_req->req.mac_addr, req->mac_addr); + ether_addr_copy(local->hw_scan_req->req.mac_addr_mask, + req->mac_addr_mask); return true; } @@ -290,6 +308,8 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) struct ieee80211_local *local = hw_to_local(hw); bool hw_scan = local->ops->hw_scan; bool was_scanning = local->scanning; + struct cfg80211_scan_request *scan_req; + struct ieee80211_sub_if_data *scan_sdata; lockdep_assert_held(&local->mtx); @@ -322,9 +342,15 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) kfree(local->hw_scan_req); local->hw_scan_req = NULL; - if (local->scan_req != local->int_scan_req) - cfg80211_scan_done(local->scan_req, aborted); - local->scan_req = NULL; + scan_req = rcu_dereference_protected(local->scan_req, + 
lockdep_is_held(&local->mtx)); + + if (scan_req != local->int_scan_req) + cfg80211_scan_done(scan_req, aborted); + RCU_INIT_POINTER(local->scan_req, NULL); + + scan_sdata = rcu_dereference_protected(local->scan_sdata, + lockdep_is_held(&local->mtx)); RCU_INIT_POINTER(local->scan_sdata, NULL); local->scanning = 0; @@ -335,7 +361,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) if (!hw_scan) { ieee80211_configure_filter(local); - drv_sw_scan_complete(local); + drv_sw_scan_complete(local, scan_sdata); ieee80211_offchannel_return(local); } @@ -361,7 +387,8 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) } EXPORT_SYMBOL(ieee80211_scan_completed); -static int ieee80211_start_sw_scan(struct ieee80211_local *local) +static int ieee80211_start_sw_scan(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) { /* Software scan is not supported in multi-channel cases */ if (local->use_chanctx) @@ -380,7 +407,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local) * nullfunc frames and probe requests will be dropped in * ieee80211_tx_h_check_assoc(). */ - drv_sw_scan_start(local); + drv_sw_scan_start(local, sdata, local->scan_addr); local->leave_oper_channel_time = jiffies; local->next_scan_state = SCAN_DECISION; @@ -440,23 +467,26 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, { int i; struct ieee80211_sub_if_data *sdata; + struct cfg80211_scan_request *scan_req; enum ieee80211_band band = local->hw.conf.chandef.chan->band; u32 tx_flags; + scan_req = rcu_dereference_protected(local->scan_req, + lockdep_is_held(&local->mtx)); + tx_flags = IEEE80211_TX_INTFL_OFFCHAN_TX_OK; - if (local->scan_req->no_cck) + if (scan_req->no_cck) tx_flags |= IEEE80211_TX_CTL_NO_CCK_RATE; sdata = rcu_dereference_protected(local->scan_sdata, lockdep_is_held(&local->mtx)); - for (i = 0; i < local->scan_req->n_ssids; i++) + for (i = 0; i < scan_req->n_ssids; i++) ieee80211_send_probe_req( - sdata, NULL, - local->scan_req->ssids[i].ssid, - local->scan_req->ssids[i].ssid_len, - local->scan_req->ie, local->scan_req->ie_len, - local->scan_req->rates[band], false, + sdata, local->scan_addr, NULL, + scan_req->ssids[i].ssid, scan_req->ssids[i].ssid_len, + scan_req->ie, scan_req->ie_len, + scan_req->rates[band], false, tx_flags, local->hw.conf.chandef.chan, true); /* @@ -480,7 +510,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, if (!ieee80211_can_scan(local, sdata)) { /* wait for the work to finish/time out */ - local->scan_req = req; + rcu_assign_pointer(local->scan_req, req); rcu_assign_pointer(local->scan_sdata, sdata); return 0; } @@ -530,9 +560,16 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, */ } - local->scan_req = req; + rcu_assign_pointer(local->scan_req, req); rcu_assign_pointer(local->scan_sdata, sdata); + if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) + get_random_mask_addr(local->scan_addr, + req->mac_addr, + req->mac_addr_mask); + else + memcpy(local->scan_addr, sdata->vif.addr, ETH_ALEN); + if (local->ops->hw_scan) { __set_bit(SCAN_HW_SCANNING, &local->scanning); } else if ((req->n_channels == 1) && @@ -549,7 +586,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, /* Notify driver scan is starting, keep order of operations * same as normal software scan, in case that matters. 
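+ * local->scan_addr is either the vif address or, when userspace set
+ * NL80211_SCAN_FLAG_RANDOM_ADDR, an address randomised from
+ * req->mac_addr and req->mac_addr_mask earlier in this function.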
*/ - drv_sw_scan_start(local); + drv_sw_scan_start(local, sdata, local->scan_addr); ieee80211_configure_filter(local); /* accept probe-responses */ @@ -558,7 +595,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, if ((req->channels[0]->flags & IEEE80211_CHAN_NO_IR) || - !local->scan_req->n_ssids) { + !req->n_ssids) { next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; } else { ieee80211_scan_state_send_probe(local, &next_delay); @@ -579,8 +616,9 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, if (local->ops->hw_scan) { WARN_ON(!ieee80211_prep_hw_scan(local)); rc = drv_hw_scan(local, sdata, local->hw_scan_req); - } else - rc = ieee80211_start_sw_scan(local); + } else { + rc = ieee80211_start_sw_scan(local, sdata); + } if (rc) { kfree(local->hw_scan_req); @@ -617,6 +655,7 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata; struct ieee80211_channel *next_chan; enum mac80211_scan_state next_scan_state; + struct cfg80211_scan_request *scan_req; /* * check if at least one STA interface is associated, @@ -641,7 +680,10 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local, } mutex_unlock(&local->iflist_mtx); - next_chan = local->scan_req->channels[local->scan_channel_idx]; + scan_req = rcu_dereference_protected(local->scan_req, + lockdep_is_held(&local->mtx)); + + next_chan = scan_req->channels[local->scan_channel_idx]; /* * we're currently scanning a different channel, let's @@ -656,7 +698,7 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local, local->leave_oper_channel_time + HZ / 8); if (associated && !tx_empty) { - if (local->scan_req->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) + if (scan_req->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) next_scan_state = SCAN_ABORT; else next_scan_state = SCAN_SUSPEND; @@ -677,14 +719,18 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, int skip; struct ieee80211_channel *chan; enum nl80211_bss_scan_width oper_scan_width; + struct cfg80211_scan_request *scan_req; + + scan_req = rcu_dereference_protected(local->scan_req, + lockdep_is_held(&local->mtx)); skip = 0; - chan = local->scan_req->channels[local->scan_channel_idx]; + chan = scan_req->channels[local->scan_channel_idx]; local->scan_chandef.chan = chan; local->scan_chandef.center_freq1 = chan->center_freq; local->scan_chandef.center_freq2 = 0; - switch (local->scan_req->scan_width) { + switch (scan_req->scan_width) { case NL80211_BSS_CHAN_WIDTH_5: local->scan_chandef.width = NL80211_CHAN_WIDTH_5; break; @@ -698,7 +744,7 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, oper_scan_width = cfg80211_chandef_to_scan_width( &local->_oper_chandef); if (chan == local->_oper_chandef.chan && - oper_scan_width == local->scan_req->scan_width) + oper_scan_width == scan_req->scan_width) local->scan_chandef = local->_oper_chandef; else local->scan_chandef.width = NL80211_CHAN_WIDTH_20_NOHT; @@ -727,8 +773,7 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, * * In any case, it is not necessary for a passive scan. 
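+ * (A request without SSIDs, or a channel flagged IEEE80211_CHAN_NO_IR,
+ * is scanned passively below, waiting out IEEE80211_PASSIVE_CHANNEL_TIME.)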
*/ - if (chan->flags & IEEE80211_CHAN_NO_IR || - !local->scan_req->n_ssids) { + if (chan->flags & IEEE80211_CHAN_NO_IR || !scan_req->n_ssids) { *next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; local->next_scan_state = SCAN_DECISION; return; @@ -777,6 +822,7 @@ void ieee80211_scan_work(struct work_struct *work) struct ieee80211_local *local = container_of(work, struct ieee80211_local, scan_work.work); struct ieee80211_sub_if_data *sdata; + struct cfg80211_scan_request *scan_req; unsigned long next_delay = 0; bool aborted; @@ -784,6 +830,8 @@ void ieee80211_scan_work(struct work_struct *work) sdata = rcu_dereference_protected(local->scan_sdata, lockdep_is_held(&local->mtx)); + scan_req = rcu_dereference_protected(local->scan_req, + lockdep_is_held(&local->mtx)); /* When scanning on-channel, the first-callback means completed. */ if (test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) { @@ -796,20 +844,19 @@ void ieee80211_scan_work(struct work_struct *work) goto out_complete; } - if (!sdata || !local->scan_req) + if (!sdata || !scan_req) goto out; - if (local->scan_req && !local->scanning) { - struct cfg80211_scan_request *req = local->scan_req; + if (!local->scanning) { int rc; - local->scan_req = NULL; + RCU_INIT_POINTER(local->scan_req, NULL); RCU_INIT_POINTER(local->scan_sdata, NULL); - rc = __ieee80211_start_scan(sdata, req); + rc = __ieee80211_start_scan(sdata, scan_req); if (rc) { /* need to complete scan in cfg80211 */ - local->scan_req = req; + rcu_assign_pointer(local->scan_req, scan_req); aborted = true; goto out_complete; } else @@ -829,7 +876,7 @@ void ieee80211_scan_work(struct work_struct *work) switch (local->next_scan_state) { case SCAN_DECISION: /* if no more bands/channels left, complete scan */ - if (local->scan_channel_idx >= local->scan_req->n_channels) { + if (local->scan_channel_idx >= scan_req->n_channels) { aborted = false; goto out_complete; } @@ -1043,7 +1090,7 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies); if (ret == 0) { rcu_assign_pointer(local->sched_scan_sdata, sdata); - local->sched_scan_req = req; + rcu_assign_pointer(local->sched_scan_req, req); } kfree(ie); @@ -1052,7 +1099,7 @@ out: if (ret) { /* Clean in case of failure after HW restart or upon resume. */ RCU_INIT_POINTER(local->sched_scan_sdata, NULL); - local->sched_scan_req = NULL; + RCU_INIT_POINTER(local->sched_scan_req, NULL); } return ret; @@ -1090,7 +1137,7 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata) } /* We don't want to restart sched scan anymore. */ - local->sched_scan_req = NULL; + RCU_INIT_POINTER(local->sched_scan_req, NULL); if (rcu_access_pointer(local->sched_scan_sdata)) { ret = drv_sched_scan_stop(local, sdata); @@ -1125,7 +1172,7 @@ void ieee80211_sched_scan_end(struct ieee80211_local *local) RCU_INIT_POINTER(local->sched_scan_sdata, NULL); /* If sched scan was aborted by the driver. 
*/ - local->sched_scan_req = NULL; + RCU_INIT_POINTER(local->sched_scan_req, NULL); mutex_unlock(&local->mtx); diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index de494df3bab82ce56efabde30790d8a660f6a983..a42f5b2b024d640260ce646acccad73c819a36a4 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -351,6 +351,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, sta->sta_state = IEEE80211_STA_NONE; + /* Mark TID as unreserved */ + sta->reserved_tid = IEEE80211_TID_UNRESERVED; + ktime_get_ts(&uptime); sta->last_connected = uptime.tv_sec; ewma_init(&sta->avg_signal, 1024, 8); @@ -501,7 +504,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) /* make the station visible */ sta_info_hash_add(local, sta); - list_add_rcu(&sta->list, &local->sta_list); + list_add_tail_rcu(&sta->list, &local->sta_list); /* notify driver */ err = sta_info_insert_drv_state(local, sdata, sta); @@ -847,6 +850,15 @@ static int __must_check __sta_info_destroy_part1(struct sta_info *sta) if (WARN_ON(ret)) return ret; + /* + * for TDLS peers, make sure to return to the base channel before + * removal. + */ + if (test_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL)) { + drv_tdls_cancel_channel_switch(local, sdata, &sta->sta); + clear_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL); + } + list_del_rcu(&sta->list); drv_sta_pre_rcu_remove(local, sta->sdata, sta); @@ -1249,7 +1261,8 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata, return; } - ieee80211_xmit(sdata, skb, chanctx_conf->def.chan->band); + info->band = chanctx_conf->def.chan->band; + ieee80211_xmit(sdata, skb); rcu_read_unlock(); } @@ -1531,7 +1544,7 @@ void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta) break; case 0: /* XXX: what is a good value? */ - n_frames = 8; + n_frames = 128; break; } diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index bcda2ac7d84402f83d03f24390f22f2f32ea701e..4f052bb2a5adaf0a1d49ad49c6a27b59963fecb6 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -49,6 +49,9 @@ * packets. This means the link is enabled. * @WLAN_STA_TDLS_INITIATOR: We are the initiator of the TDLS link with this * station. + * @WLAN_STA_TDLS_CHAN_SWITCH: This TDLS peer supports TDLS channel-switching + * @WLAN_STA_TDLS_OFF_CHANNEL: The local STA is currently off-channel with this + * TDLS peer * @WLAN_STA_UAPSD: Station requested unscheduled SP while driver was * keeping station in power-save mode, reply when the driver * unblocks the station. @@ -78,6 +81,8 @@ enum ieee80211_sta_info_flags { WLAN_STA_TDLS_PEER, WLAN_STA_TDLS_PEER_AUTH, WLAN_STA_TDLS_INITIATOR, + WLAN_STA_TDLS_CHAN_SWITCH, + WLAN_STA_TDLS_OFF_CHANNEL, WLAN_STA_UAPSD, WLAN_STA_SP, WLAN_STA_4ADDR_EVENT, @@ -249,6 +254,9 @@ struct ieee80211_tx_latency_stat { u32 bin_count; }; +/* Value to indicate no TID reservation */ +#define IEEE80211_TID_UNRESERVED 0xff + /** * struct sta_info - STA information * @@ -337,6 +345,7 @@ struct ieee80211_tx_latency_stat { * AP only. * @cipher_scheme: optional cipher scheme for this station * @last_tdls_pkt_time: holds the time in jiffies of last TDLS pkt ACKed + * @reserved_tid: reserved TID (if any, otherwise IEEE80211_TID_UNRESERVED) */ struct sta_info { /* General information, mostly static */ @@ -454,6 +463,8 @@ struct sta_info { /* TDLS timeout data */ unsigned long last_tdls_pkt_time; + u8 reserved_tid; + /* keep last! 
*/ struct ieee80211_sta sta; }; diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 89290e33dafe9f72335c907f075b1f784a91e509..bb146f377ee457e66a2f85cc99c264746bcace81 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -390,6 +390,46 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local, } } +/* + * Handles the tx for TDLS teardown frames. + * If the frame wasn't ACKed by the peer - it will be re-sent through the AP + */ +static void ieee80211_tdls_td_tx_handle(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u32 flags) +{ + struct sk_buff *teardown_skb; + struct sk_buff *orig_teardown_skb; + bool is_teardown = false; + + /* Get the teardown data we need and free the lock */ + spin_lock(&sdata->u.mgd.teardown_lock); + teardown_skb = sdata->u.mgd.teardown_skb; + orig_teardown_skb = sdata->u.mgd.orig_teardown_skb; + if ((skb == orig_teardown_skb) && teardown_skb) { + sdata->u.mgd.teardown_skb = NULL; + sdata->u.mgd.orig_teardown_skb = NULL; + is_teardown = true; + } + spin_unlock(&sdata->u.mgd.teardown_lock); + + if (is_teardown) { + /* This mechanism relies on being able to get ACKs */ + WARN_ON(!(local->hw.flags & + IEEE80211_HW_REPORTS_TX_ACK_STATUS)); + + /* Check if peer has ACKed */ + if (flags & IEEE80211_TX_STAT_ACK) { + dev_kfree_skb_any(teardown_skb); + } else { + tdls_dbg(sdata, + "TDLS Resending teardown through AP\n"); + + ieee80211_subif_start_xmit(teardown_skb, skb->dev); + } + } +} + static void ieee80211_report_used_skb(struct ieee80211_local *local, struct sk_buff *skb, bool dropped) { @@ -426,8 +466,19 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local, if (!sdata) { skb->dev = NULL; } else if (info->flags & IEEE80211_TX_INTFL_MLME_CONN_TX) { - ieee80211_mgd_conn_tx_status(sdata, hdr->frame_control, - acked); + unsigned int hdr_size = + ieee80211_hdrlen(hdr->frame_control); + + /* Check to see if packet is a TDLS teardown packet */ + if (ieee80211_is_data(hdr->frame_control) && + (ieee80211_get_tdls_action(skb, hdr_size) == + WLAN_TDLS_TEARDOWN)) + ieee80211_tdls_td_tx_handle(local, sdata, skb, + info->flags); + else + ieee80211_mgd_conn_tx_status(sdata, + hdr->frame_control, + acked); } else if (ieee80211_is_nullfunc(hdr->frame_control) || ieee80211_is_qos_nullfunc(hdr->frame_control)) { cfg80211_probe_status(sdata->dev, hdr->addr1, @@ -541,10 +592,9 @@ static void ieee80211_tx_latency_end_msrmnt(struct ieee80211_local *local, #define STA_LOST_TDLS_PKT_THRESHOLD 10 #define STA_LOST_TDLS_PKT_TIME (10*HZ) /* 10secs since last ACK */ -static void ieee80211_lost_packet(struct sta_info *sta, struct sk_buff *skb) +static void ieee80211_lost_packet(struct sta_info *sta, + struct ieee80211_tx_info *info) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - /* This packet was aggregated but doesn't carry status info */ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && !(info->flags & IEEE80211_TX_STAT_AMPDU)) @@ -571,24 +621,13 @@ static void ieee80211_lost_packet(struct sta_info *sta, struct sk_buff *skb) sta->lost_packets = 0; } -void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) +static int ieee80211_tx_get_rates(struct ieee80211_hw *hw, + struct ieee80211_tx_info *info, + int *retry_count) { - struct sk_buff *skb2; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; - struct ieee80211_local *local = hw_to_local(hw); - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - __le16 fc; - struct ieee80211_supported_band *sband; - struct 
ieee80211_sub_if_data *sdata; - struct net_device *prev_dev = NULL; - struct sta_info *sta, *tmp; - int retry_count = -1, i; int rates_idx = -1; - bool send_to_cooked; - bool acked; - struct ieee80211_bar *bar; - int rtap_len; - int shift = 0; + int count = -1; + int i; for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { if ((info->flags & IEEE80211_TX_CTL_AMPDU) && @@ -606,12 +645,91 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) break; } - retry_count += info->status.rates[i].count; + count += info->status.rates[i].count; } rates_idx = i - 1; - if (retry_count < 0) - retry_count = 0; + if (count < 0) + count = 0; + + *retry_count = count; + return rates_idx; +} + +void ieee80211_tx_status_noskb(struct ieee80211_hw *hw, + struct ieee80211_sta *pubsta, + struct ieee80211_tx_info *info) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_supported_band *sband; + int retry_count; + int rates_idx; + bool acked; + + rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count); + + sband = hw->wiphy->bands[info->band]; + + acked = !!(info->flags & IEEE80211_TX_STAT_ACK); + if (pubsta) { + struct sta_info *sta; + + sta = container_of(pubsta, struct sta_info, sta); + + if (!acked) + sta->tx_retry_failed++; + sta->tx_retry_count += retry_count; + + if (acked) { + sta->last_rx = jiffies; + + if (sta->lost_packets) + sta->lost_packets = 0; + + /* Track when last TDLS packet was ACKed */ + if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) + sta->last_tdls_pkt_time = jiffies; + } else { + ieee80211_lost_packet(sta, info); + } + + rate_control_tx_status_noskb(local, sband, sta, info); + } + + if (acked) { + local->dot11TransmittedFrameCount++; + if (!pubsta) + local->dot11MulticastTransmittedFrameCount++; + if (retry_count > 0) + local->dot11RetryCount++; + if (retry_count > 1) + local->dot11MultipleRetryCount++; + } else { + local->dot11FailedCount++; + } +} +EXPORT_SYMBOL(ieee80211_tx_status_noskb); + +void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct sk_buff *skb2; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + __le16 fc; + struct ieee80211_supported_band *sband; + struct ieee80211_sub_if_data *sdata; + struct net_device *prev_dev = NULL; + struct sta_info *sta, *tmp; + int retry_count; + int rates_idx; + bool send_to_cooked; + bool acked; + struct ieee80211_bar *bar; + int rtap_len; + int shift = 0; + + rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count); rcu_read_lock(); @@ -704,7 +822,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) if ((sta->sdata->vif.type == NL80211_IFTYPE_STATION) && (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) - ieee80211_sta_tx_notify(sta->sdata, (void *) skb->data, acked); + ieee80211_sta_tx_notify(sta->sdata, (void *) skb->data, + acked, info->status.tx_time); if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) { if (info->flags & IEEE80211_TX_STAT_ACK) { @@ -715,7 +834,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) sta->last_tdls_pkt_time = jiffies; } else { - ieee80211_lost_packet(sta, skb); + ieee80211_lost_packet(sta, info); } } diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index 4ea25dec06984792234f4a77edc1e26c157b0662..55ddd77b865dffc805cdf3246d0b632e1e61d183 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -35,19 
+35,101 @@ void ieee80211_tdls_peer_del_work(struct work_struct *wk) mutex_unlock(&local->mtx); } -static void ieee80211_tdls_add_ext_capab(struct sk_buff *skb) +static void ieee80211_tdls_add_ext_capab(struct ieee80211_local *local, + struct sk_buff *skb) { u8 *pos = (void *)skb_put(skb, 7); + bool chan_switch = local->hw.wiphy->features & + NL80211_FEATURE_TDLS_CHANNEL_SWITCH; *pos++ = WLAN_EID_EXT_CAPABILITY; *pos++ = 5; /* len */ *pos++ = 0x0; *pos++ = 0x0; *pos++ = 0x0; - *pos++ = 0x0; + *pos++ = chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0; *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED; } +static u8 +ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u16 start, u16 end, + u16 spacing) +{ + u8 subband_cnt = 0, ch_cnt = 0; + struct ieee80211_channel *ch; + struct cfg80211_chan_def chandef; + int i, subband_start; + + for (i = start; i <= end; i += spacing) { + if (!ch_cnt) + subband_start = i; + + ch = ieee80211_get_channel(sdata->local->hw.wiphy, i); + if (ch) { + /* we will be active on the channel */ + u32 flags = IEEE80211_CHAN_DISABLED | + IEEE80211_CHAN_NO_IR; + cfg80211_chandef_create(&chandef, ch, + NL80211_CHAN_HT20); + if (cfg80211_chandef_usable(sdata->local->hw.wiphy, + &chandef, flags)) { + ch_cnt++; + continue; + } + } + + if (ch_cnt) { + u8 *pos = skb_put(skb, 2); + *pos++ = ieee80211_frequency_to_channel(subband_start); + *pos++ = ch_cnt; + + subband_cnt++; + ch_cnt = 0; + } + } + + return subband_cnt; +} + +static void +ieee80211_tdls_add_supp_channels(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + /* + * Add possible channels for TDLS. These are channels that are allowed + * to be active. + */ + u8 subband_cnt; + u8 *pos = skb_put(skb, 2); + + *pos++ = WLAN_EID_SUPPORTED_CHANNELS; + + /* + * 5GHz and 2GHz channels numbers can overlap. Ignore this for now, as + * this doesn't happen in real world scenarios. 
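+ * Each subband below is emitted as a (first channel, number of
+ * channels) pair, e.g. usable channels 36/40/44/48 become the pair
+ * (36, 4); this is the encoding the Supported Channels element uses.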
+ */ + + /* 2GHz, with 5MHz spacing */ + subband_cnt = ieee80211_tdls_add_subband(sdata, skb, 2412, 2472, 5); + + /* 5GHz, with 20MHz spacing */ + subband_cnt += ieee80211_tdls_add_subband(sdata, skb, 5000, 5825, 20); + + /* length */ + *pos = 2 * subband_cnt; +} + +static void ieee80211_tdls_add_bss_coex_ie(struct sk_buff *skb) +{ + u8 *pos = (void *)skb_put(skb, 3); + + *pos++ = WLAN_EID_BSS_COEX_2040; + *pos++ = 1; /* len */ + + *pos++ = WLAN_BSS_COEX_INFORMATION_REQUEST; +} + static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata, u16 status_code) { @@ -190,6 +272,7 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, ieee80211_add_srates_ie(sdata, skb, false, band); ieee80211_add_ext_srates_ie(sdata, skb, false, band); + ieee80211_tdls_add_supp_channels(sdata, skb); /* add any custom IEs that go before Extended Capabilities */ if (extra_ies_len) { @@ -209,7 +292,7 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, offset = noffset; } - ieee80211_tdls_add_ext_capab(skb); + ieee80211_tdls_add_ext_capab(local, skb); /* add the QoS element if we support it */ if (local->hw.queues >= IEEE80211_NUM_ACS && @@ -271,6 +354,10 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, rcu_read_unlock(); + if (ht_cap.ht_supported && + (ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) + ieee80211_tdls_add_bss_coex_ie(skb); + /* add any remaining IEs */ if (extra_ies_len) { noffset = extra_ies_len; @@ -362,11 +449,68 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata, ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); } +static void +ieee80211_tdls_add_chan_switch_req_ies(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, const u8 *peer, + bool initiator, const u8 *extra_ies, + size_t extra_ies_len, u8 oper_class, + struct cfg80211_chan_def *chandef) +{ + struct ieee80211_tdls_data *tf; + size_t offset = 0, noffset; + u8 *pos; + + if (WARN_ON_ONCE(!chandef)) + return; + + tf = (void *)skb->data; + tf->u.chan_switch_req.target_channel = + ieee80211_frequency_to_channel(chandef->chan->center_freq); + tf->u.chan_switch_req.oper_class = oper_class; + + if (extra_ies_len) { + static const u8 before_lnkie[] = { + WLAN_EID_SECONDARY_CHANNEL_OFFSET, + }; + noffset = ieee80211_ie_split(extra_ies, extra_ies_len, + before_lnkie, + ARRAY_SIZE(before_lnkie), + offset); + pos = skb_put(skb, noffset - offset); + memcpy(pos, extra_ies + offset, noffset - offset); + offset = noffset; + } + + ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); + + /* add any remaining IEs */ + if (extra_ies_len) { + noffset = extra_ies_len; + pos = skb_put(skb, noffset - offset); + memcpy(pos, extra_ies + offset, noffset - offset); + } +} + +static void +ieee80211_tdls_add_chan_switch_resp_ies(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, const u8 *peer, + u16 status_code, bool initiator, + const u8 *extra_ies, + size_t extra_ies_len) +{ + if (status_code == 0) + ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); + + if (extra_ies_len) + memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len); +} + static void ieee80211_tdls_add_ies(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, const u8 *peer, u8 action_code, u16 status_code, bool initiator, const u8 *extra_ies, - size_t extra_ies_len) + size_t extra_ies_len, u8 oper_class, + struct cfg80211_chan_def *chandef) { switch (action_code) { case WLAN_TDLS_SETUP_REQUEST: @@ -393,6 +537,18 @@ static void 
ieee80211_tdls_add_ies(struct ieee80211_sub_if_data *sdata, if (status_code == 0 || action_code == WLAN_TDLS_TEARDOWN) ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); break; + case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: + ieee80211_tdls_add_chan_switch_req_ies(sdata, skb, peer, + initiator, extra_ies, + extra_ies_len, + oper_class, chandef); + break; + case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: + ieee80211_tdls_add_chan_switch_resp_ies(sdata, skb, peer, + status_code, + initiator, extra_ies, + extra_ies_len); + break; } } @@ -459,6 +615,19 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev, skb_put(skb, sizeof(tf->u.discover_req)); tf->u.discover_req.dialog_token = dialog_token; break; + case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: + tf->category = WLAN_CATEGORY_TDLS; + tf->action_code = WLAN_TDLS_CHANNEL_SWITCH_REQUEST; + + skb_put(skb, sizeof(tf->u.chan_switch_req)); + break; + case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: + tf->category = WLAN_CATEGORY_TDLS; + tf->action_code = WLAN_TDLS_CHANNEL_SWITCH_RESPONSE; + + skb_put(skb, sizeof(tf->u.chan_switch_resp)); + tf->u.chan_switch_resp.status_code = cpu_to_le16(status_code); + break; default: return -EINVAL; } @@ -502,32 +671,33 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev, return 0; } -static int -ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev, - const u8 *peer, u8 action_code, - u8 dialog_token, u16 status_code, - u32 peer_capability, bool initiator, - const u8 *extra_ies, size_t extra_ies_len) +static struct sk_buff * +ieee80211_tdls_build_mgmt_packet_data(struct ieee80211_sub_if_data *sdata, + const u8 *peer, u8 action_code, + u8 dialog_token, u16 status_code, + bool initiator, const u8 *extra_ies, + size_t extra_ies_len, u8 oper_class, + struct cfg80211_chan_def *chandef) { - struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; - struct sk_buff *skb = NULL; - bool send_direct; - struct sta_info *sta; + struct sk_buff *skb; int ret; - skb = dev_alloc_skb(local->hw.extra_tx_headroom + - max(sizeof(struct ieee80211_mgmt), - sizeof(struct ieee80211_tdls_data)) + - 50 + /* supported rates */ - 7 + /* ext capab */ - 26 + /* max(WMM-info, WMM-param) */ - 2 + max(sizeof(struct ieee80211_ht_cap), - sizeof(struct ieee80211_ht_operation)) + - extra_ies_len + - sizeof(struct ieee80211_tdls_lnkie)); + skb = netdev_alloc_skb(sdata->dev, + local->hw.extra_tx_headroom + + max(sizeof(struct ieee80211_mgmt), + sizeof(struct ieee80211_tdls_data)) + + 50 + /* supported rates */ + 7 + /* ext capab */ + 26 + /* max(WMM-info, WMM-param) */ + 2 + max(sizeof(struct ieee80211_ht_cap), + sizeof(struct ieee80211_ht_operation)) + + 50 + /* supported channels */ + 3 + /* 40/20 BSS coex */ + extra_ies_len + + sizeof(struct ieee80211_tdls_lnkie)); if (!skb) - return -ENOMEM; + return NULL; skb_reserve(skb, local->hw.extra_tx_headroom); @@ -537,16 +707,18 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev, case WLAN_TDLS_SETUP_CONFIRM: case WLAN_TDLS_TEARDOWN: case WLAN_TDLS_DISCOVERY_REQUEST: - ret = ieee80211_prep_tdls_encap_data(wiphy, dev, peer, + case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: + case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: + ret = ieee80211_prep_tdls_encap_data(local->hw.wiphy, + sdata->dev, peer, action_code, dialog_token, status_code, skb); - send_direct = false; break; case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: - ret = ieee80211_prep_tdls_direct(wiphy, dev, peer, action_code, + ret = 
ieee80211_prep_tdls_direct(local->hw.wiphy, sdata->dev, + peer, action_code, dialog_token, status_code, skb); - send_direct = true; break; default: ret = -ENOTSUPP; @@ -556,14 +728,40 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev, if (ret < 0) goto fail; + ieee80211_tdls_add_ies(sdata, skb, peer, action_code, status_code, + initiator, extra_ies, extra_ies_len, oper_class, + chandef); + return skb; + +fail: + dev_kfree_skb(skb); + return NULL; +} + +static int +ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, u8 action_code, u8 dialog_token, + u16 status_code, u32 peer_capability, + bool initiator, const u8 *extra_ies, + size_t extra_ies_len, u8 oper_class, + struct cfg80211_chan_def *chandef) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct sk_buff *skb = NULL; + struct sta_info *sta; + u32 flags = 0; + int ret = 0; + rcu_read_lock(); sta = sta_info_get(sdata, peer); /* infer the initiator if we can, to support old userspace */ switch (action_code) { case WLAN_TDLS_SETUP_REQUEST: - if (sta) + if (sta) { set_sta_flag(sta, WLAN_STA_TDLS_INITIATOR); + sta->sta.tdls_initiator = false; + } /* fall-through */ case WLAN_TDLS_SETUP_CONFIRM: case WLAN_TDLS_DISCOVERY_REQUEST: @@ -575,13 +773,17 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev, * Make the last packet sent take effect for the initiator * value. */ - if (sta) + if (sta) { clear_sta_flag(sta, WLAN_STA_TDLS_INITIATOR); + sta->sta.tdls_initiator = true; + } /* fall-through */ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: initiator = false; break; case WLAN_TDLS_TEARDOWN: + case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: + case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: /* any value is ok */ break; default: @@ -596,9 +798,17 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev, if (ret < 0) goto fail; - ieee80211_tdls_add_ies(sdata, skb, peer, action_code, status_code, - initiator, extra_ies, extra_ies_len); - if (send_direct) { + skb = ieee80211_tdls_build_mgmt_packet_data(sdata, peer, action_code, + dialog_token, status_code, + initiator, extra_ies, + extra_ies_len, oper_class, + chandef); + if (!skb) { + ret = -EINVAL; + goto fail; + } + + if (action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) { ieee80211_tx_skb(sdata, skb); return 0; } @@ -619,9 +829,44 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev, break; } + /* + * Set the WLAN_TDLS_TEARDOWN flag to indicate a teardown in progress. + * Later, if no ACK is returned from peer, we will re-send the teardown + * packet through the AP. + */ + if ((action_code == WLAN_TDLS_TEARDOWN) && + (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) { + struct sta_info *sta = NULL; + bool try_resend; /* Should we keep skb for possible resend */ + + /* If not sending directly to peer - no point in keeping skb */ + rcu_read_lock(); + sta = sta_info_get(sdata, peer); + try_resend = sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH); + rcu_read_unlock(); + + spin_lock_bh(&sdata->u.mgd.teardown_lock); + if (try_resend && !sdata->u.mgd.teardown_skb) { + /* Mark it as requiring TX status callback */ + flags |= IEEE80211_TX_CTL_REQ_TX_STATUS | + IEEE80211_TX_INTFL_MLME_CONN_TX; + + /* + * skb is copied since mac80211 will later set + * properties that might not be the same as the AP, + * such as encryption, QoS, addresses, etc. + * + * No problem if skb_copy() fails, so no need to check. 
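+ * On failure teardown_skb is simply left NULL and the Tx-status
+ * handler won't re-send the teardown through the AP.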
+ */ + sdata->u.mgd.teardown_skb = skb_copy(skb, GFP_ATOMIC); + sdata->u.mgd.orig_teardown_skb = skb; + } + spin_unlock_bh(&sdata->u.mgd.teardown_lock); + } + /* disable bottom halves when entering the Tx path */ local_bh_disable(); - ret = ieee80211_subif_start_xmit(skb, dev); + __ieee80211_subif_start_xmit(skb, dev, flags); local_bh_enable(); return ret; @@ -672,7 +917,8 @@ ieee80211_tdls_mgmt_setup(struct wiphy *wiphy, struct net_device *dev, ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code, dialog_token, status_code, peer_capability, initiator, - extra_ies, extra_ies_len); + extra_ies, extra_ies_len, 0, + NULL); if (ret < 0) goto exit; @@ -711,7 +957,8 @@ ieee80211_tdls_mgmt_teardown(struct wiphy *wiphy, struct net_device *dev, ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code, dialog_token, status_code, peer_capability, initiator, - extra_ies, extra_ies_len); + extra_ies, extra_ies_len, 0, + NULL); if (ret < 0) sdata_err(sdata, "Failed sending TDLS teardown packet %d\n", ret); @@ -781,7 +1028,7 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, status_code, peer_capability, initiator, extra_ies, - extra_ies_len); + extra_ies_len, 0, NULL); break; default: ret = -EOPNOTSUPP; @@ -884,3 +1131,480 @@ void ieee80211_tdls_oper_request(struct ieee80211_vif *vif, const u8 *peer, cfg80211_tdls_oper_request(sdata->dev, peer, oper, reason_code, gfp); } EXPORT_SYMBOL(ieee80211_tdls_oper_request); + +static void +iee80211_tdls_add_ch_switch_timing(u8 *buf, u16 switch_time, u16 switch_timeout) +{ + struct ieee80211_ch_switch_timing *ch_sw; + + *buf++ = WLAN_EID_CHAN_SWITCH_TIMING; + *buf++ = sizeof(struct ieee80211_ch_switch_timing); + + ch_sw = (void *)buf; + ch_sw->switch_time = cpu_to_le16(switch_time); + ch_sw->switch_timeout = cpu_to_le16(switch_timeout); +} + +/* find switch timing IE in SKB ready for Tx */ +static const u8 *ieee80211_tdls_find_sw_timing_ie(struct sk_buff *skb) +{ + struct ieee80211_tdls_data *tf; + const u8 *ie_start; + + /* + * Get the offset for the new location of the switch timing IE. + * The SKB network header will now point to the "payload_type" + * element of the TDLS data frame struct. + */ + tf = container_of(skb->data + skb_network_offset(skb), + struct ieee80211_tdls_data, payload_type); + ie_start = tf->u.chan_switch_req.variable; + return cfg80211_find_ie(WLAN_EID_CHAN_SWITCH_TIMING, ie_start, + skb->len - (ie_start - skb->data)); +} + +static struct sk_buff * +ieee80211_tdls_ch_sw_tmpl_get(struct sta_info *sta, u8 oper_class, + struct cfg80211_chan_def *chandef, + u32 *ch_sw_tm_ie_offset) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + u8 extra_ies[2 + sizeof(struct ieee80211_sec_chan_offs_ie) + + 2 + sizeof(struct ieee80211_ch_switch_timing)]; + int extra_ies_len = 2 + sizeof(struct ieee80211_ch_switch_timing); + u8 *pos = extra_ies; + struct sk_buff *skb; + + /* + * if chandef points to a wide channel add a Secondary-Channel + * Offset information element + */ + if (chandef->width == NL80211_CHAN_WIDTH_40) { + struct ieee80211_sec_chan_offs_ie *sec_chan_ie; + bool ht40plus; + + *pos++ = WLAN_EID_SECONDARY_CHANNEL_OFFSET; + *pos++ = sizeof(*sec_chan_ie); + sec_chan_ie = (void *)pos; + + ht40plus = cfg80211_get_chandef_type(chandef) == + NL80211_CHAN_HT40PLUS; + sec_chan_ie->sec_chan_offs = ht40plus ? 
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE : + IEEE80211_HT_PARAM_CHA_SEC_BELOW; + pos += sizeof(*sec_chan_ie); + + extra_ies_len += 2 + sizeof(struct ieee80211_sec_chan_offs_ie); + } + + /* just set the values to 0, this is a template */ + iee80211_tdls_add_ch_switch_timing(pos, 0, 0); + + skb = ieee80211_tdls_build_mgmt_packet_data(sdata, sta->sta.addr, + WLAN_TDLS_CHANNEL_SWITCH_REQUEST, + 0, 0, !sta->sta.tdls_initiator, + extra_ies, extra_ies_len, + oper_class, chandef); + if (!skb) + return NULL; + + skb = ieee80211_build_data_template(sdata, skb, 0); + if (IS_ERR(skb)) { + tdls_dbg(sdata, "Failed building TDLS channel switch frame\n"); + return NULL; + } + + if (ch_sw_tm_ie_offset) { + const u8 *tm_ie = ieee80211_tdls_find_sw_timing_ie(skb); + + if (!tm_ie) { + tdls_dbg(sdata, "No switch timing IE in TDLS switch\n"); + dev_kfree_skb_any(skb); + return NULL; + } + + *ch_sw_tm_ie_offset = tm_ie - skb->data; + } + + tdls_dbg(sdata, + "TDLS channel switch request template for %pM ch %d width %d\n", + sta->sta.addr, chandef->chan->center_freq, chandef->width); + return skb; +} + +int +ieee80211_tdls_channel_switch(struct wiphy *wiphy, struct net_device *dev, + const u8 *addr, u8 oper_class, + struct cfg80211_chan_def *chandef) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + struct sk_buff *skb = NULL; + u32 ch_sw_tm_ie; + int ret; + + mutex_lock(&local->sta_mtx); + sta = sta_info_get(sdata, addr); + if (!sta) { + tdls_dbg(sdata, + "Invalid TDLS peer %pM for channel switch request\n", + addr); + ret = -ENOENT; + goto out; + } + + if (!test_sta_flag(sta, WLAN_STA_TDLS_CHAN_SWITCH)) { + tdls_dbg(sdata, "TDLS channel switch unsupported by %pM\n", + addr); + ret = -ENOTSUPP; + goto out; + } + + skb = ieee80211_tdls_ch_sw_tmpl_get(sta, oper_class, chandef, + &ch_sw_tm_ie); + if (!skb) { + ret = -ENOENT; + goto out; + } + + ret = drv_tdls_channel_switch(local, sdata, &sta->sta, oper_class, + chandef, skb, ch_sw_tm_ie); + if (!ret) + set_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL); + +out: + mutex_unlock(&local->sta_mtx); + dev_kfree_skb_any(skb); + return ret; +} + +void +ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy, + struct net_device *dev, + const u8 *addr) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + + mutex_lock(&local->sta_mtx); + sta = sta_info_get(sdata, addr); + if (!sta) { + tdls_dbg(sdata, + "Invalid TDLS peer %pM for channel switch cancel\n", + addr); + goto out; + } + + if (!test_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL)) { + tdls_dbg(sdata, "TDLS channel switch not initiated by %pM\n", + addr); + goto out; + } + + drv_tdls_cancel_channel_switch(local, sdata, &sta->sta); + clear_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL); + +out: + mutex_unlock(&local->sta_mtx); +} + +static struct sk_buff * +ieee80211_tdls_ch_sw_resp_tmpl_get(struct sta_info *sta, + u32 *ch_sw_tm_ie_offset) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct sk_buff *skb; + u8 extra_ies[2 + sizeof(struct ieee80211_ch_switch_timing)]; + + /* initial timing are always zero in the template */ + iee80211_tdls_add_ch_switch_timing(extra_ies, 0, 0); + + skb = ieee80211_tdls_build_mgmt_packet_data(sdata, sta->sta.addr, + WLAN_TDLS_CHANNEL_SWITCH_RESPONSE, + 0, 0, !sta->sta.tdls_initiator, + extra_ies, sizeof(extra_ies), 0, NULL); + if (!skb) + return NULL; + + skb = ieee80211_build_data_template(sdata, skb, 0); + 
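/* ieee80211_build_data_template() returns an ERR_PTR on failure */
+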
if (IS_ERR(skb)) { + tdls_dbg(sdata, + "Failed building TDLS channel switch resp frame\n"); + return NULL; + } + + if (ch_sw_tm_ie_offset) { + const u8 *tm_ie = ieee80211_tdls_find_sw_timing_ie(skb); + + if (!tm_ie) { + tdls_dbg(sdata, + "No switch timing IE in TDLS switch resp\n"); + dev_kfree_skb_any(skb); + return NULL; + } + + *ch_sw_tm_ie_offset = tm_ie - skb->data; + } + + tdls_dbg(sdata, "TDLS get channel switch response template for %pM\n", + sta->sta.addr); + return skb; +} + +static int +ieee80211_process_tdls_channel_switch_resp(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_local *local = sdata->local; + struct ieee802_11_elems elems; + struct sta_info *sta; + struct ieee80211_tdls_data *tf = (void *)skb->data; + bool local_initiator; + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + int baselen = offsetof(typeof(*tf), u.chan_switch_resp.variable); + struct ieee80211_tdls_ch_sw_params params = {}; + int ret; + + params.action_code = WLAN_TDLS_CHANNEL_SWITCH_RESPONSE; + params.timestamp = rx_status->device_timestamp; + + if (skb->len < baselen) { + tdls_dbg(sdata, "TDLS channel switch resp too short: %d\n", + skb->len); + return -EINVAL; + } + + mutex_lock(&local->sta_mtx); + sta = sta_info_get(sdata, tf->sa); + if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) { + tdls_dbg(sdata, "TDLS chan switch from non-peer sta %pM\n", + tf->sa); + ret = -EINVAL; + goto out; + } + + params.sta = &sta->sta; + params.status = le16_to_cpu(tf->u.chan_switch_resp.status_code); + if (params.status != 0) { + ret = 0; + goto call_drv; + } + + ieee802_11_parse_elems(tf->u.chan_switch_resp.variable, + skb->len - baselen, false, &elems); + if (elems.parse_error) { + tdls_dbg(sdata, "Invalid IEs in TDLS channel switch resp\n"); + ret = -EINVAL; + goto out; + } + + if (!elems.ch_sw_timing || !elems.lnk_id) { + tdls_dbg(sdata, "TDLS channel switch resp - missing IEs\n"); + ret = -EINVAL; + goto out; + } + + /* validate the initiator is set correctly */ + local_initiator = + !memcmp(elems.lnk_id->init_sta, sdata->vif.addr, ETH_ALEN); + if (local_initiator == sta->sta.tdls_initiator) { + tdls_dbg(sdata, "TDLS chan switch invalid lnk-id initiator\n"); + ret = -EINVAL; + goto out; + } + + params.switch_time = le16_to_cpu(elems.ch_sw_timing->switch_time); + params.switch_timeout = le16_to_cpu(elems.ch_sw_timing->switch_timeout); + + params.tmpl_skb = + ieee80211_tdls_ch_sw_resp_tmpl_get(sta, ¶ms.ch_sw_tm_ie); + if (!params.tmpl_skb) { + ret = -ENOENT; + goto out; + } + +call_drv: + drv_tdls_recv_channel_switch(sdata->local, sdata, ¶ms); + + tdls_dbg(sdata, + "TDLS channel switch response received from %pM status %d\n", + tf->sa, params.status); + +out: + mutex_unlock(&local->sta_mtx); + dev_kfree_skb_any(params.tmpl_skb); + return ret; +} + +static int +ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_local *local = sdata->local; + struct ieee802_11_elems elems; + struct cfg80211_chan_def chandef; + struct ieee80211_channel *chan; + enum nl80211_channel_type chan_type; + int freq; + u8 target_channel, oper_class; + bool local_initiator; + struct sta_info *sta; + enum ieee80211_band band; + struct ieee80211_tdls_data *tf = (void *)skb->data; + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + int baselen = offsetof(typeof(*tf), u.chan_switch_req.variable); + struct ieee80211_tdls_ch_sw_params params = {}; + int ret = 0; + + params.action_code = 
+
+static int
+ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata,
+					  struct sk_buff *skb)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee802_11_elems elems;
+	struct cfg80211_chan_def chandef;
+	struct ieee80211_channel *chan;
+	enum nl80211_channel_type chan_type;
+	int freq;
+	u8 target_channel, oper_class;
+	bool local_initiator;
+	struct sta_info *sta;
+	enum ieee80211_band band;
+	struct ieee80211_tdls_data *tf = (void *)skb->data;
+	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+	int baselen = offsetof(typeof(*tf), u.chan_switch_req.variable);
+	struct ieee80211_tdls_ch_sw_params params = {};
+	int ret = 0;
+
+	params.action_code = WLAN_TDLS_CHANNEL_SWITCH_REQUEST;
+	params.timestamp = rx_status->device_timestamp;
+
+	if (skb->len < baselen) {
+		tdls_dbg(sdata, "TDLS channel switch req too short: %d\n",
+			 skb->len);
+		return -EINVAL;
+	}
+
+	target_channel = tf->u.chan_switch_req.target_channel;
+	oper_class = tf->u.chan_switch_req.oper_class;
+
+	/*
+	 * We can't easily infer the channel band. The operating class is
+	 * ambiguous - there are multiple tables (US/Europe/JP/Global). The
+	 * solution here is to treat channels with number >14 as 5GHz ones,
+	 * and specifically check for the (oper_class, channel) combinations
+	 * where this doesn't hold. These are thankfully unique according to
+	 * IEEE802.11-2012.
+	 * We consider only the 2GHz and 5GHz bands and 20MHz+ channels as
+	 * valid here.
+	 */
+	if ((oper_class == 112 || oper_class == 2 || oper_class == 3 ||
+	     oper_class == 4 || oper_class == 5 || oper_class == 6) &&
+	     target_channel < 14)
+		band = IEEE80211_BAND_5GHZ;
+	else
+		band = target_channel < 14 ? IEEE80211_BAND_2GHZ :
+					     IEEE80211_BAND_5GHZ;
+
+	freq = ieee80211_channel_to_frequency(target_channel, band);
+	if (freq == 0) {
+		tdls_dbg(sdata, "Invalid channel in TDLS chan switch: %d\n",
+			 target_channel);
+		return -EINVAL;
+	}
+
+	chan = ieee80211_get_channel(sdata->local->hw.wiphy, freq);
+	if (!chan) {
+		tdls_dbg(sdata,
+			 "Unsupported channel for TDLS chan switch: %d\n",
+			 target_channel);
+		return -EINVAL;
+	}
+
+	ieee802_11_parse_elems(tf->u.chan_switch_req.variable,
+			       skb->len - baselen, false, &elems);
+	if (elems.parse_error) {
+		tdls_dbg(sdata, "Invalid IEs in TDLS channel switch req\n");
+		return -EINVAL;
+	}
+
+	if (!elems.ch_sw_timing || !elems.lnk_id) {
+		tdls_dbg(sdata, "TDLS channel switch req - missing IEs\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&local->sta_mtx);
+	sta = sta_info_get(sdata, tf->sa);
+	if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) {
+		tdls_dbg(sdata, "TDLS chan switch from non-peer sta %pM\n",
+			 tf->sa);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	params.sta = &sta->sta;
+
+	/* validate the initiator is set correctly */
+	local_initiator =
+		!memcmp(elems.lnk_id->init_sta, sdata->vif.addr, ETH_ALEN);
+	if (local_initiator == sta->sta.tdls_initiator) {
+		tdls_dbg(sdata, "TDLS chan switch invalid lnk-id initiator\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!sta->sta.ht_cap.ht_supported) {
+		chan_type = NL80211_CHAN_NO_HT;
+	} else if (!elems.sec_chan_offs) {
+		chan_type = NL80211_CHAN_HT20;
+	} else {
+		switch (elems.sec_chan_offs->sec_chan_offs) {
+		case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+			chan_type = NL80211_CHAN_HT40PLUS;
+			break;
+		case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+			chan_type = NL80211_CHAN_HT40MINUS;
+			break;
+		default:
+			chan_type = NL80211_CHAN_HT20;
+			break;
+		}
+	}
+
+	cfg80211_chandef_create(&chandef, chan, chan_type);
+	params.chandef = &chandef;
+
+	params.switch_time = le16_to_cpu(elems.ch_sw_timing->switch_time);
+	params.switch_timeout = le16_to_cpu(elems.ch_sw_timing->switch_timeout);
+
+	params.tmpl_skb =
+		ieee80211_tdls_ch_sw_resp_tmpl_get(sta,
+						   &params.ch_sw_tm_ie);
+	if (!params.tmpl_skb) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	drv_tdls_recv_channel_switch(sdata->local, sdata, &params);
+
+	tdls_dbg(sdata,
+		 "TDLS ch switch request received from %pM ch %d width %d\n",
+		 tf->sa, params.chandef->chan->center_freq,
+		 params.chandef->width);
+out:
+	mutex_unlock(&local->sta_mtx);
+	dev_kfree_skb_any(params.tmpl_skb);
+	return ret;
+}
+
+void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
+					   struct sk_buff *skb)
+{
+	struct
ieee80211_tdls_data *tf = (void *)skb->data; + struct wiphy *wiphy = sdata->local->hw.wiphy; + + /* make sure the driver supports it */ + if (!(wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH)) + return; + + /* we want to access the entire packet */ + if (skb_linearize(skb)) + return; + /* + * The packet/size was already validated by mac80211 Rx path, only look + * at the action type. + */ + switch (tf->action_code) { + case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: + ieee80211_process_tdls_channel_switch_req(sdata, skb); + break; + case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: + ieee80211_process_tdls_channel_switch_resp(sdata, skb); + break; + default: + WARN_ON_ONCE(1); + return; + } +} diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index 38fae7ebe984a621540328b6bdad28dcd632fccb..8e461a02c6a8ed9f8851979eed7992262704f60a 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -16,6 +16,7 @@ #define STA_ENTRY __array(char, sta_addr, ETH_ALEN) #define STA_ASSIGN (sta ? memcpy(__entry->sta_addr, sta->addr, ETH_ALEN) : memset(__entry->sta_addr, 0, ETH_ALEN)) +#define STA_NAMED_ASSIGN(s) memcpy(__entry->sta_addr, (s)->addr, ETH_ALEN) #define STA_PR_FMT " sta:%pM" #define STA_PR_ARG __entry->sta_addr @@ -595,14 +596,33 @@ DEFINE_EVENT(local_sdata_evt, drv_sched_scan_stop, TP_ARGS(local, sdata) ); -DEFINE_EVENT(local_only_evt, drv_sw_scan_start, - TP_PROTO(struct ieee80211_local *local), - TP_ARGS(local) +TRACE_EVENT(drv_sw_scan_start, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + const u8 *mac_addr), + + TP_ARGS(local, sdata, mac_addr), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __array(char, mac_addr, ETH_ALEN) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + memcpy(__entry->mac_addr, mac_addr, ETH_ALEN); + ), + + TP_printk(LOCAL_PR_FMT ", " VIF_PR_FMT ", addr:%pM", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->mac_addr) ); -DEFINE_EVENT(local_only_evt, drv_sw_scan_complete, - TP_PROTO(struct ieee80211_local *local), - TP_ARGS(local) +DEFINE_EVENT(local_sdata_evt, drv_sw_scan_complete, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) ); TRACE_EVENT(drv_get_stats, @@ -826,6 +846,13 @@ DEFINE_EVENT(sta_event, drv_sta_pre_rcu_remove, TP_ARGS(local, sdata, sta) ); +DEFINE_EVENT(sta_event, drv_sta_rate_tbl_update, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta), + TP_ARGS(local, sdata, sta) +); + TRACE_EVENT(drv_conf_tx, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, @@ -987,29 +1014,34 @@ TRACE_EVENT(drv_flush, TRACE_EVENT(drv_channel_switch, TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, struct ieee80211_channel_switch *ch_switch), - TP_ARGS(local, ch_switch), + TP_ARGS(local, sdata, ch_switch), TP_STRUCT__entry( LOCAL_ENTRY + VIF_ENTRY CHANDEF_ENTRY __field(u64, timestamp) + __field(u32, device_timestamp) __field(bool, block_tx) __field(u8, count) ), TP_fast_assign( LOCAL_ASSIGN; + VIF_ASSIGN; CHANDEF_ASSIGN(&ch_switch->chandef) __entry->timestamp = ch_switch->timestamp; + __entry->device_timestamp = ch_switch->device_timestamp; __entry->block_tx = ch_switch->block_tx; __entry->count = ch_switch->count; ), TP_printk( - LOCAL_PR_FMT " new " CHANDEF_PR_FMT " count:%d", - LOCAL_PR_ARG, CHANDEF_PR_ARG, __entry->count + LOCAL_PR_FMT VIF_PR_FMT " new " CHANDEF_PR_FMT " count:%d", + LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count ) ); @@ -1557,9 +1589,26 @@ 
DEFINE_EVENT(local_sdata_evt, drv_stop_ap, TP_ARGS(local, sdata) ); -DEFINE_EVENT(local_only_evt, drv_restart_complete, - TP_PROTO(struct ieee80211_local *local), - TP_ARGS(local) +TRACE_EVENT(drv_reconfig_complete, + TP_PROTO(struct ieee80211_local *local, + enum ieee80211_reconfig_type reconfig_type), + TP_ARGS(local, reconfig_type), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(u8, reconfig_type) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->reconfig_type = reconfig_type; + ), + + TP_printk( + LOCAL_PR_FMT " reconfig_type:%d", + LOCAL_PR_ARG, __entry->reconfig_type + ) + ); #if IS_ENABLED(CONFIG_IPV6) @@ -1780,6 +1829,12 @@ TRACE_EVENT(api_cqm_rssi_notify, ) ); +DEFINE_EVENT(local_sdata_evt, api_cqm_beacon_loss_notify, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) +); + TRACE_EVENT(api_scan_completed, TP_PROTO(struct ieee80211_local *local, bool aborted), @@ -2106,6 +2161,175 @@ TRACE_EVENT(drv_channel_switch_beacon, ) ); +TRACE_EVENT(drv_pre_channel_switch, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_channel_switch *ch_switch), + + TP_ARGS(local, sdata, ch_switch), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + CHANDEF_ENTRY + __field(u64, timestamp) + __field(u32, device_timestamp) + __field(bool, block_tx) + __field(u8, count) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + CHANDEF_ASSIGN(&ch_switch->chandef) + __entry->timestamp = ch_switch->timestamp; + __entry->device_timestamp = ch_switch->device_timestamp; + __entry->block_tx = ch_switch->block_tx; + __entry->count = ch_switch->count; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " prepare channel switch to " + CHANDEF_PR_FMT " count:%d block_tx:%d timestamp:%llu", + LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count, + __entry->block_tx, __entry->timestamp + ) +); + +DEFINE_EVENT(local_sdata_evt, drv_post_channel_switch, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata), + TP_ARGS(local, sdata) +); + +TRACE_EVENT(drv_get_txpower, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + int dbm, int ret), + + TP_ARGS(local, sdata, dbm, ret), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(int, dbm) + __field(int, ret) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->dbm = dbm; + __entry->ret = ret; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " dbm:%d ret:%d", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->dbm, __entry->ret + ) +); + +TRACE_EVENT(drv_tdls_channel_switch, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, u8 oper_class, + struct cfg80211_chan_def *chandef), + + TP_ARGS(local, sdata, sta, oper_class, chandef), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + __field(u8, oper_class) + CHANDEF_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_ASSIGN; + __entry->oper_class = oper_class; + CHANDEF_ASSIGN(chandef) + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " tdls channel switch to" + CHANDEF_PR_FMT " oper_class:%d " STA_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->oper_class, + STA_PR_ARG + ) +); + +TRACE_EVENT(drv_tdls_cancel_channel_switch, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta), + + TP_ARGS(local, sdata, sta), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + 
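/*
 * Events that only carry the local + sdata pair keep reusing the
 * local_sdata_evt event class (drv_post_channel_switch above is one);
 * a hypothetical new callback of the same shape would be traced with
 *
 *	DEFINE_EVENT(local_sdata_evt, drv_foo_done,
 *		TP_PROTO(struct ieee80211_local *local,
 *			 struct ieee80211_sub_if_data *sdata),
 *		TP_ARGS(local, sdata)
 *	);
 *
 * rather than an open-coded TRACE_EVENT() like the ones added here,
 * which are needed only when extra fields must be recorded.
 */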
STA_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT + " tdls cancel channel switch with " STA_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG + ) +); + +TRACE_EVENT(drv_tdls_recv_channel_switch, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_tdls_ch_sw_params *params), + + TP_ARGS(local, sdata, params), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(u8, action_code) + STA_ENTRY + CHANDEF_ENTRY + __field(u32, status) + __field(bool, peer_initiator) + __field(u32, timestamp) + __field(u16, switch_time) + __field(u16, switch_timeout) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_NAMED_ASSIGN(params->sta); + CHANDEF_ASSIGN(params->chandef) + __entry->peer_initiator = params->sta->tdls_initiator; + __entry->action_code = params->action_code; + __entry->status = params->status; + __entry->timestamp = params->timestamp; + __entry->switch_time = params->switch_time; + __entry->switch_timeout = params->switch_timeout; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " received tdls channel switch packet" + " action:%d status:%d time:%d switch time:%d switch" + " timeout:%d initiator: %d chan:" CHANDEF_PR_FMT STA_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG, __entry->action_code, __entry->status, + __entry->timestamp, __entry->switch_time, + __entry->switch_timeout, __entry->peer_initiator, + CHANDEF_PR_ARG, STA_PR_ARG + ) +); #ifdef CONFIG_MAC80211_MESSAGE_TRACING #undef TRACE_SYSTEM diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 900632a250ecc7b0f4c57190d68bcdd45169e60e..058686a721a1de5e1145da56d3f176d1e08ed739 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -60,7 +60,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, rcu_read_unlock(); /* assume HW handles this */ - if (tx->rate.flags & IEEE80211_TX_RC_MCS) + if (tx->rate.flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS)) return 0; /* uh huh? */ @@ -296,6 +296,9 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) */ return TX_DROP; + if (tx->sdata->vif.type == NL80211_IFTYPE_OCB) + return TX_CONTINUE; + if (tx->sdata->vif.type == NL80211_IFTYPE_WDS) return TX_CONTINUE; @@ -1423,8 +1426,7 @@ EXPORT_SYMBOL(ieee80211_tx_prepare_skb); * Returns false if the frame couldn't be transmitted but was queued instead. 
*/ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, - struct sk_buff *skb, bool txpending, - enum ieee80211_band band) + struct sk_buff *skb, bool txpending) { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_data tx; @@ -1449,8 +1451,6 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, return true; } - info->band = band; - /* set up hw_queue value early */ if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) || !(local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)) @@ -1498,8 +1498,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, return 0; } -void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, - enum ieee80211_band band) +void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -1534,7 +1533,7 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, } ieee80211_set_qos_hdr(sdata, skb); - ieee80211_tx(sdata, skb, false, band); + ieee80211_tx(sdata, skb, false); } static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb) @@ -1754,7 +1753,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, sdata->vif.type)) goto fail_rcu; - ieee80211_xmit(sdata, skb, chandef->chan->band); + info->band = chandef->chan->band; + ieee80211_xmit(sdata, skb); rcu_read_unlock(); return NETDEV_TX_OK; @@ -1784,23 +1784,26 @@ static void ieee80211_tx_latency_start_msrmnt(struct ieee80211_local *local, } /** - * ieee80211_subif_start_xmit - netif start_xmit function for Ethernet-type - * subinterfaces (wlan#, WDS, and VLAN interfaces) - * @skb: packet to be sent - * @dev: incoming interface + * ieee80211_build_hdr - build 802.11 header in the given frame + * @sdata: virtual interface to build the header for + * @skb: the skb to build the header in + * @info_flags: skb flags to set + * + * This function takes the skb with 802.3 header and reformats the header to + * the appropriate IEEE 802.11 header based on which interface the packet is + * being transmitted on. + * + * Note that this function also takes care of the TX status request and + * potential unsharing of the SKB - this needs to be interleaved with the + * header building. * - * Returns: NETDEV_TX_OK both on success and on failure. On failure skb will - * be freed. + * The function requires the read-side RCU lock held * - * This function takes in an Ethernet header and encapsulates it with suitable - * IEEE 802.11 header based on which interface the packet is coming in. The - * encapsulated packet will then be passed to master interface, wlan#.11, for - * transmission (through low-level driver). 
+ * Returns: the (possibly reallocated) skb or an ERR_PTR() code */ -netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, - struct net_device *dev) +static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u32 info_flags) { - struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct ieee80211_tx_info *info; int head_need; @@ -1816,25 +1819,17 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, bool wme_sta = false, authorized = false, tdls_auth = false; bool tdls_peer = false, tdls_setup_frame = false; bool multicast; - u32 info_flags = 0; u16 info_id = 0; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_sub_if_data *ap_sdata; enum ieee80211_band band; - - if (unlikely(skb->len < ETH_HLEN)) - goto fail; + int ret; /* convert Ethernet header to proper 802.11 header (based on * operation mode) */ ethertype = (skb->data[12] << 8) | skb->data[13]; fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); - rcu_read_lock(); - - /* Measure frame arrival for Tx latency statistics calculation */ - ieee80211_tx_latency_start_msrmnt(local, skb); - switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: sta = rcu_dereference(sdata->u.vlan.sta); @@ -1852,8 +1847,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, ap_sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); chanctx_conf = rcu_dereference(ap_sdata->vif.chanctx_conf); - if (!chanctx_conf) - goto fail_rcu; + if (!chanctx_conf) { + ret = -ENOTCONN; + goto free; + } band = chanctx_conf->def.chan->band; if (sta) break; @@ -1861,8 +1858,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, case NL80211_IFTYPE_AP: if (sdata->vif.type == NL80211_IFTYPE_AP) chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); - if (!chanctx_conf) - goto fail_rcu; + if (!chanctx_conf) { + ret = -ENOTCONN; + goto free; + } fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); /* DA BSSID SA */ memcpy(hdr.addr1, skb->data, ETH_ALEN); @@ -1949,8 +1948,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, } chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); - if (!chanctx_conf) - goto fail_rcu; + if (!chanctx_conf) { + ret = -ENOTCONN; + goto free; + } band = chanctx_conf->def.chan->band; break; #endif @@ -1980,8 +1981,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, * of a link teardown after a TDLS sta is removed due to being * unreachable. 
*/ - if (tdls_peer && !tdls_auth && !tdls_setup_frame) - goto fail_rcu; + if (tdls_peer && !tdls_auth && !tdls_setup_frame) { + ret = -EINVAL; + goto free; + } /* send direct packets to authorized TDLS peers */ if (tdls_peer && tdls_auth) { @@ -2009,8 +2012,23 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, hdrlen = 24; } chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); - if (!chanctx_conf) - goto fail_rcu; + if (!chanctx_conf) { + ret = -ENOTCONN; + goto free; + } + band = chanctx_conf->def.chan->band; + break; + case NL80211_IFTYPE_OCB: + /* DA SA BSSID */ + memcpy(hdr.addr1, skb->data, ETH_ALEN); + memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); + eth_broadcast_addr(hdr.addr3); + hdrlen = 24; + chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); + if (!chanctx_conf) { + ret = -ENOTCONN; + goto free; + } band = chanctx_conf->def.chan->band; break; case NL80211_IFTYPE_ADHOC: @@ -2020,12 +2038,15 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN); hdrlen = 24; chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); - if (!chanctx_conf) - goto fail_rcu; + if (!chanctx_conf) { + ret = -ENOTCONN; + goto free; + } band = chanctx_conf->def.chan->band; break; default: - goto fail_rcu; + ret = -EINVAL; + goto free; } /* @@ -2057,17 +2078,19 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, * EAPOL frames from the local station. */ if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) && + (sdata->vif.type != NL80211_IFTYPE_OCB) && !multicast && !authorized && (cpu_to_be16(ethertype) != sdata->control_port_protocol || !ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) { #ifdef CONFIG_MAC80211_VERBOSE_DEBUG net_info_ratelimited("%s: dropped frame to %pM (unauthorized port)\n", - dev->name, hdr.addr1); + sdata->name, hdr.addr1); #endif I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); - goto fail_rcu; + ret = -EPERM; + goto free; } if (unlikely(!multicast && skb->sk && @@ -2104,8 +2127,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, skb = skb_clone(skb, GFP_ATOMIC); kfree_skb(tmp_skb); - if (!skb) - goto fail_rcu; + if (!skb) { + ret = -ENOMEM; + goto free; + } } hdr.frame_control = fc; @@ -2154,7 +2179,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, if (ieee80211_skb_resize(sdata, skb, head_need, true)) { ieee80211_free_txskb(&local->hw, skb); skb = NULL; - goto fail_rcu; + return ERR_PTR(-ENOMEM); } } @@ -2188,9 +2213,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, nh_pos += hdrlen; h_pos += hdrlen; - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; - /* Update skb pointers to various headers since this modified frame * is going to go through Linux networking code that may potentially * need things like pointer to IP header. 
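 *
 * (The 802.11 header conversion is complete at this point; every
 * failure path in ieee80211_build_hdr() has already consumed the skb
 * and returned an ERR_PTR(), so its callers only need a single
 * IS_ERR() check - see __ieee80211_subif_start_xmit() and
 * ieee80211_build_data_template() below.)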
*/ @@ -2201,23 +2223,90 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, info = IEEE80211_SKB_CB(skb); memset(info, 0, sizeof(*info)); - dev->trans_start = jiffies; - info->flags = info_flags; info->ack_frame_id = info_id; + info->band = band; - ieee80211_xmit(sdata, skb, band); - rcu_read_unlock(); + return skb; + free: + kfree_skb(skb); + return ERR_PTR(ret); +} - return NETDEV_TX_OK; +void __ieee80211_subif_start_xmit(struct sk_buff *skb, + struct net_device *dev, + u32 info_flags) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + + if (unlikely(skb->len < ETH_HLEN)) { + kfree_skb(skb); + return; + } + + rcu_read_lock(); + + /* Measure frame arrival for Tx latency statistics calculation */ + ieee80211_tx_latency_start_msrmnt(local, skb); + + skb = ieee80211_build_hdr(sdata, skb, info_flags); + if (IS_ERR(skb)) + goto out; - fail_rcu: + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + dev->trans_start = jiffies; + + ieee80211_xmit(sdata, skb); + out: rcu_read_unlock(); - fail: - dev_kfree_skb(skb); +} + +/** + * ieee80211_subif_start_xmit - netif start_xmit function for 802.3 vifs + * @skb: packet to be sent + * @dev: incoming interface + * + * On failure skb will be freed. + */ +netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + __ieee80211_subif_start_xmit(skb, dev, 0); return NETDEV_TX_OK; } +struct sk_buff * +ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, u32 info_flags) +{ + struct ieee80211_hdr *hdr; + struct ieee80211_tx_data tx = { + .local = sdata->local, + .sdata = sdata, + }; + + rcu_read_lock(); + + skb = ieee80211_build_hdr(sdata, skb, info_flags); + if (IS_ERR(skb)) + goto out; + + hdr = (void *)skb->data; + tx.sta = sta_info_get(sdata, hdr->addr1); + tx.skb = skb; + + if (ieee80211_tx_h_select_key(&tx) != TX_CONTINUE) { + rcu_read_unlock(); + kfree_skb(skb); + return ERR_PTR(-EINVAL); + } + +out: + rcu_read_unlock(); + return skb; +} /* * ieee80211_clear_tx_pending may not be called in a context where @@ -2257,8 +2346,8 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, dev_kfree_skb(skb); return true; } - result = ieee80211_tx(sdata, skb, true, - chanctx_conf->def.chan->band); + info->band = chanctx_conf->def.chan->band; + result = ieee80211_tx(sdata, skb, true); } else { struct sk_buff_head skbs; @@ -2872,19 +2961,16 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, EXPORT_SYMBOL(ieee80211_nullfunc_get); struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, + const u8 *src_addr, const u8 *ssid, size_t ssid_len, size_t tailroom) { - struct ieee80211_sub_if_data *sdata; - struct ieee80211_local *local; + struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_hdr_3addr *hdr; struct sk_buff *skb; size_t ie_ssid_len; u8 *pos; - sdata = vif_to_sdata(vif); - local = sdata->local; ie_ssid_len = 2 + ssid_len; skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) + @@ -2899,7 +2985,7 @@ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw, hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ); eth_broadcast_addr(hdr->addr1); - memcpy(hdr->addr2, vif->addr, ETH_ALEN); + memcpy(hdr->addr2, src_addr, ETH_ALEN); eth_broadcast_addr(hdr->addr3); pos = skb_put(skb, ie_ssid_len); @@ -3018,6 +3104,97 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, } 
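/*
 * With ieee80211_probereq_get() now taking an explicit source address
 * (see the change above) a caller can build probe request templates
 * for an address it has no vif for. A minimal sketch; the foo_* name,
 * the SSID and the tailroom value are illustrative only:
 */
static struct sk_buff *foo_build_probe_tmpl(struct ieee80211_hw *hw,
					    const u8 *src_addr)
{
	static const u8 ssid[] = "example-ssid";

	/* leave some tailroom in case extra IEs are appended later */
	return ieee80211_probereq_get(hw, src_addr, ssid, sizeof(ssid) - 1,
				      64);
}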
EXPORT_SYMBOL(ieee80211_get_buffered_bc); +int ieee80211_reserve_tid(struct ieee80211_sta *pubsta, u8 tid) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + int ret; + u32 queues; + + lockdep_assert_held(&local->sta_mtx); + + /* only some cases are supported right now */ + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + break; + default: + WARN_ON(1); + return -EINVAL; + } + + if (WARN_ON(tid >= IEEE80211_NUM_UPS)) + return -EINVAL; + + if (sta->reserved_tid == tid) { + ret = 0; + goto out; + } + + if (sta->reserved_tid != IEEE80211_TID_UNRESERVED) { + sdata_err(sdata, "TID reservation already active\n"); + ret = -EALREADY; + goto out; + } + + ieee80211_stop_vif_queues(sdata->local, sdata, + IEEE80211_QUEUE_STOP_REASON_RESERVE_TID); + + synchronize_net(); + + /* Tear down BA sessions so we stop aggregating on this TID */ + if (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) { + set_sta_flag(sta, WLAN_STA_BLOCK_BA); + __ieee80211_stop_tx_ba_session(sta, tid, + AGG_STOP_LOCAL_REQUEST); + } + + queues = BIT(sdata->vif.hw_queue[ieee802_1d_to_ac[tid]]); + __ieee80211_flush_queues(local, sdata, queues); + + sta->reserved_tid = tid; + + ieee80211_wake_vif_queues(local, sdata, + IEEE80211_QUEUE_STOP_REASON_RESERVE_TID); + + if (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) + clear_sta_flag(sta, WLAN_STA_BLOCK_BA); + + ret = 0; + out: + return ret; +} +EXPORT_SYMBOL(ieee80211_reserve_tid); + +void ieee80211_unreserve_tid(struct ieee80211_sta *pubsta, u8 tid) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + struct ieee80211_sub_if_data *sdata = sta->sdata; + + lockdep_assert_held(&sdata->local->sta_mtx); + + /* only some cases are supported right now */ + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + break; + default: + WARN_ON(1); + return; + } + + if (tid != sta->reserved_tid) { + sdata_err(sdata, "TID to unreserve (%d) isn't reserved\n", tid); + return; + } + + sta->reserved_tid = IEEE80211_TID_UNRESERVED; +} +EXPORT_SYMBOL(ieee80211_unreserve_tid); + void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid, enum ieee80211_band band) @@ -3039,6 +3216,7 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, * requirements are that we do not come into tx with bhs on. 
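/*
 * Sketch of the intended use of the TID reservation API above: keep a
 * TID free of aggregation around an operation that needs it (TDLS
 * channel switching is the in-tree motivation). foo_run_op() is a
 * placeholder, and per the lockdep annotations the caller must hold
 * the sta_mtx across the reserve/unreserve pair.
 */
static int foo_with_reserved_tid(struct ieee80211_sta *pubsta, u8 tid)
{
	int ret = ieee80211_reserve_tid(pubsta, tid);

	if (ret)
		return ret;

	ret = foo_run_op(pubsta, tid);

	ieee80211_unreserve_tid(pubsta, tid);
	return ret;
}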
*/ local_bh_disable(); - ieee80211_xmit(sdata, skb, band); + IEEE80211_SKB_CB(skb)->band = band; + ieee80211_xmit(sdata, skb); local_bh_enable(); } diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 3c61060a4d2b75bd89d4a792dde8b4721eb435dc..974ebe70f5b0139c0dd2aec7335201dec51e6aca 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -576,15 +576,19 @@ ieee80211_get_vif_queues(struct ieee80211_local *local, return queues; } -void ieee80211_flush_queues(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata) +void __ieee80211_flush_queues(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + unsigned int queues) { - unsigned int queues; - if (!local->ops->flush) return; - queues = ieee80211_get_vif_queues(local, sdata); + /* + * If no queue was set, or if the HW doesn't support + * IEEE80211_HW_QUEUE_CONTROL - flush all queues + */ + if (!queues || !(local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)) + queues = ieee80211_get_vif_queues(local, sdata); ieee80211_stop_queues_by_reason(&local->hw, queues, IEEE80211_QUEUE_STOP_REASON_FLUSH, @@ -597,6 +601,12 @@ void ieee80211_flush_queues(struct ieee80211_local *local, false); } +void ieee80211_flush_queues(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + __ieee80211_flush_queues(local, sdata, 0); +} + void ieee80211_stop_vif_queues(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, enum queue_stop_reason reason) @@ -693,6 +703,34 @@ void ieee80211_iterate_active_interfaces_rtnl( } EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_rtnl); +static void __iterate_stations(struct ieee80211_local *local, + void (*iterator)(void *data, + struct ieee80211_sta *sta), + void *data) +{ + struct sta_info *sta; + + list_for_each_entry_rcu(sta, &local->sta_list, list) { + if (!sta->uploaded) + continue; + + iterator(data, &sta->sta); + } +} + +void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw, + void (*iterator)(void *data, + struct ieee80211_sta *sta), + void *data) +{ + struct ieee80211_local *local = hw_to_local(hw); + + rcu_read_lock(); + __iterate_stations(local, iterator, data); + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(ieee80211_iterate_stations_atomic); + struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); @@ -803,6 +841,9 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, case WLAN_EID_SECONDARY_CHANNEL_OFFSET: case WLAN_EID_WIDE_BW_CHANNEL_SWITCH: case WLAN_EID_CHAN_SWITCH_PARAM: + case WLAN_EID_EXT_CAPABILITY: + case WLAN_EID_CHAN_SWITCH_TIMING: + case WLAN_EID_LINK_ID: /* * not listing WLAN_EID_CHANNEL_SWITCH_WRAPPER -- it seems possible * that if the content gets bigger it might be needed more than once @@ -822,6 +863,24 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, elem_parse_failed = false; switch (id) { + case WLAN_EID_LINK_ID: + if (elen + 2 != sizeof(struct ieee80211_tdls_lnkie)) { + elem_parse_failed = true; + break; + } + elems->lnk_id = (void *)(pos - 2); + break; + case WLAN_EID_CHAN_SWITCH_TIMING: + if (elen != sizeof(struct ieee80211_ch_switch_timing)) { + elem_parse_failed = true; + break; + } + elems->ch_sw_timing = (void *)pos; + break; + case WLAN_EID_EXT_CAPABILITY: + elems->ext_capab = pos; + elems->ext_capab_len = elen; + break; case WLAN_EID_SSID: elems->ssid = pos; elems->ssid_len = elen; @@ -1073,6 +1132,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, 
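/*
 * Sketch of the new station iterator exported from util.c above; the
 * callback runs in atomic (RCU read-side) context and therefore must
 * not sleep. The foo_* names are placeholders.
 */
static void foo_count_sta(void *data, struct ieee80211_sta *sta)
{
	unsigned int *count = data;

	(*count)++;
}

static unsigned int foo_num_uploaded_stations(struct ieee80211_hw *hw)
{
	unsigned int count = 0;

	ieee80211_iterate_stations_atomic(hw, foo_count_sta, &count);
	return count;
}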
struct ieee80211_chanctx_conf *chanctx_conf; int ac; bool use_11b, enable_qos; + bool is_ocb; /* Use another EDCA parameters if dot11OCBActivated=true */ int aCWmin, aCWmax; if (!local->ops->conf_tx) @@ -1097,6 +1157,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, */ enable_qos = (sdata->vif.type != NL80211_IFTYPE_STATION); + is_ocb = (sdata->vif.type == NL80211_IFTYPE_OCB); + /* Set defaults according to 802.11-2007 Table 7-37 */ aCWmax = 1023; if (use_11b) @@ -1118,7 +1180,10 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, qparam.cw_max = aCWmax; qparam.cw_min = aCWmin; qparam.txop = 0; - qparam.aifs = 7; + if (is_ocb) + qparam.aifs = 9; + else + qparam.aifs = 7; break; /* never happens but let's not leave undefined */ default: @@ -1126,21 +1191,32 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, qparam.cw_max = aCWmax; qparam.cw_min = aCWmin; qparam.txop = 0; - qparam.aifs = 3; + if (is_ocb) + qparam.aifs = 6; + else + qparam.aifs = 3; break; case IEEE80211_AC_VI: qparam.cw_max = aCWmin; qparam.cw_min = (aCWmin + 1) / 2 - 1; - if (use_11b) + if (is_ocb) + qparam.txop = 0; + else if (use_11b) qparam.txop = 6016/32; else qparam.txop = 3008/32; - qparam.aifs = 2; + + if (is_ocb) + qparam.aifs = 3; + else + qparam.aifs = 2; break; case IEEE80211_AC_VO: qparam.cw_max = (aCWmin + 1) / 2 - 1; qparam.cw_min = (aCWmin + 1) / 4 - 1; - if (use_11b) + if (is_ocb) + qparam.txop = 0; + else if (use_11b) qparam.txop = 3264/32; else qparam.txop = 1504/32; @@ -1263,6 +1339,7 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local, int ext_rates_len; int shift; u32 rate_flags; + bool have_80mhz = false; *offset = 0; @@ -1391,7 +1468,15 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local, *offset = noffset; } - if (sband->vht_cap.vht_supported) { + /* Check if any channel in this sband supports at least 80 MHz */ + for (i = 0; i < sband->n_channels; i++) { + if (!(sband->channels[i].flags & IEEE80211_CHAN_NO_80MHZ)) { + have_80mhz = true; + break; + } + } + + if (sband->vht_cap.vht_supported && have_80mhz) { if (end - pos < 2 + sizeof(struct ieee80211_vht_cap)) goto out_err; pos = ieee80211_ie_build_vht_cap(pos, &sband->vht_cap, @@ -1447,7 +1532,8 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, }; struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, - u8 *dst, u32 ratemask, + const u8 *src, const u8 *dst, + u32 ratemask, struct ieee80211_channel *chan, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, @@ -1472,8 +1558,8 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, else chandef.chan = chan; - skb = ieee80211_probereq_get(&local->hw, &sdata->vif, - ssid, ssid_len, 100 + ie_len); + skb = ieee80211_probereq_get(&local->hw, src, ssid, ssid_len, + 100 + ie_len); if (!skb) return NULL; @@ -1495,7 +1581,8 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, return skb; } -void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, +void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, + const u8 *src, const u8 *dst, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, u32 ratemask, bool directed, u32 tx_flags, @@ -1503,7 +1590,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, { struct sk_buff *skb; - skb = ieee80211_build_probe_req(sdata, dst, ratemask, channel, + skb = ieee80211_build_probe_req(sdata, src, 
dst, ratemask, channel, ssid, ssid_len, ie, ie_len, directed); if (skb) { @@ -1645,6 +1732,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) int res, i; bool reconfig_due_to_wowlan = false; struct ieee80211_sub_if_data *sched_scan_sdata; + struct cfg80211_sched_scan_request *sched_scan_req; bool sched_scan_stopped = false; #ifdef CONFIG_PM @@ -1813,6 +1901,10 @@ int ieee80211_reconfig(struct ieee80211_local *local) ieee80211_bss_info_change_notify(sdata, changed); sdata_unlock(sdata); break; + case NL80211_IFTYPE_OCB: + changed |= BSS_CHANGED_OCB; + ieee80211_bss_info_change_notify(sdata, changed); + break; case NL80211_IFTYPE_ADHOC: changed |= BSS_CHANGED_IBSS; /* fall through */ @@ -1931,13 +2023,15 @@ int ieee80211_reconfig(struct ieee80211_local *local) mutex_lock(&local->mtx); sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata, lockdep_is_held(&local->mtx)); - if (sched_scan_sdata && local->sched_scan_req) + sched_scan_req = rcu_dereference_protected(local->sched_scan_req, + lockdep_is_held(&local->mtx)); + if (sched_scan_sdata && sched_scan_req) /* * Sched scan stopped, but we don't want to report it. Instead, * we're trying to reschedule. */ if (__ieee80211_request_sched_scan_start(sched_scan_sdata, - local->sched_scan_req)) + sched_scan_req)) sched_scan_stopped = true; mutex_unlock(&local->mtx); @@ -1949,7 +2043,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) * We may want to change that later, however. */ if (!local->suspended || reconfig_due_to_wowlan) - drv_restart_complete(local); + drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART); if (!local->suspended) return 0; @@ -1960,6 +2054,9 @@ int ieee80211_reconfig(struct ieee80211_local *local) mb(); local->resuming = false; + if (!reconfig_due_to_wowlan) + drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND); + list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; @@ -2052,42 +2149,36 @@ static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id) return false; } -/** - * ieee80211_ie_split - split an IE buffer according to ordering - * - * @ies: the IE buffer - * @ielen: the length of the IE buffer - * @ids: an array with element IDs that are allowed before - * the split - * @n_ids: the size of the element ID array - * @offset: offset where to start splitting in the buffer - * - * This function splits an IE buffer by updating the @offset - * variable to point to the location where the buffer should be - * split. - * - * It assumes that the given IE buffer is well-formed, this - * has to be guaranteed by the caller! - * - * It also assumes that the IEs in the buffer are ordered - * correctly, if not the result of using this function will not - * be ordered correctly either, i.e. it does no reordering. - * - * The function returns the offset where the next part of the - * buffer starts, which may be @ielen if the entire (remainder) - * of the buffer should be used. 
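/*
 * The RIC-aware variant below treats a RIC Data element as owning
 * every element that follows it, up to the first element listed in
 * after_ric, so the split point can never land inside a RIC group.
 * A hypothetical caller splitting an IE buffer before the HT
 * capability:
 */
static size_t foo_split_before_ht(const u8 *ies, size_t ielen)
{
	static const u8 before_ht[] = { WLAN_EID_SSID, WLAN_EID_RIC_DATA };
	static const u8 after_ric[] = { WLAN_EID_HT_CAPABILITY };

	return ieee80211_ie_split_ric(ies, ielen, before_ht,
				      ARRAY_SIZE(before_ht), after_ric,
				      ARRAY_SIZE(after_ric), 0);
}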
- */ -size_t ieee80211_ie_split(const u8 *ies, size_t ielen, - const u8 *ids, int n_ids, size_t offset) +size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen, + const u8 *ids, int n_ids, + const u8 *after_ric, int n_after_ric, + size_t offset) { size_t pos = offset; - while (pos < ielen && ieee80211_id_in_list(ids, n_ids, ies[pos])) - pos += 2 + ies[pos + 1]; + while (pos < ielen && ieee80211_id_in_list(ids, n_ids, ies[pos])) { + if (ies[pos] == WLAN_EID_RIC_DATA && n_after_ric) { + pos += 2 + ies[pos + 1]; + + while (pos < ielen && + !ieee80211_id_in_list(after_ric, n_after_ric, + ies[pos])) + pos += 2 + ies[pos + 1]; + } else { + pos += 2 + ies[pos + 1]; + } + } return pos; } +size_t ieee80211_ie_split(const u8 *ies, size_t ielen, + const u8 *ids, int n_ids, size_t offset) +{ + return ieee80211_ie_split_ric(ies, ielen, ids, n_ids, NULL, 0, offset); +} +EXPORT_SYMBOL(ieee80211_ie_split); + size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset) { size_t pos = offset; @@ -2526,11 +2617,23 @@ void ieee80211_dfs_radar_detected_work(struct work_struct *work) struct ieee80211_local *local = container_of(work, struct ieee80211_local, radar_detected_work); struct cfg80211_chan_def chandef = local->hw.conf.chandef; + struct ieee80211_chanctx *ctx; + int num_chanctx = 0; + + mutex_lock(&local->chanctx_mtx); + list_for_each_entry(ctx, &local->chanctx_list, list) { + if (ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER) + continue; + + num_chanctx++; + chandef = ctx->conf.def; + } + mutex_unlock(&local->chanctx_mtx); ieee80211_dfs_cac_cancel(local); - if (local->use_chanctx) - /* currently not handled */ + if (num_chanctx > 1) + /* XXX: multi-channel is not supported yet */ WARN_ON(1); else cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL); diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index 671ce0d27a80b01bfa5d457ef6134a19d45ee208..bc9e8fc48785f940d2df58518ae9f0a55a105037 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c @@ -287,6 +287,8 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta) /* fall through */ case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: + bw = IEEE80211_STA_RX_BW_20; + break; case NL80211_CHAN_WIDTH_40: bw = sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ? 
IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20; diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c index 9181fb6d643786788abfed37d3ed3d5130e3cb3e..a4220e92f0cc20c0feb04c307f6ed72097f4fe3a 100644 --- a/net/mac80211/wep.c +++ b/net/mac80211/wep.c @@ -111,8 +111,6 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local, (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) return newhdr + hdrlen; - skb_set_network_header(skb, skb_network_offset(skb) + - IEEE80211_WEP_IV_LEN); ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen); return newhdr + hdrlen; } diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index 3b873989992cbe0d0ae9b09d76d799cb3585add4..9eb0aee9105b398b442380315ec758a430a103b1 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c @@ -53,11 +53,49 @@ static int wme_downgrade_ac(struct sk_buff *skb) } } +/** + * ieee80211_fix_reserved_tid - return the TID to use if this one is reserved + * @tid: the assumed-reserved TID + * + * Returns: the alternative TID to use, or 0 on error + */ +static inline u8 ieee80211_fix_reserved_tid(u8 tid) +{ + switch (tid) { + case 0: + return 3; + case 1: + return 2; + case 2: + return 1; + case 3: + return 0; + case 4: + return 5; + case 5: + return 4; + case 6: + return 7; + case 7: + return 6; + } + + return 0; +} + static u16 ieee80211_downgrade_queue(struct ieee80211_sub_if_data *sdata, - struct sk_buff *skb) + struct sta_info *sta, struct sk_buff *skb) { + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + /* in case we are a client verify acm is not set for this ac */ - while (unlikely(sdata->wmm_acm & BIT(skb->priority))) { + while (sdata->wmm_acm & BIT(skb->priority)) { + int ac = ieee802_1d_to_ac[skb->priority]; + + if (ifmgd->tx_tspec[ac].admitted_time && + skb->priority == ifmgd->tx_tspec[ac].up) + return ac; + if (wme_downgrade_ac(skb)) { /* * This should not really happen. The AP has marked all @@ -69,6 +107,10 @@ static u16 ieee80211_downgrade_queue(struct ieee80211_sub_if_data *sdata, } } + /* Check to see if this is a reserved TID */ + if (sta && sta->reserved_tid == skb->priority) + skb->priority = ieee80211_fix_reserved_tid(skb->priority); + /* look up which queue to use for frames with this 1d tag */ return ieee802_1d_to_ac[skb->priority]; } @@ -96,7 +138,7 @@ u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata, p = ieee80211_get_qos_ctl(hdr); skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK; - return ieee80211_downgrade_queue(sdata, skb); + return ieee80211_downgrade_queue(sdata, NULL, skb); } /* Indicate which queue to use. 
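 *
 * Note that the TID swap in ieee80211_fix_reserved_tid() above is
 * AC-preserving: ieee802_1d_to_ac[tid] ==
 * ieee802_1d_to_ac[ieee80211_fix_reserved_tid(tid)] for every TID
 * (0 and 3 both map to AC_BE, 1 and 2 to AC_BK, 4 and 5 to AC_VI,
 * 6 and 7 to AC_VO), so steering a frame off a reserved TID never
 * changes which hardware queue it is assigned to.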
*/ @@ -108,6 +150,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, const u8 *ra = NULL; bool qos = false; struct mac80211_qos_map *qos_map; + u16 ret; if (local->hw.queues < IEEE80211_NUM_ACS || skb->len < 6) { skb->priority = 0; /* required for correct WPA/11i MIC */ @@ -134,11 +177,20 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, break; #endif case NL80211_IFTYPE_STATION: + /* might be a TDLS station */ + sta = sta_info_get(sdata, skb->data); + if (sta) + qos = sta->sta.wme; + ra = sdata->u.mgd.bssid; break; case NL80211_IFTYPE_ADHOC: ra = skb->data; break; + case NL80211_IFTYPE_OCB: + /* all stations are required to support WME */ + qos = true; + break; default: break; } @@ -148,27 +200,29 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, if (sta) qos = sta->sta.wme; } - rcu_read_unlock(); if (!qos) { skb->priority = 0; /* required for correct WPA/11i MIC */ - return IEEE80211_AC_BE; + ret = IEEE80211_AC_BE; + goto out; } if (skb->protocol == sdata->control_port_protocol) { skb->priority = 7; - return ieee80211_downgrade_queue(sdata, skb); + goto downgrade; } /* use the data classifier to determine what 802.1d tag the * data frame has */ - rcu_read_lock(); qos_map = rcu_dereference(sdata->qos_map); skb->priority = cfg80211_classify8021d(skb, qos_map ? &qos_map->qos_map : NULL); - rcu_read_unlock(); - return ieee80211_downgrade_queue(sdata, skb); + downgrade: + ret = ieee80211_downgrade_queue(sdata, sta, skb); + out: + rcu_read_unlock(); + return ret; } /** diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h index 7fea4bb8acbc4985934ee265ade2546ab8e950ef..80151edc51955d913ef2b00608f29960557d73c7 100644 --- a/net/mac80211/wme.h +++ b/net/mac80211/wme.h @@ -13,8 +13,6 @@ #include #include "ieee80211_i.h" -extern const int ieee802_1d_to_ac[8]; - u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, struct ieee80211_hdr *hdr); diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 983527a4c1aba22c4ae6dc43c113ed1df9f1eb9c..12398fde02e87e7c7eb0eba1430e72287c6bb6f8 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -209,8 +209,6 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) pos = skb_push(skb, IEEE80211_TKIP_IV_LEN); memmove(pos, pos + IEEE80211_TKIP_IV_LEN, hdrlen); - skb_set_network_header(skb, skb_network_offset(skb) + - IEEE80211_TKIP_IV_LEN); pos += hdrlen; /* the HW only needs room for the IV, but not the actual IV */ @@ -434,8 +432,6 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) pos = skb_push(skb, IEEE80211_CCMP_HDR_LEN); memmove(pos, pos + IEEE80211_CCMP_HDR_LEN, hdrlen); - skb_set_network_header(skb, skb_network_offset(skb) + - IEEE80211_CCMP_HDR_LEN); /* the HW only needs room for the IV, but not the actual IV */ if (info->control.hw_key && @@ -575,7 +571,6 @@ ieee80211_crypto_cs_encrypt(struct ieee80211_tx_data *tx, pos = skb_push(skb, cs->hdr_len); memmove(pos, pos + cs->hdr_len, hdrlen); - skb_set_network_header(skb, skb_network_offset(skb) + cs->hdr_len); return TX_CONTINUE; } diff --git a/net/mac802154/Kconfig b/net/mac802154/Kconfig index 1818a99b3081e5a87a5c1ed72e6dba1760f2c1bc..aa462b480a394b0081ba73e2441d9cb4619e519b 100644 --- a/net/mac802154/Kconfig +++ b/net/mac802154/Kconfig @@ -16,5 +16,5 @@ config MAC802154 been tested yet! If you plan to use HardMAC IEEE 802.15.4 devices, you can - say N here. Alternatievly you can say M to compile it as + say N here. 
Alternatively you can say M to compile it as module. diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile index 9723d6f3f3e5b742e1d105b2667627949f1d7c7d..702d8b466317f4bae07b3770f9669288bebb9f7c 100644 --- a/net/mac802154/Makefile +++ b/net/mac802154/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_MAC802154) += mac802154.o -mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o \ - monitor.o wpan.o llsec.o +mac802154-objs := main.o rx.o tx.o mac_cmd.o mib.o \ + iface.o llsec.o util.o cfg.o ccflags-y += -D__CHECK_ENDIAN__ diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c new file mode 100644 index 0000000000000000000000000000000000000000..c035708ada160d5364417db8a4c3179819016d49 --- /dev/null +++ b/net/mac802154/cfg.c @@ -0,0 +1,210 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Authors: + * Alexander Aring + * + * Based on: net/mac80211/cfg.c + */ + +#include +#include + +#include "ieee802154_i.h" +#include "driver-ops.h" +#include "cfg.h" + +static struct net_device * +ieee802154_add_iface_deprecated(struct wpan_phy *wpan_phy, + const char *name, int type) +{ + struct ieee802154_local *local = wpan_phy_priv(wpan_phy); + struct net_device *dev; + + rtnl_lock(); + dev = ieee802154_if_add(local, name, type, + cpu_to_le64(0x0000000000000000ULL)); + rtnl_unlock(); + + return dev; +} + +static void ieee802154_del_iface_deprecated(struct wpan_phy *wpan_phy, + struct net_device *dev) +{ + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + + ieee802154_if_remove(sdata); +} + +static int +ieee802154_add_iface(struct wpan_phy *phy, const char *name, + enum nl802154_iftype type, __le64 extended_addr) +{ + struct ieee802154_local *local = wpan_phy_priv(phy); + struct net_device *err; + + err = ieee802154_if_add(local, name, type, extended_addr); + if (IS_ERR(err)) + return PTR_ERR(err); + + return 0; +} + +static int +ieee802154_del_iface(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev) +{ + ieee802154_if_remove(IEEE802154_WPAN_DEV_TO_SUB_IF(wpan_dev)); + + return 0; +} + +static int +ieee802154_set_channel(struct wpan_phy *wpan_phy, u8 page, u8 channel) +{ + struct ieee802154_local *local = wpan_phy_priv(wpan_phy); + int ret; + + ASSERT_RTNL(); + + /* check if phy support this setting */ + if (!(wpan_phy->channels_supported[page] & BIT(channel))) + return -EINVAL; + + ret = drv_set_channel(local, page, channel); + if (!ret) { + wpan_phy->current_page = page; + wpan_phy->current_channel = channel; + } + + return ret; +} + +static int +ieee802154_set_pan_id(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, + __le16 pan_id) +{ + ASSERT_RTNL(); + + /* TODO + * I am not sure about to check here on broadcast pan_id. + * Broadcast is a valid setting, comment from 802.15.4: + * If this value is 0xffff, the device is not associated. + * + * This could useful to simple deassociate an device. 
+ */ + if (pan_id == cpu_to_le16(IEEE802154_PAN_ID_BROADCAST)) + return -EINVAL; + + wpan_dev->pan_id = pan_id; + return 0; +} + +static int +ieee802154_set_backoff_exponent(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + u8 min_be, u8 max_be) +{ + struct ieee802154_local *local = wpan_phy_priv(wpan_phy); + + ASSERT_RTNL(); + + if (!(local->hw.flags & IEEE802154_HW_CSMA_PARAMS)) + return -EOPNOTSUPP; + + wpan_dev->min_be = min_be; + wpan_dev->max_be = max_be; + return 0; +} + +static int +ieee802154_set_short_addr(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, + __le16 short_addr) +{ + ASSERT_RTNL(); + + /* TODO + * I am not sure about to check here on broadcast short_addr. + * Broadcast is a valid setting, comment from 802.15.4: + * A value of 0xfffe indicates that the device has + * associated but has not been allocated an address. A + * value of 0xffff indicates that the device does not + * have a short address. + * + * I think we should allow to set these settings but + * don't allow to allow socket communication with it. + */ + if (short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC) || + short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST)) + return -EINVAL; + + wpan_dev->short_addr = short_addr; + return 0; +} + +static int +ieee802154_set_max_csma_backoffs(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + u8 max_csma_backoffs) +{ + struct ieee802154_local *local = wpan_phy_priv(wpan_phy); + + ASSERT_RTNL(); + + if (!(local->hw.flags & IEEE802154_HW_CSMA_PARAMS)) + return -EOPNOTSUPP; + + wpan_dev->csma_retries = max_csma_backoffs; + return 0; +} + +static int +ieee802154_set_max_frame_retries(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + s8 max_frame_retries) +{ + struct ieee802154_local *local = wpan_phy_priv(wpan_phy); + + ASSERT_RTNL(); + + if (!(local->hw.flags & IEEE802154_HW_FRAME_RETRIES)) + return -EOPNOTSUPP; + + wpan_dev->frame_retries = max_frame_retries; + return 0; +} + +static int +ieee802154_set_lbt_mode(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, + bool mode) +{ + struct ieee802154_local *local = wpan_phy_priv(wpan_phy); + + ASSERT_RTNL(); + + if (!(local->hw.flags & IEEE802154_HW_LBT)) + return -EOPNOTSUPP; + + wpan_dev->lbt = mode; + return 0; +} + +const struct cfg802154_ops mac802154_config_ops = { + .add_virtual_intf_deprecated = ieee802154_add_iface_deprecated, + .del_virtual_intf_deprecated = ieee802154_del_iface_deprecated, + .add_virtual_intf = ieee802154_add_iface, + .del_virtual_intf = ieee802154_del_iface, + .set_channel = ieee802154_set_channel, + .set_pan_id = ieee802154_set_pan_id, + .set_short_addr = ieee802154_set_short_addr, + .set_backoff_exponent = ieee802154_set_backoff_exponent, + .set_max_csma_backoffs = ieee802154_set_max_csma_backoffs, + .set_max_frame_retries = ieee802154_set_max_frame_retries, + .set_lbt_mode = ieee802154_set_lbt_mode, +}; diff --git a/net/mac802154/cfg.h b/net/mac802154/cfg.h new file mode 100644 index 0000000000000000000000000000000000000000..e2718f981e82d81be46edbda37f5ce2fb6d1b8d2 --- /dev/null +++ b/net/mac802154/cfg.h @@ -0,0 +1,9 @@ +/* mac802154 configuration hooks for cfg802154 + */ + +#ifndef __CFG_H +#define __CFG_H + +extern const struct cfg802154_ops mac802154_config_ops; + +#endif /* __CFG_H */ diff --git a/net/mac802154/driver-ops.h b/net/mac802154/driver-ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f21e864613d09b6d02c4ffe8f2b7e72e78b75e16 --- /dev/null +++ b/net/mac802154/driver-ops.h @@ -0,0 +1,222 @@ +#ifndef 
__MAC802154_DRVIER_OPS +#define __MAC802154_DRIVER_OPS + +#include +#include + +#include + +#include "ieee802154_i.h" + +static inline int +drv_xmit_async(struct ieee802154_local *local, struct sk_buff *skb) +{ + return local->ops->xmit_async(&local->hw, skb); +} + +static inline int +drv_xmit_sync(struct ieee802154_local *local, struct sk_buff *skb) +{ + /* don't allow other operations while sync xmit */ + ASSERT_RTNL(); + + might_sleep(); + + return local->ops->xmit_sync(&local->hw, skb); +} + +static inline int drv_start(struct ieee802154_local *local) +{ + might_sleep(); + + local->started = true; + smp_mb(); + + return local->ops->start(&local->hw); +} + +static inline void drv_stop(struct ieee802154_local *local) +{ + might_sleep(); + + local->ops->stop(&local->hw); + + /* sync away all work on the tasklet before clearing started */ + tasklet_disable(&local->tasklet); + tasklet_enable(&local->tasklet); + + barrier(); + + local->started = false; +} + +static inline int +drv_set_channel(struct ieee802154_local *local, u8 page, u8 channel) +{ + might_sleep(); + + return local->ops->set_channel(&local->hw, page, channel); +} + +static inline int drv_set_tx_power(struct ieee802154_local *local, s8 dbm) +{ + might_sleep(); + + if (!local->ops->set_txpower) { + WARN_ON(1); + return -EOPNOTSUPP; + } + + return local->ops->set_txpower(&local->hw, dbm); +} + +static inline int drv_set_cca_mode(struct ieee802154_local *local, u8 cca_mode) +{ + might_sleep(); + + if (!local->ops->set_cca_mode) { + WARN_ON(1); + return -EOPNOTSUPP; + } + + return local->ops->set_cca_mode(&local->hw, cca_mode); +} + +static inline int drv_set_lbt_mode(struct ieee802154_local *local, bool mode) +{ + might_sleep(); + + if (!local->ops->set_lbt) { + WARN_ON(1); + return -EOPNOTSUPP; + } + + return local->ops->set_lbt(&local->hw, mode); +} + +static inline int +drv_set_cca_ed_level(struct ieee802154_local *local, s32 ed_level) +{ + might_sleep(); + + if (!local->ops->set_cca_ed_level) { + WARN_ON(1); + return -EOPNOTSUPP; + } + + return local->ops->set_cca_ed_level(&local->hw, ed_level); +} + +static inline int drv_set_pan_id(struct ieee802154_local *local, __le16 pan_id) +{ + struct ieee802154_hw_addr_filt filt; + + might_sleep(); + + if (!local->ops->set_hw_addr_filt) { + WARN_ON(1); + return -EOPNOTSUPP; + } + + filt.pan_id = pan_id; + + return local->ops->set_hw_addr_filt(&local->hw, &filt, + IEEE802154_AFILT_PANID_CHANGED); +} + +static inline int +drv_set_extended_addr(struct ieee802154_local *local, __le64 extended_addr) +{ + struct ieee802154_hw_addr_filt filt; + + might_sleep(); + + if (!local->ops->set_hw_addr_filt) { + WARN_ON(1); + return -EOPNOTSUPP; + } + + filt.ieee_addr = extended_addr; + + return local->ops->set_hw_addr_filt(&local->hw, &filt, + IEEE802154_AFILT_IEEEADDR_CHANGED); +} + +static inline int +drv_set_short_addr(struct ieee802154_local *local, __le16 short_addr) +{ + struct ieee802154_hw_addr_filt filt; + + might_sleep(); + + if (!local->ops->set_hw_addr_filt) { + WARN_ON(1); + return -EOPNOTSUPP; + } + + filt.short_addr = short_addr; + + return local->ops->set_hw_addr_filt(&local->hw, &filt, + IEEE802154_AFILT_SADDR_CHANGED); +} + +static inline int +drv_set_pan_coord(struct ieee802154_local *local, bool is_coord) +{ + struct ieee802154_hw_addr_filt filt; + + might_sleep(); + + if (!local->ops->set_hw_addr_filt) { + WARN_ON(1); + return -EOPNOTSUPP; + } + + filt.pan_coord = is_coord; + + return local->ops->set_hw_addr_filt(&local->hw, &filt, + IEEE802154_AFILT_PANC_CHANGED); +} + +static 
inline int +drv_set_csma_params(struct ieee802154_local *local, u8 min_be, u8 max_be, + u8 max_csma_backoffs) +{ + might_sleep(); + + if (!local->ops->set_csma_params) { + WARN_ON(1); + return -EOPNOTSUPP; + } + + return local->ops->set_csma_params(&local->hw, min_be, max_be, + max_csma_backoffs); +} + +static inline int +drv_set_max_frame_retries(struct ieee802154_local *local, s8 max_frame_retries) +{ + might_sleep(); + + if (!local->ops->set_frame_retries) { + WARN_ON(1); + return -EOPNOTSUPP; + } + + return local->ops->set_frame_retries(&local->hw, max_frame_retries); +} + +static inline int +drv_set_promiscuous_mode(struct ieee802154_local *local, bool on) +{ + might_sleep(); + + if (!local->ops->set_promiscuous_mode) { + WARN_ON(1); + return -EOPNOTSUPP; + } + + return local->ops->set_promiscuous_mode(&local->hw, on); +} + +#endif /* __MAC802154_DRVIER_OPS */ diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c deleted file mode 100644 index b36b2b99657870196437b616bb71c5a12f71ebb0..0000000000000000000000000000000000000000 --- a/net/mac802154/ieee802154_dev.c +++ /dev/null @@ -1,415 +0,0 @@ -/* - * Copyright (C) 2007-2012 Siemens AG - * - * Written by: - * Alexander Smirnov - * - * Based on the code from 'linux-zigbee.sourceforge.net' project. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
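/*
 * All of the drv_*() wrappers above follow the same guard pattern for
 * optional ieee802154_ops members: warn and fail with -EOPNOTSUPP when
 * the driver did not provide the hook. A hypothetical wrapper for a
 * new optional op (set_foo_mode does not exist in ieee802154_ops)
 * would take the same shape:
 */
static inline int drv_set_foo_mode(struct ieee802154_local *local, bool on)
{
	might_sleep();

	if (!local->ops->set_foo_mode) {
		WARN_ON(1);
		return -EOPNOTSUPP;
	}

	return local->ops->set_foo_mode(&local->hw, on);
}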
- */ - -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "mac802154.h" - -int mac802154_slave_open(struct net_device *dev) -{ - struct mac802154_sub_if_data *priv = netdev_priv(dev); - struct mac802154_sub_if_data *subif; - struct mac802154_priv *ipriv = priv->hw; - int res = 0; - - ASSERT_RTNL(); - - if (priv->type == IEEE802154_DEV_WPAN) { - mutex_lock(&priv->hw->slaves_mtx); - list_for_each_entry(subif, &priv->hw->slaves, list) { - if (subif != priv && subif->type == priv->type && - subif->running) { - mutex_unlock(&priv->hw->slaves_mtx); - return -EBUSY; - } - } - mutex_unlock(&priv->hw->slaves_mtx); - } - - mutex_lock(&priv->hw->slaves_mtx); - priv->running = true; - mutex_unlock(&priv->hw->slaves_mtx); - - if (ipriv->open_count++ == 0) { - res = ipriv->ops->start(&ipriv->hw); - WARN_ON(res); - if (res) - goto err; - } - - if (ipriv->ops->ieee_addr) { - __le64 addr = ieee802154_devaddr_from_raw(dev->dev_addr); - - res = ipriv->ops->ieee_addr(&ipriv->hw, addr); - WARN_ON(res); - if (res) - goto err; - mac802154_dev_set_ieee_addr(dev); - } - - netif_start_queue(dev); - return 0; -err: - priv->hw->open_count--; - - return res; -} - -int mac802154_slave_close(struct net_device *dev) -{ - struct mac802154_sub_if_data *priv = netdev_priv(dev); - struct mac802154_priv *ipriv = priv->hw; - - ASSERT_RTNL(); - - netif_stop_queue(dev); - - mutex_lock(&priv->hw->slaves_mtx); - priv->running = false; - mutex_unlock(&priv->hw->slaves_mtx); - - if (!--ipriv->open_count) - ipriv->ops->stop(&ipriv->hw); - - return 0; -} - -static int -mac802154_netdev_register(struct wpan_phy *phy, struct net_device *dev) -{ - struct mac802154_sub_if_data *priv; - struct mac802154_priv *ipriv; - int err; - - ipriv = wpan_phy_priv(phy); - - priv = netdev_priv(dev); - priv->dev = dev; - priv->hw = ipriv; - - dev->needed_headroom = ipriv->hw.extra_tx_headroom; - - SET_NETDEV_DEV(dev, &ipriv->phy->dev); - - mutex_lock(&ipriv->slaves_mtx); - if (!ipriv->running) { - mutex_unlock(&ipriv->slaves_mtx); - return -ENODEV; - } - mutex_unlock(&ipriv->slaves_mtx); - - err = register_netdev(dev); - if (err < 0) - return err; - - rtnl_lock(); - mutex_lock(&ipriv->slaves_mtx); - list_add_tail_rcu(&priv->list, &ipriv->slaves); - mutex_unlock(&ipriv->slaves_mtx); - rtnl_unlock(); - - return 0; -} - -static void -mac802154_del_iface(struct wpan_phy *phy, struct net_device *dev) -{ - struct mac802154_sub_if_data *sdata; - - ASSERT_RTNL(); - - sdata = netdev_priv(dev); - - BUG_ON(sdata->hw->phy != phy); - - mutex_lock(&sdata->hw->slaves_mtx); - list_del_rcu(&sdata->list); - mutex_unlock(&sdata->hw->slaves_mtx); - - synchronize_rcu(); - unregister_netdevice(sdata->dev); -} - -static struct net_device * -mac802154_add_iface(struct wpan_phy *phy, const char *name, int type) -{ - struct net_device *dev; - int err = -ENOMEM; - - switch (type) { - case IEEE802154_DEV_MONITOR: - dev = alloc_netdev(sizeof(struct mac802154_sub_if_data), - name, NET_NAME_UNKNOWN, - mac802154_monitor_setup); - break; - case IEEE802154_DEV_WPAN: - dev = alloc_netdev(sizeof(struct mac802154_sub_if_data), - name, NET_NAME_UNKNOWN, - mac802154_wpan_setup); - break; - default: - dev = NULL; - err = -EINVAL; - break; - } - if (!dev) - goto err; - - err = mac802154_netdev_register(phy, dev); - if (err) - goto err_free; - - dev_hold(dev); /* we return an incremented device refcount */ - return dev; - -err_free: - free_netdev(dev); -err: - return ERR_PTR(err); -} - -static int mac802154_set_txpower(struct wpan_phy *phy, 
int db) -{ - struct mac802154_priv *priv = wpan_phy_priv(phy); - - return priv->ops->set_txpower(&priv->hw, db); -} - -static int mac802154_set_lbt(struct wpan_phy *phy, bool on) -{ - struct mac802154_priv *priv = wpan_phy_priv(phy); - - return priv->ops->set_lbt(&priv->hw, on); -} - -static int mac802154_set_cca_mode(struct wpan_phy *phy, u8 mode) -{ - struct mac802154_priv *priv = wpan_phy_priv(phy); - - return priv->ops->set_cca_mode(&priv->hw, mode); -} - -static int mac802154_set_cca_ed_level(struct wpan_phy *phy, s32 level) -{ - struct mac802154_priv *priv = wpan_phy_priv(phy); - - return priv->ops->set_cca_ed_level(&priv->hw, level); -} - -static int mac802154_set_csma_params(struct wpan_phy *phy, u8 min_be, - u8 max_be, u8 retries) -{ - struct mac802154_priv *priv = wpan_phy_priv(phy); - - return priv->ops->set_csma_params(&priv->hw, min_be, max_be, retries); -} - -static int mac802154_set_frame_retries(struct wpan_phy *phy, s8 retries) -{ - struct mac802154_priv *priv = wpan_phy_priv(phy); - - return priv->ops->set_frame_retries(&priv->hw, retries); -} - -struct ieee802154_dev * -ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops) -{ - struct wpan_phy *phy; - struct mac802154_priv *priv; - size_t priv_size; - - if (!ops || !ops->xmit || !ops->ed || !ops->start || - !ops->stop || !ops->set_channel) { - pr_err("undefined IEEE802.15.4 device operations\n"); - return NULL; - } - - /* Ensure 32-byte alignment of our private data and hw private data. - * We use the wpan_phy priv data for both our mac802154_priv and for - * the driver's private data - * - * in memory it'll be like this: - * - * +-----------------------+ - * | struct wpan_phy | - * +-----------------------+ - * | struct mac802154_priv | - * +-----------------------+ - * | driver's private data | - * +-----------------------+ - * - * Due to ieee802154 layer isn't aware of driver and MAC structures, - * so lets allign them here. 
- */ - - priv_size = ALIGN(sizeof(*priv), NETDEV_ALIGN) + priv_data_len; - - phy = wpan_phy_alloc(priv_size); - if (!phy) { - pr_err("failure to allocate master IEEE802.15.4 device\n"); - return NULL; - } - - priv = wpan_phy_priv(phy); - priv->phy = phy; - priv->hw.phy = priv->phy; - priv->hw.priv = (char *)priv + ALIGN(sizeof(*priv), NETDEV_ALIGN); - priv->ops = ops; - - INIT_LIST_HEAD(&priv->slaves); - mutex_init(&priv->slaves_mtx); - - return &priv->hw; -} -EXPORT_SYMBOL(ieee802154_alloc_device); - -void ieee802154_free_device(struct ieee802154_dev *hw) -{ - struct mac802154_priv *priv = mac802154_to_priv(hw); - - BUG_ON(!list_empty(&priv->slaves)); - - mutex_destroy(&priv->slaves_mtx); - - wpan_phy_free(priv->phy); -} -EXPORT_SYMBOL(ieee802154_free_device); - -int ieee802154_register_device(struct ieee802154_dev *dev) -{ - struct mac802154_priv *priv = mac802154_to_priv(dev); - int rc = -ENOSYS; - - if (dev->flags & IEEE802154_HW_TXPOWER) { - if (!priv->ops->set_txpower) - goto out; - - priv->phy->set_txpower = mac802154_set_txpower; - } - - if (dev->flags & IEEE802154_HW_LBT) { - if (!priv->ops->set_lbt) - goto out; - - priv->phy->set_lbt = mac802154_set_lbt; - } - - if (dev->flags & IEEE802154_HW_CCA_MODE) { - if (!priv->ops->set_cca_mode) - goto out; - - priv->phy->set_cca_mode = mac802154_set_cca_mode; - } - - if (dev->flags & IEEE802154_HW_CCA_ED_LEVEL) { - if (!priv->ops->set_cca_ed_level) - goto out; - - priv->phy->set_cca_ed_level = mac802154_set_cca_ed_level; - } - - if (dev->flags & IEEE802154_HW_CSMA_PARAMS) { - if (!priv->ops->set_csma_params) - goto out; - - priv->phy->set_csma_params = mac802154_set_csma_params; - } - - if (dev->flags & IEEE802154_HW_FRAME_RETRIES) { - if (!priv->ops->set_frame_retries) - goto out; - - priv->phy->set_frame_retries = mac802154_set_frame_retries; - } - - priv->dev_workqueue = - create_singlethread_workqueue(wpan_phy_name(priv->phy)); - if (!priv->dev_workqueue) { - rc = -ENOMEM; - goto out; - } - - wpan_phy_set_dev(priv->phy, priv->hw.parent); - - priv->phy->add_iface = mac802154_add_iface; - priv->phy->del_iface = mac802154_del_iface; - - rc = wpan_phy_register(priv->phy); - if (rc < 0) - goto out_wq; - - rtnl_lock(); - - mutex_lock(&priv->slaves_mtx); - priv->running = MAC802154_DEVICE_RUN; - mutex_unlock(&priv->slaves_mtx); - - rtnl_unlock(); - - return 0; - -out_wq: - destroy_workqueue(priv->dev_workqueue); -out: - return rc; -} -EXPORT_SYMBOL(ieee802154_register_device); - -void ieee802154_unregister_device(struct ieee802154_dev *dev) -{ - struct mac802154_priv *priv = mac802154_to_priv(dev); - struct mac802154_sub_if_data *sdata, *next; - - flush_workqueue(priv->dev_workqueue); - destroy_workqueue(priv->dev_workqueue); - - rtnl_lock(); - - mutex_lock(&priv->slaves_mtx); - priv->running = MAC802154_DEVICE_STOPPED; - mutex_unlock(&priv->slaves_mtx); - - list_for_each_entry_safe(sdata, next, &priv->slaves, list) { - mutex_lock(&sdata->hw->slaves_mtx); - list_del(&sdata->list); - mutex_unlock(&sdata->hw->slaves_mtx); - - unregister_netdevice(sdata->dev); - } - - rtnl_unlock(); - - wpan_phy_unregister(priv->phy); -} -EXPORT_SYMBOL(ieee802154_unregister_device); - -MODULE_DESCRIPTION("IEEE 802.15.4 implementation"); -MODULE_LICENSE("GPL v2"); diff --git a/net/mac802154/mac802154.h b/net/mac802154/ieee802154_i.h similarity index 63% rename from net/mac802154/mac802154.h rename to net/mac802154/ieee802154_i.h index 762a6f849c6b7d3edf3e8677ee6f2081c10c8fac..bebd70ffc7a3d101551f023e515db24033883f0b 100644 --- a/net/mac802154/mac802154.h +++ 
b/net/mac802154/ieee802154_i.h @@ -10,29 +10,28 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * Written by: * Pavel Smolenskiy * Maxim Gorbachyov * Dmitry Eremin-Solenikov * Alexander Smirnov */ -#ifndef MAC802154_H -#define MAC802154_H +#ifndef __IEEE802154_I_H +#define __IEEE802154_I_H #include +#include +#include #include +#include #include #include "llsec.h" /* mac802154 device private data */ -struct mac802154_priv { - struct ieee802154_dev hw; - struct ieee802154_ops *ops; +struct ieee802154_local { + struct ieee802154_hw hw; + const struct ieee802154_ops *ops; /* ieee802154 phy */ struct wpan_phy *phy; @@ -46,23 +45,29 @@ struct mac802154_priv { * * So atomic readers can use any of this protection methods. */ - struct list_head slaves; - struct mutex slaves_mtx; + struct list_head interfaces; + struct mutex iflist_mtx; /* This one is used for scanning and other jobs not to be interfered * with serial driver. */ - struct workqueue_struct *dev_workqueue; + struct workqueue_struct *workqueue; - /* SoftMAC device is registered and running. One can add subinterfaces. - * This flag should be modified under slaves_mtx and RTNL, so you can - * read them using any of protection methods. - */ - bool running; + struct hrtimer ifs_timer; + + bool started; + + struct tasklet_struct tasklet; + struct sk_buff_head skb_queue; +}; + +enum { + IEEE802154_RX_MSG = 1, }; -#define MAC802154_DEVICE_STOPPED 0x00 -#define MAC802154_DEVICE_RUN 0x01 +enum ieee802154_sdata_state_bits { + SDATA_STATE_RUNNING, +}; /* Slave interface definition. * @@ -70,72 +75,74 @@ struct mac802154_priv { * Each ieee802154 device/transceiver may have several slaves and able * to be associated with several networks at the same time. */ -struct mac802154_sub_if_data { +struct ieee802154_sub_if_data { struct list_head list; /* the ieee802154_priv->slaves list */ - struct mac802154_priv *hw; + struct wpan_dev wpan_dev; + + struct ieee802154_local *local; struct net_device *dev; - int type; - bool running; + unsigned long state; + char name[IFNAMSIZ]; spinlock_t mib_lock; - __le16 pan_id; - __le16 short_addr; - __le64 extended_addr; - - u8 chan; - u8 page; - - struct ieee802154_mac_params mac_params; - - /* MAC BSN field */ - u8 bsn; - /* MAC DSN field */ - u8 dsn; - /* protects sec from concurrent access by netlink. access by * encrypt/decrypt/header_create safe without additional protection. */ struct mutex sec_mtx; struct mac802154_llsec sec; + /* must be last, dynamically sized area in this! 
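(ieee802154_if_add() reserves hw.vif_data_size extra bytes for the driver's per-vif private data right behind it)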
*/ + struct ieee802154_vif vif; }; -#define mac802154_to_priv(_hw) container_of(_hw, struct mac802154_priv, hw) - #define MAC802154_CHAN_NONE 0xff /* No channel is assigned */ -extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced; -extern struct ieee802154_mlme_ops mac802154_mlme_wpan; - -int mac802154_slave_open(struct net_device *dev); -int mac802154_slave_close(struct net_device *dev); +/* utility functions/constants */ +extern const void *const mac802154_wpan_phy_privid; /* for wpan_phy privid */ + +static inline struct ieee802154_local * +hw_to_local(struct ieee802154_hw *hw) +{ + return container_of(hw, struct ieee802154_local, hw); +} + +static inline struct ieee802154_sub_if_data * +IEEE802154_DEV_TO_SUB_IF(const struct net_device *dev) +{ + return netdev_priv(dev); +} + +static inline struct ieee802154_sub_if_data * +IEEE802154_WPAN_DEV_TO_SUB_IF(struct wpan_dev *wpan_dev) +{ + return container_of(wpan_dev, struct ieee802154_sub_if_data, wpan_dev); +} + +static inline bool +ieee802154_sdata_running(struct ieee802154_sub_if_data *sdata) +{ + return test_bit(SDATA_STATE_RUNNING, &sdata->state); +} -void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb); -void mac802154_monitor_setup(struct net_device *dev); - -void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb); -void mac802154_wpan_setup(struct net_device *dev); +extern struct ieee802154_mlme_ops mac802154_mlme_wpan; -netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb, - u8 page, u8 chan); +netdev_tx_t +ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev); +netdev_tx_t +ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); +enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer); /* MIB callbacks */ void mac802154_dev_set_short_addr(struct net_device *dev, __le16 val); __le16 mac802154_dev_get_short_addr(const struct net_device *dev); -void mac802154_dev_set_ieee_addr(struct net_device *dev); __le16 mac802154_dev_get_pan_id(const struct net_device *dev); void mac802154_dev_set_pan_id(struct net_device *dev, __le16 val); void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan); u8 mac802154_dev_get_dsn(const struct net_device *dev); -int mac802154_set_mac_params(struct net_device *dev, - const struct ieee802154_mac_params *params); -void mac802154_get_mac_params(struct net_device *dev, - struct ieee802154_mac_params *params); - int mac802154_get_params(struct net_device *dev, struct ieee802154_llsec_params *params); int mac802154_set_params(struct net_device *dev, @@ -169,4 +176,13 @@ void mac802154_get_table(struct net_device *dev, struct ieee802154_llsec_table **t); void mac802154_unlock_table(struct net_device *dev); -#endif /* MAC802154_H */ +/* interface handling */ +int ieee802154_iface_init(void); +void ieee802154_iface_exit(void); +void ieee802154_if_remove(struct ieee802154_sub_if_data *sdata); +struct net_device * +ieee802154_if_add(struct ieee802154_local *local, const char *name, + enum nl802154_iftype type, __le64 extended_addr); +void ieee802154_remove_interfaces(struct ieee802154_local *local); + +#endif /* __IEEE802154_I_H */ diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c new file mode 100644 index 0000000000000000000000000000000000000000..9ae893057dd761e4e82f563b9f24b4c59e6254bc --- /dev/null +++ b/net/mac802154/iface.c @@ -0,0 +1,586 @@ +/* + * Copyright 2007-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify 
+ * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Written by: + * Dmitry Eremin-Solenikov + * Sergey Lapin + * Maxim Gorbachyov + * Alexander Smirnov + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "ieee802154_i.h" +#include "driver-ops.h" + +static int mac802154_wpan_update_llsec(struct net_device *dev) +{ + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev); + struct wpan_dev *wpan_dev = &sdata->wpan_dev; + int rc = 0; + + if (ops->llsec) { + struct ieee802154_llsec_params params; + int changed = 0; + + params.pan_id = wpan_dev->pan_id; + changed |= IEEE802154_LLSEC_PARAM_PAN_ID; + + params.hwaddr = wpan_dev->extended_addr; + changed |= IEEE802154_LLSEC_PARAM_HWADDR; + + rc = ops->llsec->set_params(dev, &params, changed); + } + + return rc; +} + +static int +mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + struct wpan_dev *wpan_dev = &sdata->wpan_dev; + struct sockaddr_ieee802154 *sa = + (struct sockaddr_ieee802154 *)&ifr->ifr_addr; + int err = -ENOIOCTLCMD; + + ASSERT_RTNL(); + + spin_lock_bh(&sdata->mib_lock); + + switch (cmd) { + case SIOCGIFADDR: + { + u16 pan_id, short_addr; + + pan_id = le16_to_cpu(wpan_dev->pan_id); + short_addr = le16_to_cpu(wpan_dev->short_addr); + if (pan_id == IEEE802154_PANID_BROADCAST || + short_addr == IEEE802154_ADDR_BROADCAST) { + err = -EADDRNOTAVAIL; + break; + } + + sa->family = AF_IEEE802154; + sa->addr.addr_type = IEEE802154_ADDR_SHORT; + sa->addr.pan_id = pan_id; + sa->addr.short_addr = short_addr; + + err = 0; + break; + } + case SIOCSIFADDR: + if (netif_running(dev)) { + spin_unlock_bh(&sdata->mib_lock); + return -EBUSY; + } + + dev_warn(&dev->dev, + "Using DEBUGing ioctl SIOCSIFADDR isn't recommended!\n"); + if (sa->family != AF_IEEE802154 || + sa->addr.addr_type != IEEE802154_ADDR_SHORT || + sa->addr.pan_id == IEEE802154_PANID_BROADCAST || + sa->addr.short_addr == IEEE802154_ADDR_BROADCAST || + sa->addr.short_addr == IEEE802154_ADDR_UNDEF) { + err = -EINVAL; + break; + } + + wpan_dev->pan_id = cpu_to_le16(sa->addr.pan_id); + wpan_dev->short_addr = cpu_to_le16(sa->addr.short_addr); + + err = mac802154_wpan_update_llsec(dev); + break; + } + + spin_unlock_bh(&sdata->mib_lock); + return err; +} + +static int mac802154_wpan_mac_addr(struct net_device *dev, void *p) +{ + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + struct sockaddr *addr = p; + __le64 extended_addr; + + if (netif_running(dev)) + return -EBUSY; + + ieee802154_be64_to_le64(&extended_addr, addr->sa_data); + if (!ieee802154_is_valid_extended_addr(extended_addr)) + return -EINVAL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + sdata->wpan_dev.extended_addr = extended_addr; + + return mac802154_wpan_update_llsec(dev); +} + +static int mac802154_slave_open(struct net_device *dev) +{ + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + struct ieee802154_sub_if_data *subif; + struct ieee802154_local *local = sdata->local; + int res = 0; + + ASSERT_RTNL(); + + if (sdata->vif.type == 
NL802154_IFTYPE_NODE) { + mutex_lock(&sdata->local->iflist_mtx); + list_for_each_entry(subif, &sdata->local->interfaces, list) { + if (subif != sdata && + subif->vif.type == sdata->vif.type && + ieee802154_sdata_running(subif)) { + mutex_unlock(&sdata->local->iflist_mtx); + return -EBUSY; + } + } + mutex_unlock(&sdata->local->iflist_mtx); + } + + set_bit(SDATA_STATE_RUNNING, &sdata->state); + + if (!local->open_count) { + res = drv_start(local); + WARN_ON(res); + if (res) + goto err; + } + + local->open_count++; + netif_start_queue(dev); + return 0; +err: + /* might already be clear but that doesn't matter */ + clear_bit(SDATA_STATE_RUNNING, &sdata->state); + + return res; +} + +static int mac802154_wpan_open(struct net_device *dev) +{ + int rc; + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + struct ieee802154_local *local = sdata->local; + struct wpan_dev *wpan_dev = &sdata->wpan_dev; + struct wpan_phy *phy = sdata->local->phy; + + rc = mac802154_slave_open(dev); + if (rc < 0) + return rc; + + mutex_lock(&phy->pib_lock); + + if (local->hw.flags & IEEE802154_HW_PROMISCUOUS) { + rc = drv_set_promiscuous_mode(local, + wpan_dev->promiscuous_mode); + if (rc < 0) + goto out; + } + + if (local->hw.flags & IEEE802154_HW_AFILT) { + rc = drv_set_pan_id(local, wpan_dev->pan_id); + if (rc < 0) + goto out; + + rc = drv_set_extended_addr(local, wpan_dev->extended_addr); + if (rc < 0) + goto out; + + rc = drv_set_short_addr(local, wpan_dev->short_addr); + if (rc < 0) + goto out; + } + + if (local->hw.flags & IEEE802154_HW_LBT) { + rc = drv_set_lbt_mode(local, wpan_dev->lbt); + if (rc < 0) + goto out; + } + + if (local->hw.flags & IEEE802154_HW_CSMA_PARAMS) { + rc = drv_set_csma_params(local, wpan_dev->min_be, + wpan_dev->max_be, + wpan_dev->csma_retries); + if (rc < 0) + goto out; + } + + if (local->hw.flags & IEEE802154_HW_FRAME_RETRIES) { + rc = drv_set_max_frame_retries(local, wpan_dev->frame_retries); + if (rc < 0) + goto out; + } + + mutex_unlock(&phy->pib_lock); + return 0; + +out: + mutex_unlock(&phy->pib_lock); + return rc; +} + +static int mac802154_slave_close(struct net_device *dev) +{ + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + struct ieee802154_local *local = sdata->local; + + ASSERT_RTNL(); + + hrtimer_cancel(&local->ifs_timer); + + netif_stop_queue(dev); + local->open_count--; + + clear_bit(SDATA_STATE_RUNNING, &sdata->state); + + if (!local->open_count) + drv_stop(local); + + return 0; +} + +static int mac802154_set_header_security(struct ieee802154_sub_if_data *sdata, + struct ieee802154_hdr *hdr, + const struct ieee802154_mac_cb *cb) +{ + struct ieee802154_llsec_params params; + u8 level; + + mac802154_llsec_get_params(&sdata->sec, ¶ms); + + if (!params.enabled && cb->secen_override && cb->secen) + return -EINVAL; + if (!params.enabled || + (cb->secen_override && !cb->secen) || + !params.out_level) + return 0; + if (cb->seclevel_override && !cb->seclevel) + return -EINVAL; + + level = cb->seclevel_override ? 
cb->seclevel : params.out_level; + + hdr->fc.security_enabled = 1; + hdr->sec.level = level; + hdr->sec.key_id_mode = params.out_key.mode; + if (params.out_key.mode == IEEE802154_SCF_KEY_SHORT_INDEX) + hdr->sec.short_src = params.out_key.short_source; + else if (params.out_key.mode == IEEE802154_SCF_KEY_HW_INDEX) + hdr->sec.extended_src = params.out_key.extended_source; + hdr->sec.key_id = params.out_key.id; + + return 0; +} + +static int mac802154_header_create(struct sk_buff *skb, + struct net_device *dev, + unsigned short type, + const void *daddr, + const void *saddr, + unsigned len) +{ + struct ieee802154_hdr hdr; + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + struct wpan_dev *wpan_dev = &sdata->wpan_dev; + struct ieee802154_mac_cb *cb = mac_cb(skb); + int hlen; + + if (!daddr) + return -EINVAL; + + memset(&hdr.fc, 0, sizeof(hdr.fc)); + hdr.fc.type = cb->type; + hdr.fc.security_enabled = cb->secen; + hdr.fc.ack_request = cb->ackreq; + hdr.seq = ieee802154_mlme_ops(dev)->get_dsn(dev); + + if (mac802154_set_header_security(sdata, &hdr, cb) < 0) + return -EINVAL; + + if (!saddr) { + spin_lock_bh(&sdata->mib_lock); + + if (wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST) || + wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_UNDEF) || + wpan_dev->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST)) { + hdr.source.mode = IEEE802154_ADDR_LONG; + hdr.source.extended_addr = wpan_dev->extended_addr; + } else { + hdr.source.mode = IEEE802154_ADDR_SHORT; + hdr.source.short_addr = wpan_dev->short_addr; + } + + hdr.source.pan_id = wpan_dev->pan_id; + + spin_unlock_bh(&sdata->mib_lock); + } else { + hdr.source = *(const struct ieee802154_addr *)saddr; + } + + hdr.dest = *(const struct ieee802154_addr *)daddr; + + hlen = ieee802154_hdr_push(skb, &hdr); + if (hlen < 0) + return -EINVAL; + + skb_reset_mac_header(skb); + skb->mac_len = hlen; + + if (len > ieee802154_max_payload(&hdr)) + return -EMSGSIZE; + + return hlen; +} + +static int +mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr) +{ + struct ieee802154_hdr hdr; + struct ieee802154_addr *addr = (struct ieee802154_addr *)haddr; + + if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) { + pr_debug("malformed packet\n"); + return 0; + } + + *addr = hdr.source; + return sizeof(*addr); +} + +static struct header_ops mac802154_header_ops = { + .create = mac802154_header_create, + .parse = mac802154_header_parse, +}; + +static const struct net_device_ops mac802154_wpan_ops = { + .ndo_open = mac802154_wpan_open, + .ndo_stop = mac802154_slave_close, + .ndo_start_xmit = ieee802154_subif_start_xmit, + .ndo_do_ioctl = mac802154_wpan_ioctl, + .ndo_set_mac_address = mac802154_wpan_mac_addr, +}; + +static const struct net_device_ops mac802154_monitor_ops = { + .ndo_open = mac802154_wpan_open, + .ndo_stop = mac802154_slave_close, + .ndo_start_xmit = ieee802154_monitor_start_xmit, +}; + +static void mac802154_wpan_free(struct net_device *dev) +{ + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + + mac802154_llsec_destroy(&sdata->sec); + + free_netdev(dev); +} + +static void ieee802154_if_setup(struct net_device *dev) +{ + dev->addr_len = IEEE802154_EXTENDED_ADDR_LEN; + memset(dev->broadcast, 0xff, IEEE802154_EXTENDED_ADDR_LEN); + + dev->hard_header_len = MAC802154_FRAME_HARD_HEADER_LEN; + dev->needed_tailroom = 2 + 16; /* FCS + MIC */ + dev->mtu = IEEE802154_MTU; + dev->tx_queue_len = 300; + dev->flags = IFF_NOARP | IFF_BROADCAST; +} + +static int +ieee802154_setup_sdata(struct 
ieee802154_sub_if_data *sdata, + enum nl802154_iftype type) +{ + struct wpan_dev *wpan_dev = &sdata->wpan_dev; + + /* set some type-dependent values */ + sdata->vif.type = type; + sdata->wpan_dev.iftype = type; + + get_random_bytes(&wpan_dev->bsn, 1); + get_random_bytes(&wpan_dev->dsn, 1); + + /* defaults per 802.15.4-2011 */ + wpan_dev->min_be = 3; + wpan_dev->max_be = 5; + wpan_dev->csma_retries = 4; + /* for compatibility, actual default is 3 */ + wpan_dev->frame_retries = -1; + + wpan_dev->pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST); + wpan_dev->short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST); + + switch (type) { + case NL802154_IFTYPE_NODE: + ieee802154_be64_to_le64(&wpan_dev->extended_addr, + sdata->dev->dev_addr); + + sdata->dev->header_ops = &mac802154_header_ops; + sdata->dev->destructor = mac802154_wpan_free; + sdata->dev->netdev_ops = &mac802154_wpan_ops; + sdata->dev->ml_priv = &mac802154_mlme_wpan; + wpan_dev->promiscuous_mode = false; + + spin_lock_init(&sdata->mib_lock); + mutex_init(&sdata->sec_mtx); + + mac802154_llsec_init(&sdata->sec); + break; + case NL802154_IFTYPE_MONITOR: + sdata->dev->destructor = free_netdev; + sdata->dev->netdev_ops = &mac802154_monitor_ops; + wpan_dev->promiscuous_mode = true; + break; + default: + BUG(); + } + + return 0; +} + +struct net_device * +ieee802154_if_add(struct ieee802154_local *local, const char *name, + enum nl802154_iftype type, __le64 extended_addr) +{ + struct net_device *ndev = NULL; + struct ieee802154_sub_if_data *sdata = NULL; + int ret = -ENOMEM; + + ASSERT_RTNL(); + + ndev = alloc_netdev(sizeof(*sdata) + local->hw.vif_data_size, name, + NET_NAME_UNKNOWN, ieee802154_if_setup); + if (!ndev) + return ERR_PTR(-ENOMEM); + + ndev->needed_headroom = local->hw.extra_tx_headroom; + + ret = dev_alloc_name(ndev, ndev->name); + if (ret < 0) + goto err; + + ieee802154_le64_to_be64(ndev->perm_addr, + &local->hw.phy->perm_extended_addr); + switch (type) { + case NL802154_IFTYPE_NODE: + ndev->type = ARPHRD_IEEE802154; + if (ieee802154_is_valid_extended_addr(extended_addr)) + ieee802154_le64_to_be64(ndev->dev_addr, &extended_addr); + else + memcpy(ndev->dev_addr, ndev->perm_addr, + IEEE802154_EXTENDED_ADDR_LEN); + break; + case NL802154_IFTYPE_MONITOR: + ndev->type = ARPHRD_IEEE802154_MONITOR; + break; + default: + ret = -EINVAL; + goto err; + } + + /* TODO check this */ + SET_NETDEV_DEV(ndev, &local->phy->dev); + sdata = netdev_priv(ndev); + ndev->ieee802154_ptr = &sdata->wpan_dev; + memcpy(sdata->name, ndev->name, IFNAMSIZ); + sdata->dev = ndev; + sdata->wpan_dev.wpan_phy = local->hw.phy; + sdata->local = local; + + /* setup type-dependent data */ + ret = ieee802154_setup_sdata(sdata, type); + if (ret) + goto err; + + ret = register_netdevice(ndev); + if (ret < 0) + goto err; + + mutex_lock(&local->iflist_mtx); + list_add_tail_rcu(&sdata->list, &local->interfaces); + mutex_unlock(&local->iflist_mtx); + + return ndev; + +err: + free_netdev(ndev); + return ERR_PTR(ret); +} + +void ieee802154_if_remove(struct ieee802154_sub_if_data *sdata) +{ + ASSERT_RTNL(); + + mutex_lock(&sdata->local->iflist_mtx); + list_del_rcu(&sdata->list); + mutex_unlock(&sdata->local->iflist_mtx); + + synchronize_rcu(); + unregister_netdevice(sdata->dev); +} + +void ieee802154_remove_interfaces(struct ieee802154_local *local) +{ + struct ieee802154_sub_if_data *sdata, *tmp; + + mutex_lock(&local->iflist_mtx); + list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { + list_del(&sdata->list); + + unregister_netdevice(sdata->dev); + } + 
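/* unregister_netdevice() above needs RTNL, which the caller (ieee802154_unregister_hw) holds across this call */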
mutex_unlock(&local->iflist_mtx); +} + +static int netdev_notify(struct notifier_block *nb, + unsigned long state, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct ieee802154_sub_if_data *sdata; + + if (state != NETDEV_CHANGENAME) + return NOTIFY_DONE; + + if (!dev->ieee802154_ptr || !dev->ieee802154_ptr->wpan_phy) + return NOTIFY_DONE; + + if (dev->ieee802154_ptr->wpan_phy->privid != mac802154_wpan_phy_privid) + return NOTIFY_DONE; + + sdata = IEEE802154_DEV_TO_SUB_IF(dev); + memcpy(sdata->name, dev->name, IFNAMSIZ); + + return NOTIFY_OK; +} + +static struct notifier_block mac802154_netdev_notifier = { + .notifier_call = netdev_notify, +}; + +int ieee802154_iface_init(void) +{ + return register_netdevice_notifier(&mac802154_netdev_notifier); +} + +void ieee802154_iface_exit(void) +{ + unregister_netdevice_notifier(&mac802154_netdev_notifier); +} diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c index 457058142098376bb9731600a9c162aaf4247b64..dcf73958133a0d6d0c28f825f648b13cc9ef478b 100644 --- a/net/mac802154/llsec.c +++ b/net/mac802154/llsec.c @@ -17,10 +17,10 @@ #include #include #include -#include +#include #include -#include "mac802154.h" +#include "ieee802154_i.h" #include "llsec.h" static void llsec_key_put(struct mac802154_llsec_key *key); @@ -75,8 +75,6 @@ void mac802154_llsec_destroy(struct mac802154_llsec *sec) } } - - int mac802154_llsec_get_params(struct mac802154_llsec *sec, struct ieee802154_llsec_params *params) { @@ -117,8 +115,6 @@ int mac802154_llsec_set_params(struct mac802154_llsec *sec, return 0; } - - static struct mac802154_llsec_key* llsec_key_alloc(const struct ieee802154_llsec_key *template) { @@ -294,8 +290,6 @@ int mac802154_llsec_key_del(struct mac802154_llsec *sec, return -ENOENT; } - - static bool llsec_dev_use_shortaddr(__le16 short_addr) { return short_addr != cpu_to_le16(IEEE802154_ADDR_UNDEF) && @@ -304,12 +298,12 @@ static bool llsec_dev_use_shortaddr(__le16 short_addr) static u32 llsec_dev_hash_short(__le16 short_addr, __le16 pan_id) { - return ((__force u16) short_addr) << 16 | (__force u16) pan_id; + return ((__force u16)short_addr) << 16 | (__force u16)pan_id; } static u64 llsec_dev_hash_long(__le64 hwaddr) { - return (__force u64) hwaddr; + return (__force u64)hwaddr; } static struct mac802154_llsec_device* @@ -411,8 +405,6 @@ int mac802154_llsec_dev_del(struct mac802154_llsec *sec, __le64 device_addr) return 0; } - - static struct mac802154_llsec_device_key* llsec_devkey_find(struct mac802154_llsec_device *dev, const struct ieee802154_llsec_key_id *key) @@ -475,8 +467,6 @@ int mac802154_llsec_devkey_del(struct mac802154_llsec *sec, return 0; } - - static struct mac802154_llsec_seclevel* llsec_find_seclevel(const struct mac802154_llsec *sec, const struct ieee802154_llsec_seclevel *sl) @@ -532,8 +522,6 @@ int mac802154_llsec_seclevel_del(struct mac802154_llsec *sec, return 0; } - - static int llsec_recover_addr(struct mac802154_llsec *sec, struct ieee802154_addr *addr) { @@ -609,7 +597,6 @@ found: return llsec_key_get(key); } - static void llsec_geniv(u8 iv[16], __le64 addr, const struct ieee802154_sechdr *sec) { @@ -786,8 +773,6 @@ fail: return rc; } - - static struct mac802154_llsec_device* llsec_lookup_dev(struct mac802154_llsec *sec, const struct ieee802154_addr *addr) diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c index bf809131eef776209040a8496ed1e2214e6bdebe..6aacb181688961178bab9849c23425a7a3e25478 100644 --- a/net/mac802154/mac_cmd.c +++ b/net/mac802154/mac_cmd.c @@ -12,10 +12,6 @@ 
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * Written by: * Sergey Lapin * Dmitry Eremin-Solenikov @@ -24,14 +20,14 @@ #include #include +#include -#include #include -#include +#include #include -#include -#include "mac802154.h" +#include "ieee802154_i.h" +#include "driver-ops.h" static int mac802154_mlme_start_req(struct net_device *dev, struct ieee802154_addr *addr, @@ -43,11 +39,12 @@ static int mac802154_mlme_start_req(struct net_device *dev, struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev); int rc = 0; + ASSERT_RTNL(); + BUG_ON(addr->mode != IEEE802154_ADDR_SHORT); mac802154_dev_set_pan_id(dev, addr->pan_id); mac802154_dev_set_short_addr(dev, addr->short_addr); - mac802154_dev_set_ieee_addr(dev); mac802154_dev_set_page_channel(dev, page, channel); if (ops->llsec) { @@ -69,21 +66,71 @@ static int mac802154_mlme_start_req(struct net_device *dev, rc = ops->llsec->set_params(dev, &params, changed); } - /* FIXME: add validation for unused parameters to be sane - * for SoftMAC - */ - ieee802154_nl_start_confirm(dev, IEEE802154_SUCCESS); - return rc; } -static struct wpan_phy *mac802154_get_phy(const struct net_device *dev) +static int mac802154_set_mac_params(struct net_device *dev, + const struct ieee802154_mac_params *params) { - struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + struct ieee802154_local *local = sdata->local; + struct wpan_dev *wpan_dev = &sdata->wpan_dev; + int ret; + + ASSERT_RTNL(); + + /* PHY */ + wpan_dev->wpan_phy->transmit_power = params->transmit_power; + wpan_dev->wpan_phy->cca_mode = params->cca_mode; + wpan_dev->wpan_phy->cca_ed_level = params->cca_ed_level; + + /* MAC */ + wpan_dev->min_be = params->min_be; + wpan_dev->max_be = params->max_be; + wpan_dev->csma_retries = params->csma_retries; + wpan_dev->frame_retries = params->frame_retries; + wpan_dev->lbt = params->lbt; + + if (local->hw.flags & IEEE802154_HW_TXPOWER) { + ret = drv_set_tx_power(local, params->transmit_power); + if (ret < 0) + return ret; + } + + if (local->hw.flags & IEEE802154_HW_CCA_MODE) { + ret = drv_set_cca_mode(local, params->cca_mode); + if (ret < 0) + return ret; + } - BUG_ON(dev->type != ARPHRD_IEEE802154); + if (local->hw.flags & IEEE802154_HW_CCA_ED_LEVEL) { + ret = drv_set_cca_ed_level(local, params->cca_ed_level); + if (ret < 0) + return ret; + } + + return 0; +} - return to_phy(get_device(&priv->hw->phy->dev)); +static void mac802154_get_mac_params(struct net_device *dev, + struct ieee802154_mac_params *params) +{ + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + struct wpan_dev *wpan_dev = &sdata->wpan_dev; + + ASSERT_RTNL(); + + /* PHY */ + params->transmit_power = wpan_dev->wpan_phy->transmit_power; + params->cca_mode = wpan_dev->wpan_phy->cca_mode; + params->cca_ed_level = wpan_dev->wpan_phy->cca_ed_level; + + /* MAC */ + params->min_be = wpan_dev->min_be; + params->max_be = wpan_dev->max_be; + params->csma_retries = wpan_dev->csma_retries; + params->frame_retries = wpan_dev->frame_retries; + params->lbt = wpan_dev->lbt; } static struct ieee802154_llsec_ops mac802154_llsec_ops = { @@ -102,12 +149,7 @@ static struct ieee802154_llsec_ops mac802154_llsec_ops = { .unlock_table = 
mac802154_unlock_table, }; -struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced = { - .get_phy = mac802154_get_phy, -}; - struct ieee802154_mlme_ops mac802154_mlme_wpan = { - .get_phy = mac802154_get_phy, .start_req = mac802154_mlme_start_req, .get_pan_id = mac802154_dev_get_pan_id, .get_short_addr = mac802154_dev_get_short_addr, diff --git a/net/mac802154/main.c b/net/mac802154/main.c new file mode 100644 index 0000000000000000000000000000000000000000..8500378c8318cd3b5b7e3a368f44080b40c5900f --- /dev/null +++ b/net/mac802154/main.c @@ -0,0 +1,217 @@ +/* + * Copyright (C) 2007-2012 Siemens AG + * + * Written by: + * Alexander Smirnov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "ieee802154_i.h" +#include "cfg.h" + +static void ieee802154_tasklet_handler(unsigned long data) +{ + struct ieee802154_local *local = (struct ieee802154_local *)data; + struct sk_buff *skb; + + while ((skb = skb_dequeue(&local->skb_queue))) { + switch (skb->pkt_type) { + case IEEE802154_RX_MSG: + /* Clear skb->pkt_type in order to not confuse kernel + * netstack. + */ + skb->pkt_type = 0; + ieee802154_rx(&local->hw, skb); + break; + default: + WARN(1, "mac802154: Packet is of unknown type %d\n", + skb->pkt_type); + kfree_skb(skb); + break; + } + } +} + +struct ieee802154_hw * +ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops) +{ + struct wpan_phy *phy; + struct ieee802154_local *local; + size_t priv_size; + + if (!ops || !(ops->xmit_async || ops->xmit_sync) || !ops->ed || + !ops->start || !ops->stop || !ops->set_channel) { + pr_err("undefined IEEE802.15.4 device operations\n"); + return NULL; + } + + /* Ensure 32-byte alignment of our private data and hw private data. + * We use the wpan_phy priv data for both our ieee802154_local and for + * the driver's private data + * + * in memory it'll be like this: + * + * +-------------------------+ + * | struct wpan_phy | + * +-------------------------+ + * | struct ieee802154_local | + * +-------------------------+ + * | driver's private data | + * +-------------------------+ + * + * Because the ieee802154 layer isn't aware of the driver and MAC + * structures, let's align them here. 
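+ * (hw.priv below therefore becomes (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN), the first 32-byte aligned offset past struct ieee802154_local)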
+ */ + + priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len; + + phy = wpan_phy_new(&mac802154_config_ops, priv_size); + if (!phy) { + pr_err("failure to allocate master IEEE802.15.4 device\n"); + return NULL; + } + + phy->privid = mac802154_wpan_phy_privid; + + local = wpan_phy_priv(phy); + local->phy = phy; + local->hw.phy = local->phy; + local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN); + local->ops = ops; + + INIT_LIST_HEAD(&local->interfaces); + mutex_init(&local->iflist_mtx); + + tasklet_init(&local->tasklet, + ieee802154_tasklet_handler, + (unsigned long)local); + + skb_queue_head_init(&local->skb_queue); + + return &local->hw; +} +EXPORT_SYMBOL(ieee802154_alloc_hw); + +void ieee802154_free_hw(struct ieee802154_hw *hw) +{ + struct ieee802154_local *local = hw_to_local(hw); + + BUG_ON(!list_empty(&local->interfaces)); + + mutex_destroy(&local->iflist_mtx); + + wpan_phy_free(local->phy); +} +EXPORT_SYMBOL(ieee802154_free_hw); + +static void ieee802154_setup_wpan_phy_pib(struct wpan_phy *wpan_phy) +{ + /* TODO warn on empty symbol_duration + * Should be done when all drivers sets this value. + */ + + wpan_phy->lifs_period = IEEE802154_LIFS_PERIOD * + wpan_phy->symbol_duration; + wpan_phy->sifs_period = IEEE802154_SIFS_PERIOD * + wpan_phy->symbol_duration; +} + +int ieee802154_register_hw(struct ieee802154_hw *hw) +{ + struct ieee802154_local *local = hw_to_local(hw); + struct net_device *dev; + int rc = -ENOSYS; + + local->workqueue = + create_singlethread_workqueue(wpan_phy_name(local->phy)); + if (!local->workqueue) { + rc = -ENOMEM; + goto out; + } + + hrtimer_init(&local->ifs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + local->ifs_timer.function = ieee802154_xmit_ifs_timer; + + wpan_phy_set_dev(local->phy, local->hw.parent); + + ieee802154_setup_wpan_phy_pib(local->phy); + + rc = wpan_phy_register(local->phy); + if (rc < 0) + goto out_wq; + + rtnl_lock(); + + dev = ieee802154_if_add(local, "wpan%d", NL802154_IFTYPE_NODE, + cpu_to_le64(0x0000000000000000ULL)); + if (IS_ERR(dev)) { + rtnl_unlock(); + rc = PTR_ERR(dev); + goto out_wq; + } + + rtnl_unlock(); + + return 0; + +out_wq: + destroy_workqueue(local->workqueue); +out: + return rc; +} +EXPORT_SYMBOL(ieee802154_register_hw); + +void ieee802154_unregister_hw(struct ieee802154_hw *hw) +{ + struct ieee802154_local *local = hw_to_local(hw); + + tasklet_kill(&local->tasklet); + flush_workqueue(local->workqueue); + destroy_workqueue(local->workqueue); + + rtnl_lock(); + + ieee802154_remove_interfaces(local); + + rtnl_unlock(); + + wpan_phy_unregister(local->phy); +} +EXPORT_SYMBOL(ieee802154_unregister_hw); + +static int __init ieee802154_init(void) +{ + return ieee802154_iface_init(); +} + +static void __exit ieee802154_exit(void) +{ + ieee802154_iface_exit(); + + rcu_barrier(); +} + +subsys_initcall(ieee802154_init); +module_exit(ieee802154_exit); + +MODULE_DESCRIPTION("IEEE 802.15.4 subsystem"); +MODULE_LICENSE("GPL v2"); diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c index 868a040fd422ea6e877cc7df036b425f8b62008e..5cf019a57fd79cd601209971c349023e6c619ea0 100644 --- a/net/mac802154/mib.c +++ b/net/mac802154/mib.c @@ -10,10 +10,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- * * Written by: * Dmitry Eremin-Solenikov * Sergey Lapin @@ -25,208 +21,100 @@ #include #include -#include - -#include "mac802154.h" - -struct phy_chan_notify_work { - struct work_struct work; - struct net_device *dev; -}; - -struct hw_addr_filt_notify_work { - struct work_struct work; - struct net_device *dev; - unsigned long changed; -}; - -static struct mac802154_priv *mac802154_slave_get_priv(struct net_device *dev) -{ - struct mac802154_sub_if_data *priv = netdev_priv(dev); - - BUG_ON(dev->type != ARPHRD_IEEE802154); - - return priv->hw; -} - -static void hw_addr_notify(struct work_struct *work) -{ - struct hw_addr_filt_notify_work *nw = container_of(work, - struct hw_addr_filt_notify_work, work); - struct mac802154_priv *hw = mac802154_slave_get_priv(nw->dev); - int res; +#include - res = hw->ops->set_hw_addr_filt(&hw->hw, - &hw->hw.hw_filt, - nw->changed); - if (res) - pr_debug("failed changed mask %lx\n", nw->changed); - - kfree(nw); -} - -static void set_hw_addr_filt(struct net_device *dev, unsigned long changed) -{ - struct mac802154_sub_if_data *priv = netdev_priv(dev); - struct hw_addr_filt_notify_work *work; - - work = kzalloc(sizeof(*work), GFP_ATOMIC); - if (!work) - return; - - INIT_WORK(&work->work, hw_addr_notify); - work->dev = dev; - work->changed = changed; - queue_work(priv->hw->dev_workqueue, &work->work); -} +#include "ieee802154_i.h" +#include "driver-ops.h" void mac802154_dev_set_short_addr(struct net_device *dev, __le16 val) { - struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); BUG_ON(dev->type != ARPHRD_IEEE802154); - spin_lock_bh(&priv->mib_lock); - priv->short_addr = val; - spin_unlock_bh(&priv->mib_lock); - - if ((priv->hw->ops->set_hw_addr_filt) && - (priv->hw->hw.hw_filt.short_addr != priv->short_addr)) { - priv->hw->hw.hw_filt.short_addr = priv->short_addr; - set_hw_addr_filt(dev, IEEE802515_AFILT_SADDR_CHANGED); - } + spin_lock_bh(&sdata->mib_lock); + sdata->wpan_dev.short_addr = val; + spin_unlock_bh(&sdata->mib_lock); } __le16 mac802154_dev_get_short_addr(const struct net_device *dev) { - struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); __le16 ret; BUG_ON(dev->type != ARPHRD_IEEE802154); - spin_lock_bh(&priv->mib_lock); - ret = priv->short_addr; - spin_unlock_bh(&priv->mib_lock); + spin_lock_bh(&sdata->mib_lock); + ret = sdata->wpan_dev.short_addr; + spin_unlock_bh(&sdata->mib_lock); return ret; } -void mac802154_dev_set_ieee_addr(struct net_device *dev) -{ - struct mac802154_sub_if_data *priv = netdev_priv(dev); - struct mac802154_priv *mac = priv->hw; - - priv->extended_addr = ieee802154_devaddr_from_raw(dev->dev_addr); - - if (mac->ops->set_hw_addr_filt && - mac->hw.hw_filt.ieee_addr != priv->extended_addr) { - mac->hw.hw_filt.ieee_addr = priv->extended_addr; - set_hw_addr_filt(dev, IEEE802515_AFILT_IEEEADDR_CHANGED); - } -} - __le16 mac802154_dev_get_pan_id(const struct net_device *dev) { - struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); __le16 ret; BUG_ON(dev->type != ARPHRD_IEEE802154); - spin_lock_bh(&priv->mib_lock); - ret = priv->pan_id; - spin_unlock_bh(&priv->mib_lock); + spin_lock_bh(&sdata->mib_lock); + ret = sdata->wpan_dev.pan_id; + spin_unlock_bh(&sdata->mib_lock); return ret; } void mac802154_dev_set_pan_id(struct net_device *dev, __le16 val) { - struct mac802154_sub_if_data *priv = netdev_priv(dev); + 
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); BUG_ON(dev->type != ARPHRD_IEEE802154); - spin_lock_bh(&priv->mib_lock); - priv->pan_id = val; - spin_unlock_bh(&priv->mib_lock); - - if ((priv->hw->ops->set_hw_addr_filt) && - (priv->hw->hw.hw_filt.pan_id != priv->pan_id)) { - priv->hw->hw.hw_filt.pan_id = priv->pan_id; - set_hw_addr_filt(dev, IEEE802515_AFILT_PANID_CHANGED); - } + spin_lock_bh(&sdata->mib_lock); + sdata->wpan_dev.pan_id = val; + spin_unlock_bh(&sdata->mib_lock); } u8 mac802154_dev_get_dsn(const struct net_device *dev) { - struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); BUG_ON(dev->type != ARPHRD_IEEE802154); - return priv->dsn++; -} - -static void phy_chan_notify(struct work_struct *work) -{ - struct phy_chan_notify_work *nw = container_of(work, - struct phy_chan_notify_work, work); - struct mac802154_priv *hw = mac802154_slave_get_priv(nw->dev); - struct mac802154_sub_if_data *priv = netdev_priv(nw->dev); - int res; - - mutex_lock(&priv->hw->phy->pib_lock); - res = hw->ops->set_channel(&hw->hw, priv->page, priv->chan); - if (res) { - pr_debug("set_channel failed\n"); - } else { - priv->hw->phy->current_channel = priv->chan; - priv->hw->phy->current_page = priv->page; - } - mutex_unlock(&priv->hw->phy->pib_lock); - - kfree(nw); + return sdata->wpan_dev.dsn++; } void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan) { - struct mac802154_sub_if_data *priv = netdev_priv(dev); - struct phy_chan_notify_work *work; + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + struct ieee802154_local *local = sdata->local; + int res; BUG_ON(dev->type != ARPHRD_IEEE802154); - spin_lock_bh(&priv->mib_lock); - priv->page = page; - priv->chan = chan; - spin_unlock_bh(&priv->mib_lock); - - mutex_lock(&priv->hw->phy->pib_lock); - if (priv->hw->phy->current_channel != priv->chan || - priv->hw->phy->current_page != priv->page) { - mutex_unlock(&priv->hw->phy->pib_lock); - - work = kzalloc(sizeof(*work), GFP_ATOMIC); - if (!work) - return; - - INIT_WORK(&work->work, phy_chan_notify); - work->dev = dev; - queue_work(priv->hw->dev_workqueue, &work->work); + res = drv_set_channel(local, page, chan); + if (res) { + pr_debug("set_channel failed\n"); } else { - mutex_unlock(&priv->hw->phy->pib_lock); + mutex_lock(&local->phy->pib_lock); + local->phy->current_channel = chan; + local->phy->current_page = page; + mutex_unlock(&local->phy->pib_lock); } } - int mac802154_get_params(struct net_device *dev, struct ieee802154_llsec_params *params) { - struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); int res; BUG_ON(dev->type != ARPHRD_IEEE802154); - mutex_lock(&priv->sec_mtx); - res = mac802154_llsec_get_params(&priv->sec, params); - mutex_unlock(&priv->sec_mtx); + mutex_lock(&sdata->sec_mtx); + res = mac802154_llsec_get_params(&sdata->sec, params); + mutex_unlock(&sdata->sec_mtx); return res; } @@ -235,31 +123,30 @@ int mac802154_set_params(struct net_device *dev, const struct ieee802154_llsec_params *params, int changed) { - struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); int res; BUG_ON(dev->type != ARPHRD_IEEE802154); - mutex_lock(&priv->sec_mtx); - res = mac802154_llsec_set_params(&priv->sec, params, changed); - mutex_unlock(&priv->sec_mtx); + mutex_lock(&sdata->sec_mtx); + res = 
mac802154_llsec_set_params(&sdata->sec, params, changed); + mutex_unlock(&sdata->sec_mtx); return res; } - int mac802154_add_key(struct net_device *dev, const struct ieee802154_llsec_key_id *id, const struct ieee802154_llsec_key *key) { - struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); int res; BUG_ON(dev->type != ARPHRD_IEEE802154); - mutex_lock(&priv->sec_mtx); - res = mac802154_llsec_key_add(&priv->sec, id, key); - mutex_unlock(&priv->sec_mtx); + mutex_lock(&sdata->sec_mtx); + res = mac802154_llsec_key_add(&sdata->sec, id, key); + mutex_unlock(&sdata->sec_mtx); return res; } @@ -267,61 +154,59 @@ int mac802154_add_key(struct net_device *dev, int mac802154_del_key(struct net_device *dev, const struct ieee802154_llsec_key_id *id) { - struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); int res; BUG_ON(dev->type != ARPHRD_IEEE802154); - mutex_lock(&priv->sec_mtx); - res = mac802154_llsec_key_del(&priv->sec, id); - mutex_unlock(&priv->sec_mtx); + mutex_lock(&sdata->sec_mtx); + res = mac802154_llsec_key_del(&sdata->sec, id); + mutex_unlock(&sdata->sec_mtx); return res; } - int mac802154_add_dev(struct net_device *dev, const struct ieee802154_llsec_device *llsec_dev) { - struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); int res; BUG_ON(dev->type != ARPHRD_IEEE802154); - mutex_lock(&priv->sec_mtx); - res = mac802154_llsec_dev_add(&priv->sec, llsec_dev); - mutex_unlock(&priv->sec_mtx); + mutex_lock(&sdata->sec_mtx); + res = mac802154_llsec_dev_add(&sdata->sec, llsec_dev); + mutex_unlock(&sdata->sec_mtx); return res; } int mac802154_del_dev(struct net_device *dev, __le64 dev_addr) { - struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); int res; BUG_ON(dev->type != ARPHRD_IEEE802154); - mutex_lock(&priv->sec_mtx); - res = mac802154_llsec_dev_del(&priv->sec, dev_addr); - mutex_unlock(&priv->sec_mtx); + mutex_lock(&sdata->sec_mtx); + res = mac802154_llsec_dev_del(&sdata->sec, dev_addr); + mutex_unlock(&sdata->sec_mtx); return res; } - int mac802154_add_devkey(struct net_device *dev, __le64 device_addr, const struct ieee802154_llsec_device_key *key) { - struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); int res; BUG_ON(dev->type != ARPHRD_IEEE802154); - mutex_lock(&priv->sec_mtx); - res = mac802154_llsec_devkey_add(&priv->sec, device_addr, key); - mutex_unlock(&priv->sec_mtx); + mutex_lock(&sdata->sec_mtx); + res = mac802154_llsec_devkey_add(&sdata->sec, device_addr, key); + mutex_unlock(&sdata->sec_mtx); return res; } @@ -330,30 +215,29 @@ int mac802154_del_devkey(struct net_device *dev, __le64 device_addr, const struct ieee802154_llsec_device_key *key) { - struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); int res; BUG_ON(dev->type != ARPHRD_IEEE802154); - mutex_lock(&priv->sec_mtx); - res = mac802154_llsec_devkey_del(&priv->sec, device_addr, key); - mutex_unlock(&priv->sec_mtx); + mutex_lock(&sdata->sec_mtx); + res = mac802154_llsec_devkey_del(&sdata->sec, device_addr, key); + mutex_unlock(&sdata->sec_mtx); return res; } - int mac802154_add_seclevel(struct net_device *dev, const struct ieee802154_llsec_seclevel *sl) { - struct mac802154_sub_if_data 
*priv = netdev_priv(dev); + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); int res; BUG_ON(dev->type != ARPHRD_IEEE802154); - mutex_lock(&priv->sec_mtx); - res = mac802154_llsec_seclevel_add(&priv->sec, sl); - mutex_unlock(&priv->sec_mtx); + mutex_lock(&sdata->sec_mtx); + res = mac802154_llsec_seclevel_add(&sdata->sec, sl); + mutex_unlock(&sdata->sec_mtx); return res; } @@ -361,43 +245,42 @@ int mac802154_add_seclevel(struct net_device *dev, int mac802154_del_seclevel(struct net_device *dev, const struct ieee802154_llsec_seclevel *sl) { - struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); int res; BUG_ON(dev->type != ARPHRD_IEEE802154); - mutex_lock(&priv->sec_mtx); - res = mac802154_llsec_seclevel_del(&priv->sec, sl); - mutex_unlock(&priv->sec_mtx); + mutex_lock(&sdata->sec_mtx); + res = mac802154_llsec_seclevel_del(&sdata->sec, sl); + mutex_unlock(&sdata->sec_mtx); return res; } - void mac802154_lock_table(struct net_device *dev) { - struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); BUG_ON(dev->type != ARPHRD_IEEE802154); - mutex_lock(&priv->sec_mtx); + mutex_lock(&sdata->sec_mtx); } void mac802154_get_table(struct net_device *dev, struct ieee802154_llsec_table **t) { - struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); BUG_ON(dev->type != ARPHRD_IEEE802154); - *t = &priv->sec.table; + *t = &sdata->sec.table; } void mac802154_unlock_table(struct net_device *dev) { - struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); BUG_ON(dev->type != ARPHRD_IEEE802154); - mutex_unlock(&priv->sec_mtx); + mutex_unlock(&sdata->sec_mtx); } diff --git a/net/mac802154/monitor.c b/net/mac802154/monitor.c deleted file mode 100644 index a68230e2b25f53b14907dac5bfd12d5cb09d1cab..0000000000000000000000000000000000000000 --- a/net/mac802154/monitor.c +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright 2007, 2008, 2009 Siemens AG - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Written by: - * Dmitry Eremin-Solenikov - * Sergey Lapin - * Maxim Gorbachyov - * Alexander Smirnov - */ - -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "mac802154.h" - -static netdev_tx_t mac802154_monitor_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct mac802154_sub_if_data *priv; - u8 chan, page; - - priv = netdev_priv(dev); - - /* FIXME: locking */ - chan = priv->hw->phy->current_channel; - page = priv->hw->phy->current_page; - - if (chan == MAC802154_CHAN_NONE) /* not initialized */ - return NETDEV_TX_OK; - - if (WARN_ON(page >= WPAN_NUM_PAGES) || - WARN_ON(chan >= WPAN_NUM_CHANNELS)) - return NETDEV_TX_OK; - - skb->skb_iif = dev->ifindex; - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; - - return mac802154_tx(priv->hw, skb, page, chan); -} - - -void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb) -{ - struct sk_buff *skb2; - struct mac802154_sub_if_data *sdata; - u16 crc = crc_ccitt(0, skb->data, skb->len); - u8 *data; - - rcu_read_lock(); - list_for_each_entry_rcu(sdata, &priv->slaves, list) { - if (sdata->type != IEEE802154_DEV_MONITOR || - !netif_running(sdata->dev)) - continue; - - skb2 = skb_clone(skb, GFP_ATOMIC); - skb2->dev = sdata->dev; - skb2->pkt_type = PACKET_HOST; - data = skb_put(skb2, 2); - data[0] = crc & 0xff; - data[1] = crc >> 8; - - netif_rx_ni(skb2); - } - rcu_read_unlock(); -} - -static const struct net_device_ops mac802154_monitor_ops = { - .ndo_open = mac802154_slave_open, - .ndo_stop = mac802154_slave_close, - .ndo_start_xmit = mac802154_monitor_xmit, -}; - -void mac802154_monitor_setup(struct net_device *dev) -{ - struct mac802154_sub_if_data *priv; - - dev->addr_len = 0; - dev->hard_header_len = 0; - dev->needed_tailroom = 2; /* room for FCS */ - dev->mtu = IEEE802154_MTU; - dev->tx_queue_len = 10; - dev->type = ARPHRD_IEEE802154_MONITOR; - dev->flags = IFF_NOARP | IFF_BROADCAST; - dev->watchdog_timeo = 0; - - dev->destructor = free_netdev; - dev->netdev_ops = &mac802154_monitor_ops; - dev->ml_priv = &mac802154_mlme_reduced; - - priv = netdev_priv(dev); - priv->type = IEEE802154_DEV_MONITOR; - - priv->chan = MAC802154_CHAN_NONE; /* not initialized */ - priv->page = 0; -} diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c index a14cf9ede171e7bcc2c1373931e5e7754b36bab4..c0d67b2b4132b033ca28446556c5aefe7e047ee6 100644 --- a/net/mac802154/rx.c +++ b/net/mac802154/rx.c @@ -10,10 +10,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * Written by: * Pavel Smolenskiy * Maxim Gorbachyov @@ -23,92 +19,284 @@ #include #include -#include #include #include +#include #include #include +#include -#include "mac802154.h" +#include "ieee802154_i.h" -/* The IEEE 802.15.4 standard defines 4 MAC packet types: - * - beacon frame - * - MAC command frame - * - acknowledgement frame - * - data frame - * - * and only the data frame should be pushed to the upper layers, other types - * are just internal MAC layer management information. So only data packets - * are going to be sent to the networking queue, all other will be processed - * right here by using the device workqueue. 
- */ -struct rx_work { - struct sk_buff *skb; - struct work_struct work; - struct ieee802154_dev *dev; - u8 lqi; -}; +static int ieee802154_deliver_skb(struct sk_buff *skb) +{ + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->protocol = htons(ETH_P_IEEE802154); -static void -mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi) + return netif_receive_skb(skb); +} + +static int +ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata, + struct sk_buff *skb, const struct ieee802154_hdr *hdr) { - struct mac802154_priv *priv = mac802154_to_priv(hw); + struct wpan_dev *wpan_dev = &sdata->wpan_dev; + __le16 span, sshort; + int rc; - mac_cb(skb)->lqi = lqi; - skb->protocol = htons(ETH_P_IEEE802154); - skb_reset_mac_header(skb); + pr_debug("getting packet via slave interface %s\n", sdata->dev->name); - if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) { - u16 crc; + spin_lock_bh(&sdata->mib_lock); - if (skb->len < 2) { - pr_debug("got invalid frame\n"); - goto fail; - } - crc = crc_ccitt(0, skb->data, skb->len); - if (crc) { - pr_debug("CRC mismatch\n"); - goto fail; - } - skb_trim(skb, skb->len - 2); /* CRC */ + span = wpan_dev->pan_id; + sshort = wpan_dev->short_addr; + + switch (mac_cb(skb)->dest.mode) { + case IEEE802154_ADDR_NONE: + if (mac_cb(skb)->dest.mode != IEEE802154_ADDR_NONE) + /* FIXME: check if we are PAN coordinator */ + skb->pkt_type = PACKET_OTHERHOST; + else + /* ACK comes with both addresses empty */ + skb->pkt_type = PACKET_HOST; + break; + case IEEE802154_ADDR_LONG: + if (mac_cb(skb)->dest.pan_id != span && + mac_cb(skb)->dest.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST)) + skb->pkt_type = PACKET_OTHERHOST; + else if (mac_cb(skb)->dest.extended_addr == wpan_dev->extended_addr) + skb->pkt_type = PACKET_HOST; + else + skb->pkt_type = PACKET_OTHERHOST; + break; + case IEEE802154_ADDR_SHORT: + if (mac_cb(skb)->dest.pan_id != span && + mac_cb(skb)->dest.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST)) + skb->pkt_type = PACKET_OTHERHOST; + else if (mac_cb(skb)->dest.short_addr == sshort) + skb->pkt_type = PACKET_HOST; + else if (mac_cb(skb)->dest.short_addr == + cpu_to_le16(IEEE802154_ADDR_BROADCAST)) + skb->pkt_type = PACKET_BROADCAST; + else + skb->pkt_type = PACKET_OTHERHOST; + break; + default: + spin_unlock_bh(&sdata->mib_lock); + pr_debug("invalid dest mode\n"); + goto fail; } - mac802154_monitors_rx(priv, skb); - mac802154_wpans_rx(priv, skb); + spin_unlock_bh(&sdata->mib_lock); - return; + skb->dev = sdata->dev; + + rc = mac802154_llsec_decrypt(&sdata->sec, skb); + if (rc) { + pr_debug("decryption failed: %i\n", rc); + goto fail; + } + + sdata->dev->stats.rx_packets++; + sdata->dev->stats.rx_bytes += skb->len; + + switch (mac_cb(skb)->type) { + case IEEE802154_FC_TYPE_DATA: + return ieee802154_deliver_skb(skb); + default: + pr_warn("ieee802154: bad frame received (type = %d)\n", + mac_cb(skb)->type); + goto fail; + } fail: kfree_skb(skb); + return NET_RX_DROP; } -static void mac802154_rx_worker(struct work_struct *work) +static void +ieee802154_print_addr(const char *name, const struct ieee802154_addr *addr) { - struct rx_work *rw = container_of(work, struct rx_work, work); + if (addr->mode == IEEE802154_ADDR_NONE) + pr_debug("%s not present\n", name); + + pr_debug("%s PAN ID: %04x\n", name, le16_to_cpu(addr->pan_id)); + if (addr->mode == IEEE802154_ADDR_SHORT) { + pr_debug("%s is short: %04x\n", name, + le16_to_cpu(addr->short_addr)); + } else { + u64 hw = swab64((__force u64)addr->extended_addr); - mac802154_subif_rx(rw->dev, rw->skb, 
rw->lqi); - kfree(rw); + pr_debug("%s is hardware: %8phC\n", name, &hw); + } } -void -ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb, u8 lqi) +static int +ieee802154_parse_frame_start(struct sk_buff *skb, struct ieee802154_hdr *hdr) { - struct mac802154_priv *priv = mac802154_to_priv(dev); - struct rx_work *work; + int hlen; + struct ieee802154_mac_cb *cb = mac_cb_init(skb); - if (!skb) - return; + skb_reset_mac_header(skb); + + hlen = ieee802154_hdr_pull(skb, hdr); + if (hlen < 0) + return -EINVAL; + + skb->mac_len = hlen; + + pr_debug("fc: %04x dsn: %02x\n", le16_to_cpup((__le16 *)&hdr->fc), + hdr->seq); + + cb->type = hdr->fc.type; + cb->ackreq = hdr->fc.ack_request; + cb->secen = hdr->fc.security_enabled; + + ieee802154_print_addr("destination", &hdr->dest); + ieee802154_print_addr("source", &hdr->source); + + cb->source = hdr->source; + cb->dest = hdr->dest; + + if (hdr->fc.security_enabled) { + u64 key; + + pr_debug("seclevel %i\n", hdr->sec.level); + + switch (hdr->sec.key_id_mode) { + case IEEE802154_SCF_KEY_IMPLICIT: + pr_debug("implicit key\n"); + break; + + case IEEE802154_SCF_KEY_INDEX: + pr_debug("key %02x\n", hdr->sec.key_id); + break; + + case IEEE802154_SCF_KEY_SHORT_INDEX: + pr_debug("key %04x:%04x %02x\n", + le32_to_cpu(hdr->sec.short_src) >> 16, + le32_to_cpu(hdr->sec.short_src) & 0xffff, + hdr->sec.key_id); + break; + + case IEEE802154_SCF_KEY_HW_INDEX: + key = swab64((__force u64)hdr->sec.extended_src); + pr_debug("key source %8phC %02x\n", &key, + hdr->sec.key_id); + break; + } + } + + return 0; +} + +static void +__ieee802154_rx_handle_packet(struct ieee802154_local *local, + struct sk_buff *skb) +{ + int ret; + struct ieee802154_sub_if_data *sdata; + struct ieee802154_hdr hdr; - work = kzalloc(sizeof(*work), GFP_ATOMIC); - if (!work) + ret = ieee802154_parse_frame_start(skb, &hdr); + if (ret) { + pr_debug("got invalid frame\n"); + kfree_skb(skb); return; + } + + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + if (sdata->vif.type != NL802154_IFTYPE_NODE || + !netif_running(sdata->dev)) + continue; + + ieee802154_subif_frame(sdata, skb, &hdr); + skb = NULL; + break; + } + + if (skb) + kfree_skb(skb); +} + +static void +ieee802154_monitors_rx(struct ieee802154_local *local, struct sk_buff *skb) +{ + struct sk_buff *skb2; + struct ieee802154_sub_if_data *sdata; + + skb_reset_mac_header(skb); + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = htons(ETH_P_IEEE802154); + + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + if (sdata->vif.type != NL802154_IFTYPE_MONITOR) + continue; + + if (!ieee802154_sdata_running(sdata)) + continue; + + skb2 = skb_clone(skb, GFP_ATOMIC); + if (skb2) { + skb2->dev = sdata->dev; + ieee802154_deliver_skb(skb2); + + sdata->dev->stats.rx_packets++; + sdata->dev->stats.rx_bytes += skb->len; + } + } +} + +void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb) +{ + struct ieee802154_local *local = hw_to_local(hw); + u16 crc; - INIT_WORK(&work->work, mac802154_rx_worker); - work->skb = skb; - work->dev = dev; - work->lqi = lqi; + WARN_ON_ONCE(softirq_count() == 0); - queue_work(priv->dev_workqueue, &work->work); + /* TODO: When a transceiver omits the checksum here, we + * add an own calculated one. This is currently an ugly + * solution because the monitor needs a crc here. 
+ */ + if (local->hw.flags & IEEE802154_HW_RX_OMIT_CKSUM) { + crc = crc_ccitt(0, skb->data, skb->len); + put_unaligned_le16(crc, skb_put(skb, 2)); + } + + rcu_read_lock(); + + ieee802154_monitors_rx(local, skb); + + /* Check if transceiver doesn't validate the checksum. + * If not we validate the checksum here. + */ + if (local->hw.flags & IEEE802154_HW_RX_DROP_BAD_CKSUM) { + crc = crc_ccitt(0, skb->data, skb->len); + if (crc) { + rcu_read_unlock(); + kfree_skb(skb); + return; + } + } + /* remove crc */ + skb_trim(skb, skb->len - 2); + + __ieee802154_rx_handle_packet(local, skb); + + rcu_read_unlock(); +} +EXPORT_SYMBOL(ieee802154_rx); + +void +ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi) +{ + struct ieee802154_local *local = hw_to_local(hw); + + mac_cb(skb)->lqi = lqi; + skb->pkt_type = IEEE802154_RX_MSG; + skb_queue_tail(&local->skb_queue, skb); + tasklet_schedule(&local->tasklet); } EXPORT_SYMBOL(ieee802154_rx_irqsafe); diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c index fdf4c0e67259ecceacc0b06b9060405ded6f63cc..c62e95695c7843947c8643cb268f95f2e64c3da9 100644 --- a/net/mac802154/tx.c +++ b/net/mac802154/tx.c @@ -10,10 +10,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * Written by: * Dmitry Eremin-Solenikov * Sergey Lapin @@ -24,106 +20,98 @@ #include #include #include +#include +#include #include #include -#include +#include -#include "mac802154.h" +#include "ieee802154_i.h" +#include "driver-ops.h" /* IEEE 802.15.4 transceivers can sleep during the xmit session, so process * packets through the workqueue. */ -struct xmit_work { +struct ieee802154_xmit_cb { struct sk_buff *skb; struct work_struct work; - struct mac802154_priv *priv; - u8 chan; - u8 page; + struct ieee802154_local *local; }; -static void mac802154_xmit_worker(struct work_struct *work) +static struct ieee802154_xmit_cb ieee802154_xmit_cb; + +static void ieee802154_xmit_worker(struct work_struct *work) { - struct xmit_work *xw = container_of(work, struct xmit_work, work); - struct mac802154_sub_if_data *sdata; + struct ieee802154_xmit_cb *cb = + container_of(work, struct ieee802154_xmit_cb, work); + struct ieee802154_local *local = cb->local; + struct sk_buff *skb = cb->skb; + struct net_device *dev = skb->dev; int res; - mutex_lock(&xw->priv->phy->pib_lock); - if (xw->priv->phy->current_channel != xw->chan || - xw->priv->phy->current_page != xw->page) { - res = xw->priv->ops->set_channel(&xw->priv->hw, - xw->page, - xw->chan); - if (res) { - pr_debug("set_channel failed\n"); - goto out; - } + rtnl_lock(); - xw->priv->phy->current_channel = xw->chan; - xw->priv->phy->current_page = xw->page; - } + /* check if ifdown occurred while schedule */ + if (!netif_running(dev)) + goto err_tx; - res = xw->priv->ops->xmit(&xw->priv->hw, xw->skb); + res = drv_xmit_sync(local, skb); if (res) - pr_debug("transmission failed\n"); + goto err_tx; -out: - mutex_unlock(&xw->priv->phy->pib_lock); + ieee802154_xmit_complete(&local->hw, skb, false); - /* Restart the netif queue on each sub_if_data object. 
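ieee802154_rx_irqsafe() above only tags the skb with its LQI, queues it and schedules the tasklet, so a driver may call it from hard-IRQ context; ieee802154_rx() itself must run in softirq context, which the WARN_ON_ONCE(softirq_count() == 0) enforces. A hedged driver-side sketch; the driver type and the frame-read helper are hypothetical:

static irqreturn_t example_xcvr_isr(int irq, void *data)
{
	struct example_xcvr *xcvr = data;	/* hypothetical driver state */
	struct sk_buff *skb;
	u8 lqi;

	/* assumed helper: pull the received frame and its LQI off the chip */
	skb = example_xcvr_read_frame(xcvr, &lqi);
	if (!skb)
		return IRQ_NONE;

	ieee802154_rx_irqsafe(xcvr->hw, skb, lqi);
	return IRQ_HANDLED;
}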
*/ - rcu_read_lock(); - list_for_each_entry_rcu(sdata, &xw->priv->slaves, list) - netif_wake_queue(sdata->dev); - rcu_read_unlock(); + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; - dev_kfree_skb(xw->skb); + rtnl_unlock(); - kfree(xw); + return; + +err_tx: + /* Restart the netif queue on each sub_if_data object. */ + ieee802154_wake_queue(&local->hw); + rtnl_unlock(); + kfree_skb(skb); + netdev_dbg(dev, "transmission failed\n"); } -netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb, - u8 page, u8 chan) +static netdev_tx_t +ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb) { - struct xmit_work *work; - struct mac802154_sub_if_data *sdata; - - if (!(priv->phy->channels_supported[page] & (1 << chan))) { - WARN_ON(1); - goto err_tx; - } - - mac802154_monitors_rx(mac802154_to_priv(&priv->hw), skb); + struct net_device *dev = skb->dev; + int ret; - if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) { + if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) { u16 crc = crc_ccitt(0, skb->data, skb->len); - u8 *data = skb_put(skb, 2); - data[0] = crc & 0xff; - data[1] = crc >> 8; + put_unaligned_le16(crc, skb_put(skb, 2)); } - if (skb_cow_head(skb, priv->hw.extra_tx_headroom)) + if (skb_cow_head(skb, local->hw.extra_tx_headroom)) goto err_tx; - work = kzalloc(sizeof(*work), GFP_ATOMIC); - if (!work) { - kfree_skb(skb); - return NETDEV_TX_BUSY; - } - /* Stop the netif queue on each sub_if_data object. */ - rcu_read_lock(); - list_for_each_entry_rcu(sdata, &priv->slaves, list) - netif_stop_queue(sdata->dev); - rcu_read_unlock(); + ieee802154_stop_queue(&local->hw); + + /* async is priority, otherwise sync is fallback */ + if (local->ops->xmit_async) { + ret = drv_xmit_async(local, skb); + if (ret) { + ieee802154_wake_queue(&local->hw); + goto err_tx; + } - INIT_WORK(&work->work, mac802154_xmit_worker); - work->skb = skb; - work->priv = priv; - work->page = page; - work->chan = chan; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + } else { + INIT_WORK(&ieee802154_xmit_cb.work, ieee802154_xmit_worker); + ieee802154_xmit_cb.skb = skb; + ieee802154_xmit_cb.local = local; - queue_work(priv->dev_workqueue, &work->work); + queue_work(local->workqueue, &ieee802154_xmit_cb.work); + } return NETDEV_TX_OK; @@ -131,3 +119,31 @@ err_tx: kfree_skb(skb); return NETDEV_TX_OK; } + +netdev_tx_t +ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + + skb->skb_iif = dev->ifindex; + + return ieee802154_tx(sdata->local, skb); +} + +netdev_tx_t +ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); + int rc; + + rc = mac802154_llsec_encrypt(&sdata->sec, skb); + if (rc) { + netdev_warn(dev, "encryption failed: %i\n", rc); + kfree_skb(skb); + return NETDEV_TX_OK; + } + + skb->skb_iif = dev->ifindex; + + return ieee802154_tx(sdata->local, skb); +} diff --git a/net/mac802154/util.c b/net/mac802154/util.c new file mode 100644 index 0000000000000000000000000000000000000000..5fc979027919749a604a47bcd45148fbe5505f03 --- /dev/null +++ b/net/mac802154/util.c @@ -0,0 +1,84 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Authors: + * Alexander Aring + * + * Based on: net/mac80211/util.c + */ + +#include "ieee802154_i.h" + +/* privid for wpan_phys to determine whether they belong to us or not */ +const void *const mac802154_wpan_phy_privid = &mac802154_wpan_phy_privid; + +void ieee802154_wake_queue(struct ieee802154_hw *hw) +{ + struct ieee802154_local *local = hw_to_local(hw); + struct ieee802154_sub_if_data *sdata; + + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + if (!sdata->dev) + continue; + + netif_wake_queue(sdata->dev); + } + rcu_read_unlock(); +} +EXPORT_SYMBOL(ieee802154_wake_queue); + +void ieee802154_stop_queue(struct ieee802154_hw *hw) +{ + struct ieee802154_local *local = hw_to_local(hw); + struct ieee802154_sub_if_data *sdata; + + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + if (!sdata->dev) + continue; + + netif_stop_queue(sdata->dev); + } + rcu_read_unlock(); +} +EXPORT_SYMBOL(ieee802154_stop_queue); + +enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer) +{ + struct ieee802154_local *local = + container_of(timer, struct ieee802154_local, ifs_timer); + + ieee802154_wake_queue(&local->hw); + + return HRTIMER_NORESTART; +} + +void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb, + bool ifs_handling) +{ + if (ifs_handling) { + struct ieee802154_local *local = hw_to_local(hw); + + if (skb->len > 18) + hrtimer_start(&local->ifs_timer, + ktime_set(0, hw->phy->lifs_period * NSEC_PER_USEC), + HRTIMER_MODE_REL); + else + hrtimer_start(&local->ifs_timer, + ktime_set(0, hw->phy->sifs_period * NSEC_PER_USEC), + HRTIMER_MODE_REL); + + consume_skb(skb); + } else { + ieee802154_wake_queue(hw); + consume_skb(skb); + } +} +EXPORT_SYMBOL(ieee802154_xmit_complete); diff --git a/net/mac802154/wpan.c b/net/mac802154/wpan.c deleted file mode 100644 index 4ab86a57dca55212bddaac61f7c3965a255934ba..0000000000000000000000000000000000000000 --- a/net/mac802154/wpan.c +++ /dev/null @@ -1,599 +0,0 @@ -/* - * Copyright 2007-2012 Siemens AG - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
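ieee802154_xmit_complete() above is the completion half of the TX path in tx.c: ieee802154_tx() stops the queues and hands the skb to the driver, and the driver reports back once the frame has left the air. A hedged sketch of that driver side, with hypothetical driver names; the IFS bookkeeping itself stays in mac802154 as shown above:

/* called from the driver's TX-done interrupt */
static void example_xcvr_tx_done(struct example_xcvr *xcvr)
{
	/* ifs_handling == true defers the queue wake-up to the lifs/sifs
	 * hrtimer above, chosen by frame length; false wakes it right away
	 */
	ieee802154_xmit_complete(xcvr->hw, xcvr->tx_skb, false);
	xcvr->tx_skb = NULL;
}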
- * - * Written by: - * Dmitry Eremin-Solenikov - * Sergey Lapin - * Maxim Gorbachyov - * Alexander Smirnov - */ - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "mac802154.h" - -static int mac802154_wpan_update_llsec(struct net_device *dev) -{ - struct mac802154_sub_if_data *priv = netdev_priv(dev); - struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev); - int rc = 0; - - if (ops->llsec) { - struct ieee802154_llsec_params params; - int changed = 0; - - params.pan_id = priv->pan_id; - changed |= IEEE802154_LLSEC_PARAM_PAN_ID; - - params.hwaddr = priv->extended_addr; - changed |= IEEE802154_LLSEC_PARAM_HWADDR; - - rc = ops->llsec->set_params(dev, &params, changed); - } - - return rc; -} - -static int -mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct mac802154_sub_if_data *priv = netdev_priv(dev); - struct sockaddr_ieee802154 *sa = - (struct sockaddr_ieee802154 *)&ifr->ifr_addr; - int err = -ENOIOCTLCMD; - - spin_lock_bh(&priv->mib_lock); - - switch (cmd) { - case SIOCGIFADDR: - { - u16 pan_id, short_addr; - - pan_id = le16_to_cpu(priv->pan_id); - short_addr = le16_to_cpu(priv->short_addr); - if (pan_id == IEEE802154_PANID_BROADCAST || - short_addr == IEEE802154_ADDR_BROADCAST) { - err = -EADDRNOTAVAIL; - break; - } - - sa->family = AF_IEEE802154; - sa->addr.addr_type = IEEE802154_ADDR_SHORT; - sa->addr.pan_id = pan_id; - sa->addr.short_addr = short_addr; - - err = 0; - break; - } - case SIOCSIFADDR: - dev_warn(&dev->dev, - "Using DEBUGing ioctl SIOCSIFADDR isn't recommended!\n"); - if (sa->family != AF_IEEE802154 || - sa->addr.addr_type != IEEE802154_ADDR_SHORT || - sa->addr.pan_id == IEEE802154_PANID_BROADCAST || - sa->addr.short_addr == IEEE802154_ADDR_BROADCAST || - sa->addr.short_addr == IEEE802154_ADDR_UNDEF) { - err = -EINVAL; - break; - } - - priv->pan_id = cpu_to_le16(sa->addr.pan_id); - priv->short_addr = cpu_to_le16(sa->addr.short_addr); - - err = mac802154_wpan_update_llsec(dev); - break; - } - - spin_unlock_bh(&priv->mib_lock); - return err; -} - -static int mac802154_wpan_mac_addr(struct net_device *dev, void *p) -{ - struct sockaddr *addr = p; - - if (netif_running(dev)) - return -EBUSY; - - /* FIXME: validate addr */ - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - mac802154_dev_set_ieee_addr(dev); - return mac802154_wpan_update_llsec(dev); -} - -int mac802154_set_mac_params(struct net_device *dev, - const struct ieee802154_mac_params *params) -{ - struct mac802154_sub_if_data *priv = netdev_priv(dev); - - mutex_lock(&priv->hw->slaves_mtx); - priv->mac_params = *params; - mutex_unlock(&priv->hw->slaves_mtx); - - return 0; -} - -void mac802154_get_mac_params(struct net_device *dev, - struct ieee802154_mac_params *params) -{ - struct mac802154_sub_if_data *priv = netdev_priv(dev); - - mutex_lock(&priv->hw->slaves_mtx); - *params = priv->mac_params; - mutex_unlock(&priv->hw->slaves_mtx); -} - -static int mac802154_wpan_open(struct net_device *dev) -{ - int rc; - struct mac802154_sub_if_data *priv = netdev_priv(dev); - struct wpan_phy *phy = priv->hw->phy; - - rc = mac802154_slave_open(dev); - if (rc < 0) - return rc; - - mutex_lock(&phy->pib_lock); - - if (phy->set_txpower) { - rc = phy->set_txpower(phy, priv->mac_params.transmit_power); - if (rc < 0) - goto out; - } - - if (phy->set_lbt) { - rc = phy->set_lbt(phy, priv->mac_params.lbt); - if (rc < 0) - goto out; - } - - if (phy->set_cca_mode) { - rc = phy->set_cca_mode(phy, priv->mac_params.cca_mode); - if (rc < 0) -
goto out; - } - - if (phy->set_cca_ed_level) { - rc = phy->set_cca_ed_level(phy, priv->mac_params.cca_ed_level); - if (rc < 0) - goto out; - } - - if (phy->set_csma_params) { - rc = phy->set_csma_params(phy, priv->mac_params.min_be, - priv->mac_params.max_be, - priv->mac_params.csma_retries); - if (rc < 0) - goto out; - } - - if (phy->set_frame_retries) { - rc = phy->set_frame_retries(phy, - priv->mac_params.frame_retries); - if (rc < 0) - goto out; - } - - mutex_unlock(&phy->pib_lock); - return 0; - -out: - mutex_unlock(&phy->pib_lock); - return rc; -} - -static int mac802154_set_header_security(struct mac802154_sub_if_data *priv, - struct ieee802154_hdr *hdr, - const struct ieee802154_mac_cb *cb) -{ - struct ieee802154_llsec_params params; - u8 level; - - mac802154_llsec_get_params(&priv->sec, ¶ms); - - if (!params.enabled && cb->secen_override && cb->secen) - return -EINVAL; - if (!params.enabled || - (cb->secen_override && !cb->secen) || - !params.out_level) - return 0; - if (cb->seclevel_override && !cb->seclevel) - return -EINVAL; - - level = cb->seclevel_override ? cb->seclevel : params.out_level; - - hdr->fc.security_enabled = 1; - hdr->sec.level = level; - hdr->sec.key_id_mode = params.out_key.mode; - if (params.out_key.mode == IEEE802154_SCF_KEY_SHORT_INDEX) - hdr->sec.short_src = params.out_key.short_source; - else if (params.out_key.mode == IEEE802154_SCF_KEY_HW_INDEX) - hdr->sec.extended_src = params.out_key.extended_source; - hdr->sec.key_id = params.out_key.id; - - return 0; -} - -static int mac802154_header_create(struct sk_buff *skb, - struct net_device *dev, - unsigned short type, - const void *daddr, - const void *saddr, - unsigned len) -{ - struct ieee802154_hdr hdr; - struct mac802154_sub_if_data *priv = netdev_priv(dev); - struct ieee802154_mac_cb *cb = mac_cb(skb); - int hlen; - - if (!daddr) - return -EINVAL; - - memset(&hdr.fc, 0, sizeof(hdr.fc)); - hdr.fc.type = cb->type; - hdr.fc.security_enabled = cb->secen; - hdr.fc.ack_request = cb->ackreq; - hdr.seq = ieee802154_mlme_ops(dev)->get_dsn(dev); - - if (mac802154_set_header_security(priv, &hdr, cb) < 0) - return -EINVAL; - - if (!saddr) { - spin_lock_bh(&priv->mib_lock); - - if (priv->short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST) || - priv->short_addr == cpu_to_le16(IEEE802154_ADDR_UNDEF) || - priv->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST)) { - hdr.source.mode = IEEE802154_ADDR_LONG; - hdr.source.extended_addr = priv->extended_addr; - } else { - hdr.source.mode = IEEE802154_ADDR_SHORT; - hdr.source.short_addr = priv->short_addr; - } - - hdr.source.pan_id = priv->pan_id; - - spin_unlock_bh(&priv->mib_lock); - } else { - hdr.source = *(const struct ieee802154_addr *)saddr; - } - - hdr.dest = *(const struct ieee802154_addr *)daddr; - - hlen = ieee802154_hdr_push(skb, &hdr); - if (hlen < 0) - return -EINVAL; - - skb_reset_mac_header(skb); - skb->mac_len = hlen; - - if (len > ieee802154_max_payload(&hdr)) - return -EMSGSIZE; - - return hlen; -} - -static int -mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr) -{ - struct ieee802154_hdr hdr; - struct ieee802154_addr *addr = (struct ieee802154_addr *)haddr; - - if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) { - pr_debug("malformed packet\n"); - return 0; - } - - *addr = hdr.source; - return sizeof(*addr); -} - -static netdev_tx_t -mac802154_wpan_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct mac802154_sub_if_data *priv; - u8 chan, page; - int rc; - - priv = netdev_priv(dev); - - spin_lock_bh(&priv->mib_lock); - chan = 
priv->chan; - page = priv->page; - spin_unlock_bh(&priv->mib_lock); - - if (chan == MAC802154_CHAN_NONE || - page >= WPAN_NUM_PAGES || - chan >= WPAN_NUM_CHANNELS) { - kfree_skb(skb); - return NETDEV_TX_OK; - } - - rc = mac802154_llsec_encrypt(&priv->sec, skb); - if (rc) { - pr_warn("encryption failed: %i\n", rc); - kfree_skb(skb); - return NETDEV_TX_OK; - } - - skb->skb_iif = dev->ifindex; - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; - - return mac802154_tx(priv->hw, skb, page, chan); -} - -static struct header_ops mac802154_header_ops = { - .create = mac802154_header_create, - .parse = mac802154_header_parse, -}; - -static const struct net_device_ops mac802154_wpan_ops = { - .ndo_open = mac802154_wpan_open, - .ndo_stop = mac802154_slave_close, - .ndo_start_xmit = mac802154_wpan_xmit, - .ndo_do_ioctl = mac802154_wpan_ioctl, - .ndo_set_mac_address = mac802154_wpan_mac_addr, -}; - -static void mac802154_wpan_free(struct net_device *dev) -{ - struct mac802154_sub_if_data *priv = netdev_priv(dev); - - mac802154_llsec_destroy(&priv->sec); - - free_netdev(dev); -} - -void mac802154_wpan_setup(struct net_device *dev) -{ - struct mac802154_sub_if_data *priv; - - dev->addr_len = IEEE802154_ADDR_LEN; - memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN); - - dev->hard_header_len = MAC802154_FRAME_HARD_HEADER_LEN; - dev->header_ops = &mac802154_header_ops; - dev->needed_tailroom = 2 + 16; /* FCS + MIC */ - dev->mtu = IEEE802154_MTU; - dev->tx_queue_len = 300; - dev->type = ARPHRD_IEEE802154; - dev->flags = IFF_NOARP | IFF_BROADCAST; - dev->watchdog_timeo = 0; - - dev->destructor = mac802154_wpan_free; - dev->netdev_ops = &mac802154_wpan_ops; - dev->ml_priv = &mac802154_mlme_wpan; - - priv = netdev_priv(dev); - priv->type = IEEE802154_DEV_WPAN; - - priv->chan = MAC802154_CHAN_NONE; - priv->page = 0; - - spin_lock_init(&priv->mib_lock); - mutex_init(&priv->sec_mtx); - - get_random_bytes(&priv->bsn, 1); - get_random_bytes(&priv->dsn, 1); - - /* defaults per 802.15.4-2011 */ - priv->mac_params.min_be = 3; - priv->mac_params.max_be = 5; - priv->mac_params.csma_retries = 4; - priv->mac_params.frame_retries = -1; /* for compatibility, actual default is 3 */ - - priv->pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST); - priv->short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST); - - mac802154_llsec_init(&priv->sec); -} - -static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb) -{ - return netif_rx_ni(skb); -} - -static int -mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb, - const struct ieee802154_hdr *hdr) -{ - __le16 span, sshort; - int rc; - - pr_debug("getting packet via slave interface %s\n", sdata->dev->name); - - spin_lock_bh(&sdata->mib_lock); - - span = sdata->pan_id; - sshort = sdata->short_addr; - - switch (mac_cb(skb)->dest.mode) { - case IEEE802154_ADDR_NONE: - if (mac_cb(skb)->dest.mode != IEEE802154_ADDR_NONE) - /* FIXME: check if we are PAN coordinator */ - skb->pkt_type = PACKET_OTHERHOST; - else - /* ACK comes with both addresses empty */ - skb->pkt_type = PACKET_HOST; - break; - case IEEE802154_ADDR_LONG: - if (mac_cb(skb)->dest.pan_id != span && - mac_cb(skb)->dest.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST)) - skb->pkt_type = PACKET_OTHERHOST; - else if (mac_cb(skb)->dest.extended_addr == sdata->extended_addr) - skb->pkt_type = PACKET_HOST; - else - skb->pkt_type = PACKET_OTHERHOST; - break; - case IEEE802154_ADDR_SHORT: - if (mac_cb(skb)->dest.pan_id != span && - mac_cb(skb)->dest.pan_id != 
cpu_to_le16(IEEE802154_PANID_BROADCAST)) - skb->pkt_type = PACKET_OTHERHOST; - else if (mac_cb(skb)->dest.short_addr == sshort) - skb->pkt_type = PACKET_HOST; - else if (mac_cb(skb)->dest.short_addr == - cpu_to_le16(IEEE802154_ADDR_BROADCAST)) - skb->pkt_type = PACKET_BROADCAST; - else - skb->pkt_type = PACKET_OTHERHOST; - break; - default: - spin_unlock_bh(&sdata->mib_lock); - pr_debug("invalid dest mode\n"); - kfree_skb(skb); - return NET_RX_DROP; - } - - spin_unlock_bh(&sdata->mib_lock); - - skb->dev = sdata->dev; - - rc = mac802154_llsec_decrypt(&sdata->sec, skb); - if (rc) { - pr_debug("decryption failed: %i\n", rc); - goto fail; - } - - sdata->dev->stats.rx_packets++; - sdata->dev->stats.rx_bytes += skb->len; - - switch (mac_cb(skb)->type) { - case IEEE802154_FC_TYPE_DATA: - return mac802154_process_data(sdata->dev, skb); - default: - pr_warn("ieee802154: bad frame received (type = %d)\n", - mac_cb(skb)->type); - goto fail; - } - -fail: - kfree_skb(skb); - return NET_RX_DROP; -} - -static void mac802154_print_addr(const char *name, - const struct ieee802154_addr *addr) -{ - if (addr->mode == IEEE802154_ADDR_NONE) - pr_debug("%s not present\n", name); - - pr_debug("%s PAN ID: %04x\n", name, le16_to_cpu(addr->pan_id)); - if (addr->mode == IEEE802154_ADDR_SHORT) { - pr_debug("%s is short: %04x\n", name, - le16_to_cpu(addr->short_addr)); - } else { - u64 hw = swab64((__force u64) addr->extended_addr); - - pr_debug("%s is hardware: %8phC\n", name, &hw); - } -} - -static int mac802154_parse_frame_start(struct sk_buff *skb, - struct ieee802154_hdr *hdr) -{ - int hlen; - struct ieee802154_mac_cb *cb = mac_cb_init(skb); - - hlen = ieee802154_hdr_pull(skb, hdr); - if (hlen < 0) - return -EINVAL; - - skb->mac_len = hlen; - - pr_debug("fc: %04x dsn: %02x\n", le16_to_cpup((__le16 *)&hdr->fc), - hdr->seq); - - cb->type = hdr->fc.type; - cb->ackreq = hdr->fc.ack_request; - cb->secen = hdr->fc.security_enabled; - - mac802154_print_addr("destination", &hdr->dest); - mac802154_print_addr("source", &hdr->source); - - cb->source = hdr->source; - cb->dest = hdr->dest; - - if (hdr->fc.security_enabled) { - u64 key; - - pr_debug("seclevel %i\n", hdr->sec.level); - - switch (hdr->sec.key_id_mode) { - case IEEE802154_SCF_KEY_IMPLICIT: - pr_debug("implicit key\n"); - break; - - case IEEE802154_SCF_KEY_INDEX: - pr_debug("key %02x\n", hdr->sec.key_id); - break; - - case IEEE802154_SCF_KEY_SHORT_INDEX: - pr_debug("key %04x:%04x %02x\n", - le32_to_cpu(hdr->sec.short_src) >> 16, - le32_to_cpu(hdr->sec.short_src) & 0xffff, - hdr->sec.key_id); - break; - - case IEEE802154_SCF_KEY_HW_INDEX: - key = swab64((__force u64) hdr->sec.extended_src); - pr_debug("key source %8phC %02x\n", &key, - hdr->sec.key_id); - break; - } - } - - return 0; -} - -void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb) -{ - int ret; - struct mac802154_sub_if_data *sdata; - struct ieee802154_hdr hdr; - - ret = mac802154_parse_frame_start(skb, &hdr); - if (ret) { - pr_debug("got invalid frame\n"); - kfree_skb(skb); - return; - } - - rcu_read_lock(); - list_for_each_entry_rcu(sdata, &priv->slaves, list) { - if (sdata->type != IEEE802154_DEV_WPAN || - !netif_running(sdata->dev)) - continue; - - mac802154_subif_frame(sdata, skb, &hdr); - skb = NULL; - break; - } - rcu_read_unlock(); - - if (skb) - kfree_skb(skb); -} diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c index e3545f21a099b01b931cd863205a847acd336238..ca27837974fedd82fffcd9c91aa565c60534b13c 100644 --- a/net/mpls/mpls_gso.c +++ b/net/mpls/mpls_gso.c @@ -34,8 
+34,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, SKB_GSO_TCP_ECN | SKB_GSO_GRE | SKB_GSO_GRE_CSUM | - SKB_GSO_IPIP | - SKB_GSO_MPLS))) + SKB_GSO_IPIP))) goto out; /* Setup inner SKB. */ diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index ae5096ab65eb54cff350f8a19e981b6d8906b16e..b02660fa9eb00cd28aeb01f10b76f6604eae8096 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -411,6 +411,13 @@ config NF_NAT_TFTP depends on NF_CONNTRACK && NF_NAT default NF_NAT && NF_CONNTRACK_TFTP +config NF_NAT_REDIRECT + tristate "IPv4/IPv6 redirect support" + depends on NF_NAT + help + This is the kernel functionality to redirect packets to the local + machine through NAT. + config NETFILTER_SYNPROXY tristate @@ -505,6 +512,15 @@ config NFT_MASQ This option adds the "masquerade" expression that you can use to perform NAT in the masquerade flavour. +config NFT_REDIR + depends on NF_TABLES + depends on NF_CONNTRACK + depends on NF_NAT + tristate "Netfilter nf_tables redirect support" + help + This option adds the "redirect" expression that you can use + to perform NAT in the redirect flavour. + config NFT_NAT depends on NF_TABLES depends on NF_CONNTRACK @@ -835,6 +851,7 @@ config NETFILTER_XT_TARGET_RATEEST config NETFILTER_XT_TARGET_REDIRECT tristate "REDIRECT target support" depends on NF_NAT + select NF_NAT_REDIRECT ---help--- REDIRECT is a special case of NAT: all incoming connections are mapped onto the incoming interface's address, causing the packets to diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index a9571be3f7914cff0619cb5c7d5829f56feab35f..89f73a9e98741702f2ea324e49b7b2f75bfb69fd 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -51,6 +51,7 @@ nf_nat-y := nf_nat_core.o nf_nat_proto_unknown.o nf_nat_proto_common.o \ obj-$(CONFIG_NF_LOG_COMMON) += nf_log_common.o obj-$(CONFIG_NF_NAT) += nf_nat.o +obj-$(CONFIG_NF_NAT_REDIRECT) += nf_nat_redirect.o # NAT protocols (nf_nat) obj-$(CONFIG_NF_NAT_PROTO_DCCP) += nf_nat_proto_dccp.o @@ -88,6 +89,7 @@ obj-$(CONFIG_NFT_HASH) += nft_hash.o obj-$(CONFIG_NFT_COUNTER) += nft_counter.o obj-$(CONFIG_NFT_LOG) += nft_log.o obj-$(CONFIG_NFT_MASQ) += nft_masq.o +obj-$(CONFIG_NFT_REDIR) += nft_redir.o # generic X tables obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 024a2e25c8a49448afbeed4f780912b24a6318a3..fea9ef566427f9295adac5c05ef020463af046cd 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h index fee7c64e4dd183e5e2fa9d312bbd5cf3134f59f8..974ff386db0fc118596d58311e1b6ba54cf8973a 100644 --- a/net/netfilter/ipset/ip_set_hash_gen.h +++ b/net/netfilter/ipset/ip_set_hash_gen.h @@ -147,16 +147,22 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize) #else #define __CIDR(cidr, i) (cidr) #endif + +/* cidr + 1 is stored in net_prefixes to support /0 */ +#define SCIDR(cidr, i) (__CIDR(cidr, i) + 1) + #ifdef IP_SET_HASH_WITH_NETS_PACKED -/* When cidr is packed with nomatch, cidr - 1 is stored in the entry */ -#define CIDR(cidr, i) (__CIDR(cidr, i) + 1) +/* When cidr is packed with nomatch, cidr - 1 is stored in the data entry */ +#define GCIDR(cidr, i) (__CIDR(cidr, i) + 1) +#define NCIDR(cidr) (cidr) #else -#define CIDR(cidr, i) (__CIDR(cidr, i)) +#define GCIDR(cidr, i) (__CIDR(cidr, i)) +#define NCIDR(cidr) (cidr - 1) #endif #define
SET_HOST_MASK(family) (family == AF_INET ? 32 : 128) -#ifdef IP_SET_HASH_WITH_MULTI +#ifdef IP_SET_HASH_WITH_NET0 #define NLEN(family) (SET_HOST_MASK(family) + 1) #else #define NLEN(family) SET_HOST_MASK(family) @@ -292,24 +298,22 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n) int i, j; /* Add in increasing prefix order, so larger cidr first */ - for (i = 0, j = -1; i < nets_length && h->nets[i].nets[n]; i++) { + for (i = 0, j = -1; i < nets_length && h->nets[i].cidr[n]; i++) { if (j != -1) continue; else if (h->nets[i].cidr[n] < cidr) j = i; else if (h->nets[i].cidr[n] == cidr) { - h->nets[i].nets[n]++; + h->nets[cidr - 1].nets[n]++; return; } } if (j != -1) { - for (; i > j; i--) { + for (; i > j; i--) h->nets[i].cidr[n] = h->nets[i - 1].cidr[n]; - h->nets[i].nets[n] = h->nets[i - 1].nets[n]; - } } h->nets[i].cidr[n] = cidr; - h->nets[i].nets[n] = 1; + h->nets[cidr - 1].nets[n] = 1; } static void @@ -320,16 +324,12 @@ mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n) for (i = 0; i < nets_length; i++) { if (h->nets[i].cidr[n] != cidr) continue; - if (h->nets[i].nets[n] > 1 || i == net_end || - h->nets[i + 1].nets[n] == 0) { - h->nets[i].nets[n]--; + h->nets[cidr -1].nets[n]--; + if (h->nets[cidr -1].nets[n] > 0) return; - } - for (j = i; j < net_end && h->nets[j].nets[n]; j++) { + for (j = i; j < net_end && h->nets[j].cidr[n]; j++) h->nets[j].cidr[n] = h->nets[j + 1].cidr[n]; - h->nets[j].nets[n] = h->nets[j + 1].nets[n]; - } - h->nets[j].nets[n] = 0; + h->nets[j].cidr[n] = 0; return; } } @@ -486,7 +486,7 @@ mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize) pr_debug("expired %u/%u\n", i, j); #ifdef IP_SET_HASH_WITH_NETS for (k = 0; k < IPSET_NET_COUNT; k++) - mtype_del_cidr(h, CIDR(data->cidr, k), + mtype_del_cidr(h, SCIDR(data->cidr, k), nets_length, k); #endif ip_set_ext_destroy(set, data); @@ -633,29 +633,6 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext, bool flag_exist = flags & IPSET_FLAG_EXIST; u32 key, multi = 0; - if (h->elements >= h->maxelem && SET_WITH_FORCEADD(set)) { - rcu_read_lock_bh(); - t = rcu_dereference_bh(h->table); - key = HKEY(value, h->initval, t->htable_bits); - n = hbucket(t,key); - if (n->pos) { - /* Choosing the first entry in the array to replace */ - j = 0; - goto reuse_slot; - } - rcu_read_unlock_bh(); - } - if (SET_WITH_TIMEOUT(set) && h->elements >= h->maxelem) - /* FIXME: when set is full, we slow down here */ - mtype_expire(set, h, NLEN(set->family), set->dsize); - - if (h->elements >= h->maxelem) { - if (net_ratelimit()) - pr_warn("Set %s is full, maxelem %u reached\n", - set->name, h->maxelem); - return -IPSET_ERR_HASH_FULL; - } - rcu_read_lock_bh(); t = rcu_dereference_bh(h->table); key = HKEY(value, h->initval, t->htable_bits); @@ -680,15 +657,32 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext, j != AHASH_MAX(h) + 1) j = i; } + if (h->elements >= h->maxelem && SET_WITH_FORCEADD(set) && n->pos) { + /* Choosing the first entry in the array to replace */ + j = 0; + goto reuse_slot; + } + if (SET_WITH_TIMEOUT(set) && h->elements >= h->maxelem) + /* FIXME: when set is full, we slow down here */ + mtype_expire(set, h, NLEN(set->family), set->dsize); + + if (h->elements >= h->maxelem) { + if (net_ratelimit()) + pr_warn("Set %s is full, maxelem %u reached\n", + set->name, h->maxelem); + ret = -IPSET_ERR_HASH_FULL; + goto out; + } + reuse_slot: if (j != AHASH_MAX(h) + 1) { /* Fill out reused slot */ data = ahash_data(n, j, set->dsize); #ifdef 
IP_SET_HASH_WITH_NETS for (i = 0; i < IPSET_NET_COUNT; i++) { - mtype_del_cidr(h, CIDR(data->cidr, i), + mtype_del_cidr(h, SCIDR(data->cidr, i), NLEN(set->family), i); - mtype_add_cidr(h, CIDR(d->cidr, i), + mtype_add_cidr(h, SCIDR(d->cidr, i), NLEN(set->family), i); } #endif @@ -705,7 +699,7 @@ reuse_slot: data = ahash_data(n, n->pos++, set->dsize); #ifdef IP_SET_HASH_WITH_NETS for (i = 0; i < IPSET_NET_COUNT; i++) - mtype_add_cidr(h, CIDR(d->cidr, i), NLEN(set->family), + mtype_add_cidr(h, SCIDR(d->cidr, i), NLEN(set->family), i); #endif h->elements++; @@ -766,7 +760,7 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext, h->elements--; #ifdef IP_SET_HASH_WITH_NETS for (j = 0; j < IPSET_NET_COUNT; j++) - mtype_del_cidr(h, CIDR(d->cidr, j), NLEN(set->family), + mtype_del_cidr(h, SCIDR(d->cidr, j), NLEN(set->family), j); #endif ip_set_ext_destroy(set, data); @@ -827,15 +821,15 @@ mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d, u8 nets_length = NLEN(set->family); pr_debug("test by nets\n"); - for (; j < nets_length && h->nets[j].nets[0] && !multi; j++) { + for (; j < nets_length && h->nets[j].cidr[0] && !multi; j++) { #if IPSET_NET_COUNT == 2 mtype_data_reset_elem(d, &orig); - mtype_data_netmask(d, h->nets[j].cidr[0], false); - for (k = 0; k < nets_length && h->nets[k].nets[1] && !multi; + mtype_data_netmask(d, NCIDR(h->nets[j].cidr[0]), false); + for (k = 0; k < nets_length && h->nets[k].cidr[1] && !multi; k++) { - mtype_data_netmask(d, h->nets[k].cidr[1], true); + mtype_data_netmask(d, NCIDR(h->nets[k].cidr[1]), true); #else - mtype_data_netmask(d, h->nets[j].cidr[0]); + mtype_data_netmask(d, NCIDR(h->nets[j].cidr[0])); #endif key = HKEY(d, h->initval, t->htable_bits); n = hbucket(t, key); @@ -883,7 +877,7 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext, /* If we test an IP address and not a network address, * try all possible network sizes */ for (i = 0; i < IPSET_NET_COUNT; i++) - if (CIDR(d->cidr, i) != SET_HOST_MASK(set->family)) + if (GCIDR(d->cidr, i) != SET_HOST_MASK(set->family)) break; if (i == IPSET_NET_COUNT) { ret = mtype_test_cidrs(set, d, ext, mext, flags); @@ -1107,8 +1101,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, hsize = sizeof(*h); #ifdef IP_SET_HASH_WITH_NETS - hsize += sizeof(struct net_prefixes) * - (set->family == NFPROTO_IPV4 ? 
32 : 128); + hsize += sizeof(struct net_prefixes) * NLEN(set->family); #endif h = kzalloc(hsize, GFP_KERNEL); if (!h) diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index 35dd358734421fd5c7be3e4cc74d83a7074f7df5..758b002130d92f0e7fecf63627d426289cf03820 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c @@ -115,6 +115,7 @@ iface_add(struct rb_root *root, const char **iface) #define IP_SET_HASH_WITH_NETS #define IP_SET_HASH_WITH_RBTREE #define IP_SET_HASH_WITH_MULTI +#define IP_SET_HASH_WITH_NET0 #define STREQ(a, b) (strcmp(a, b) == 0) diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c index da00284b3571a748d843d0d79f6a193a876000d2..ea8772afb6e70c4d366220fc7654f64793e4ef88 100644 --- a/net/netfilter/ipset/ip_set_hash_netnet.c +++ b/net/netfilter/ipset/ip_set_hash_netnet.c @@ -46,6 +46,7 @@ struct hash_netnet4_elem { __be64 ipcmp; }; u8 nomatch; + u8 padding; union { u8 cidr[2]; u16 ccmp; @@ -271,6 +272,7 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[], struct hash_netnet6_elem { union nf_inet_addr ip[2]; u8 nomatch; + u8 padding; union { u8 cidr[2]; u16 ccmp; diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c index b8053d675fc39ce0261dcf6270bc7616f383884c..bfaa94c7baa79300da255f9f7761f888b79136e7 100644 --- a/net/netfilter/ipset/ip_set_hash_netportnet.c +++ b/net/netfilter/ipset/ip_set_hash_netportnet.c @@ -53,6 +53,7 @@ struct hash_netportnet4_elem { u8 cidr[2]; u16 ccmp; }; + u16 padding; u8 nomatch:1; u8 proto; }; @@ -324,6 +325,7 @@ struct hash_netportnet6_elem { u8 cidr[2]; u16 ccmp; }; + u16 padding; u8 nomatch:1; u8 proto; }; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index ac7ba689efe76c3e3df89c44dbd0094b08b6cf89..b8295a430a5600d35b6de4163ba3b98e75c5f28c 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -465,8 +465,7 @@ __ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc) static void ip_vs_service_free(struct ip_vs_service *svc) { - if (svc->stats.cpustats) - free_percpu(svc->stats.cpustats); + free_percpu(svc->stats.cpustats); kfree(svc); } diff --git a/net/netfilter/ipvs/ip_vs_pe.c b/net/netfilter/ipvs/ip_vs_pe.c index 1a82b29ce8eab833cc83f05ba032431442b1c70d..0df17caa8af6cb82d022bc902602b64843a3fa54 100644 --- a/net/netfilter/ipvs/ip_vs_pe.c +++ b/net/netfilter/ipvs/ip_vs_pe.c @@ -37,8 +37,7 @@ struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name) rcu_read_unlock(); return pe; } - if (pe->module) - module_put(pe->module); + module_put(pe->module); } rcu_read_unlock(); diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c index 4dbcda6258bc2c2c20ecc71e130d25a462a90be4..199760c71f3995f88dbe73b159906cf7840cdded 100644 --- a/net/netfilter/ipvs/ip_vs_sched.c +++ b/net/netfilter/ipvs/ip_vs_sched.c @@ -104,8 +104,7 @@ static struct ip_vs_scheduler *ip_vs_sched_getbyname(const char *sched_name) mutex_unlock(&ip_vs_sched_mutex); return sched; } - if (sched->module) - module_put(sched->module); + module_put(sched->module); } mutex_unlock(&ip_vs_sched_mutex); diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 7162c86fd50dca443c0e7c3aa008cdb93038b7e3..c47ffd7a0a709cb73834c84652f251960f25db79 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -820,8 +820,7 @@ ip_vs_conn_fill_param_sync(struct 
net *net, int af, union ip_vs_sync_conn *sc, p->pe_data = kmemdup(pe_data, pe_data_len, GFP_ATOMIC); if (!p->pe_data) { - if (p->pe->module) - module_put(p->pe->module); + module_put(p->pe->module); return -ENOMEM; } p->pe_data_len = pe_data_len; diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index bd90bf8107dacb4e7e0683110bb2146faf168b26..3aedbda7658a4fbbbd54fcc683a09f089ffe6de5 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -293,7 +293,6 @@ __ip_vs_get_out_rt(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest, &dest->addr.ip, &dest_dst->dst_saddr.ip, atomic_read(&rt->dst.__refcnt)); } - daddr = dest->addr.ip; if (ret_saddr) *ret_saddr = dest_dst->dst_saddr.ip; } else { @@ -344,7 +343,7 @@ __ip_vs_get_out_rt(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest, skb_dst_drop(skb); if (noref) { if (!local) - skb_dst_set_noref_force(skb, &rt->dst); + skb_dst_set_noref(skb, &rt->dst); else skb_dst_set(skb, dst_clone(&rt->dst)); } else @@ -488,7 +487,7 @@ __ip_vs_get_out_rt_v6(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest, skb_dst_drop(skb); if (noref) { if (!local) - skb_dst_set_noref_force(skb, &rt->dst); + skb_dst_set_noref(skb, &rt->dst); else skb_dst_set(skb, dst_clone(&rt->dst)); } else diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 5016a6929085ebdbf151a1f47582d36195885540..a11674806707e18fb9d86860ad0717dfda0b0da2 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -824,22 +824,19 @@ __nf_conntrack_alloc(struct net *net, u16 zone, atomic_dec(&net->ct.count); return ERR_PTR(-ENOMEM); } - /* - * Let ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next - * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged. 
- */ - memset(&ct->tuplehash[IP_CT_DIR_MAX], 0, - offsetof(struct nf_conn, proto) - - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX])); spin_lock_init(&ct->lock); ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig; ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL; ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl; /* save hash for reusing when confirming */ *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash; + ct->status = 0; /* Don't set timer yet: wait for confirmation */ setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct); write_pnet(&ct->ct_net, net); + memset(&ct->__nfct_init_offset[0], 0, + offsetof(struct nf_conn, proto) - + offsetof(struct nf_conn, __nfct_init_offset[0])); #ifdef CONFIG_NF_CONNTRACK_ZONES if (zone) { struct nf_conntrack_zone *nf_ct_zone; diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 3a3a60b126e0097f309badc40ebf8016b3773c98..1d69f5b9748fd760630b998493d96ad84d998aba 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c @@ -728,7 +728,8 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct, /* If the calling party is on the same side of the forward-to party, * we don't need to track the second call */ -static int callforward_do_filter(const union nf_inet_addr *src, +static int callforward_do_filter(struct net *net, + const union nf_inet_addr *src, const union nf_inet_addr *dst, u_int8_t family) { @@ -750,9 +751,9 @@ static int callforward_do_filter(const union nf_inet_addr *src, memset(&fl2, 0, sizeof(fl2)); fl2.daddr = dst->ip; - if (!afinfo->route(&init_net, (struct dst_entry **)&rt1, + if (!afinfo->route(net, (struct dst_entry **)&rt1, flowi4_to_flowi(&fl1), false)) { - if (!afinfo->route(&init_net, (struct dst_entry **)&rt2, + if (!afinfo->route(net, (struct dst_entry **)&rt2, flowi4_to_flowi(&fl2), false)) { if (rt_nexthop(rt1, fl1.daddr) == rt_nexthop(rt2, fl2.daddr) && @@ -774,9 +775,9 @@ static int callforward_do_filter(const union nf_inet_addr *src, memset(&fl2, 0, sizeof(fl2)); fl2.daddr = dst->in6; - if (!afinfo->route(&init_net, (struct dst_entry **)&rt1, + if (!afinfo->route(net, (struct dst_entry **)&rt1, flowi6_to_flowi(&fl1), false)) { - if (!afinfo->route(&init_net, (struct dst_entry **)&rt2, + if (!afinfo->route(net, (struct dst_entry **)&rt2, flowi6_to_flowi(&fl2), false)) { if (ipv6_addr_equal(rt6_nexthop(rt1), rt6_nexthop(rt2)) && @@ -807,6 +808,7 @@ static int expect_callforwarding(struct sk_buff *skb, __be16 port; union nf_inet_addr addr; struct nf_conntrack_expect *exp; + struct net *net = nf_ct_net(ct); typeof(nat_callforwarding_hook) nat_callforwarding; /* Read alternativeAddress */ @@ -816,7 +818,7 @@ static int expect_callforwarding(struct sk_buff *skb, /* If the calling party is on the same side of the forward-to party, * we don't need to track the second call */ if (callforward_filter && - callforward_do_filter(&addr, &ct->tuplehash[!dir].tuple.src.u3, + callforward_do_filter(net, &addr, &ct->tuplehash[!dir].tuple.src.u3, nf_ct_l3num(ct))) { pr_debug("nf_ct_q931: Call Forwarding not tracked\n"); return 0; diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 5b3eae7d4c9a51dd1bdd559fa5943125035d8490..bd9d31537905e4e372da427ac29ce5948f2af34c 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -250,7 +250,7 @@ out: } EXPORT_SYMBOL_GPL(__nf_ct_try_assign_helper); -/* appropiate ct lock protecting must be taken by caller */ 
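The __nf_conntrack_alloc() hunk above swaps the old tuplehash-based memset bounds for an explicit __nfct_init_offset[0] marker, so the fields that are initialized by hand (tuplehash, status, timeout, ct_net) sit before the zeroed window and proto marks its end. A self-contained sketch of that GNU zero-length-array idiom; the struct layout is illustrative, not the real struct nf_conn:

#include <stddef.h>
#include <string.h>

struct example_conn {
	int keep_a;		/* set explicitly before the memset */
	long keep_b;
	char init_offset[0];	/* zero-from-here marker, occupies no space */
	int zeroed_c;
	int zeroed_d;
	struct { int x; } proto;	/* end of the zeroed window */
};

static void example_conn_init(struct example_conn *c)
{
	c->keep_a = 1;
	c->keep_b = 2;
	/* zero everything in [init_offset, proto), the nf_conn idiom above */
	memset(&c->init_offset[0], 0,
	       offsetof(struct example_conn, proto) -
	       offsetof(struct example_conn, init_offset[0]));
}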
+/* appropriate ct lock protecting must be taken by caller */ static inline int unhelp(struct nf_conntrack_tuple_hash *i, const struct nf_conntrack_helper *me) { diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 6e3b9117db1f799ded98e1473d4c4d03e10bfd16..43c926cae9c00de1306144abcddcf64f510c2864 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c @@ -19,6 +19,9 @@ static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly; static DEFINE_MUTEX(nf_log_mutex); +#define nft_log_dereference(logger) \ + rcu_dereference_protected(logger, lockdep_is_held(&nf_log_mutex)) + static struct nf_logger *__find_logger(int pf, const char *str_logger) { struct nf_logger *log; @@ -28,8 +31,7 @@ static struct nf_logger *__find_logger(int pf, const char *str_logger) if (loggers[pf][i] == NULL) continue; - log = rcu_dereference_protected(loggers[pf][i], - lockdep_is_held(&nf_log_mutex)); + log = nft_log_dereference(loggers[pf][i]); if (!strncasecmp(str_logger, log->name, strlen(log->name))) return log; } @@ -45,8 +47,7 @@ void nf_log_set(struct net *net, u_int8_t pf, const struct nf_logger *logger) return; mutex_lock(&nf_log_mutex); - log = rcu_dereference_protected(net->nf.nf_loggers[pf], - lockdep_is_held(&nf_log_mutex)); + log = nft_log_dereference(net->nf.nf_loggers[pf]); if (log == NULL) rcu_assign_pointer(net->nf.nf_loggers[pf], logger); @@ -61,8 +62,7 @@ void nf_log_unset(struct net *net, const struct nf_logger *logger) mutex_lock(&nf_log_mutex); for (i = 0; i < NFPROTO_NUMPROTO; i++) { - log = rcu_dereference_protected(net->nf.nf_loggers[i], - lockdep_is_held(&nf_log_mutex)); + log = nft_log_dereference(net->nf.nf_loggers[i]); if (log == logger) RCU_INIT_POINTER(net->nf.nf_loggers[i], NULL); } @@ -75,6 +75,7 @@ EXPORT_SYMBOL(nf_log_unset); int nf_log_register(u_int8_t pf, struct nf_logger *logger) { int i; + int ret = 0; if (pf >= ARRAY_SIZE(init_net.nf.nf_loggers)) return -EINVAL; @@ -82,16 +83,25 @@ int nf_log_register(u_int8_t pf, struct nf_logger *logger) mutex_lock(&nf_log_mutex); if (pf == NFPROTO_UNSPEC) { + for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) { + if (rcu_access_pointer(loggers[i][logger->type])) { + ret = -EEXIST; + goto unlock; + } + } for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) rcu_assign_pointer(loggers[i][logger->type], logger); } else { - /* register at end of list to honor first register win */ + if (rcu_access_pointer(loggers[pf][logger->type])) { + ret = -EEXIST; + goto unlock; + } rcu_assign_pointer(loggers[pf][logger->type], logger); } +unlock: mutex_unlock(&nf_log_mutex); - - return 0; + return ret; } EXPORT_SYMBOL(nf_log_register); @@ -144,8 +154,7 @@ int nf_logger_find_get(int pf, enum nf_log_type type) struct nf_logger *logger; int ret = -ENOENT; - logger = loggers[pf][type]; - if (logger == NULL) + if (rcu_access_pointer(loggers[pf][type]) == NULL) request_module("nf-logger-%u-%u", pf, type); rcu_read_lock(); @@ -297,8 +306,7 @@ static int seq_show(struct seq_file *s, void *v) int i; struct net *net = seq_file_net(s); - logger = rcu_dereference_protected(net->nf.nf_loggers[*pos], - lockdep_is_held(&nf_log_mutex)); + logger = nft_log_dereference(net->nf.nf_loggers[*pos]); if (!logger) seq_printf(s, "%2lld NONE (", *pos); @@ -312,8 +320,7 @@ static int seq_show(struct seq_file *s, void *v) if (loggers[*pos][i] == NULL) continue; - logger = rcu_dereference_protected(loggers[*pos][i], - lockdep_is_held(&nf_log_mutex)); + logger = nft_log_dereference(loggers[*pos][i]); seq_printf(s, "%s", logger->name); if 
(i == 0 && loggers[*pos][i + 1] != NULL) seq_printf(s, ","); @@ -387,8 +394,7 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write, mutex_unlock(&nf_log_mutex); } else { mutex_lock(&nf_log_mutex); - logger = rcu_dereference_protected(net->nf.nf_loggers[tindex], - lockdep_is_held(&nf_log_mutex)); + logger = nft_log_dereference(net->nf.nf_loggers[tindex]); if (!logger) table->data = "NONE"; else diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c new file mode 100644 index 0000000000000000000000000000000000000000..97b75f9bfbcd6852bdff9ca2d2e218b3539a0f8b --- /dev/null +++ b/net/netfilter/nf_nat_redirect.c @@ -0,0 +1,127 @@ +/* + * (C) 1999-2001 Paul `Rusty' Russell + * (C) 2002-2006 Netfilter Core Team + * Copyright (c) 2011 Patrick McHardy + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Based on Rusty Russell's IPv4 REDIRECT target. Development of IPv6 + * NAT funded by Astaro. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +unsigned int +nf_nat_redirect_ipv4(struct sk_buff *skb, + const struct nf_nat_ipv4_multi_range_compat *mr, + unsigned int hooknum) +{ + struct nf_conn *ct; + enum ip_conntrack_info ctinfo; + __be32 newdst; + struct nf_nat_range newrange; + + NF_CT_ASSERT(hooknum == NF_INET_PRE_ROUTING || + hooknum == NF_INET_LOCAL_OUT); + + ct = nf_ct_get(skb, &ctinfo); + NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)); + + /* Local packets: make them go to loopback */ + if (hooknum == NF_INET_LOCAL_OUT) { + newdst = htonl(0x7F000001); + } else { + struct in_device *indev; + struct in_ifaddr *ifa; + + newdst = 0; + + rcu_read_lock(); + indev = __in_dev_get_rcu(skb->dev); + if (indev != NULL) { + ifa = indev->ifa_list; + newdst = ifa->ifa_local; + } + rcu_read_unlock(); + + if (!newdst) + return NF_DROP; + } + + /* Transfer from original range. */ + memset(&newrange.min_addr, 0, sizeof(newrange.min_addr)); + memset(&newrange.max_addr, 0, sizeof(newrange.max_addr)); + newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS; + newrange.min_addr.ip = newdst; + newrange.max_addr.ip = newdst; + newrange.min_proto = mr->range[0].min; + newrange.max_proto = mr->range[0].max; + + /* Hand modified range to generic setup. 
*/ + return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST); +} +EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv4); + +static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT; + +unsigned int +nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, + unsigned int hooknum) +{ + struct nf_nat_range newrange; + struct in6_addr newdst; + enum ip_conntrack_info ctinfo; + struct nf_conn *ct; + + ct = nf_ct_get(skb, &ctinfo); + if (hooknum == NF_INET_LOCAL_OUT) { + newdst = loopback_addr; + } else { + struct inet6_dev *idev; + struct inet6_ifaddr *ifa; + bool addr = false; + + rcu_read_lock(); + idev = __in6_dev_get(skb->dev); + if (idev != NULL) { + list_for_each_entry(ifa, &idev->addr_list, if_list) { + newdst = ifa->addr; + addr = true; + break; + } + } + rcu_read_unlock(); + + if (!addr) + return NF_DROP; + } + + newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS; + newrange.min_addr.in6 = newdst; + newrange.max_addr.in6 = newdst; + newrange.min_proto = range->min_proto; + newrange.max_proto = range->max_proto; + + return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST); +} +EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv6); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Patrick McHardy "); diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 66e8425dbfe77b11e41eb7c97c68f3778ee0c9e7..129a8daa4abf31959801e99c4f2fbfc7f1aab230 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2477,7 +2477,7 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb, const struct nfgenmsg *nfmsg = nlmsg_data(nlh); int err; - /* Verify existance before starting dump */ + /* Verify existence before starting dump */ err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla); if (err < 0) return err; @@ -3665,8 +3665,7 @@ static int nf_tables_abort(struct sk_buff *skb) break; case NFT_MSG_NEWCHAIN: if (nft_trans_chain_update(trans)) { - if (nft_trans_chain_stats(trans)) - free_percpu(nft_trans_chain_stats(trans)); + free_percpu(nft_trans_chain_stats(trans)); nft_trans_destroy(trans); } else { diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 5f1be5ba35595abb3e96a4d7c9e16fe2c8f4bd8c..11d85b3813f26ae87f465d7f4d09ae36a6d0167e 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -12,6 +12,9 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
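nf_nat_redirect_ipv4() and nf_nat_redirect_ipv6() above are the backends shared by the xt_REDIRECT target and the new nft_redir expression: pick the new destination (loopback for locally generated packets, otherwise an address of the inbound interface) and hand a DNAT range to nf_nat_setup_info(). A hedged sketch of how a REDIRECT-style xt target feeds them; the glue here is illustrative, not the actual xt_REDIRECT source:

static unsigned int
example_redirect_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
	/* targinfo carries the proto/port range configured from userspace */
	const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;

	return nf_nat_redirect_ipv4(skb, mr, par->hooknum);
}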
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/if_arp.h>
@@ -337,9 +340,6 @@ nfulnl_alloc_skb(struct net *net, u32 peer_portid, unsigned int inst_size,
 
 		skb = nfnetlink_alloc_skb(net, pkt_size,
 					  peer_portid, GFP_ATOMIC);
-		if (!skb)
-			pr_err("nfnetlink_log: can't even alloc %u bytes\n",
-			       pkt_size);
 	}
 }
@@ -570,10 +570,8 @@ __build_packet_message(struct nfnl_log_net *log,
 		struct nlattr *nla;
 		int size = nla_attr_size(data_len);
 
-		if (skb_tailroom(inst->skb) < nla_total_size(data_len)) {
-			printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
-			return -1;
-		}
+		if (skb_tailroom(inst->skb) < nla_total_size(data_len))
+			goto nla_put_failure;
 
 		nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
 		nla->nla_type = NFULA_PAYLOAD;
@@ -1069,19 +1067,19 @@ static int __init nfnetlink_log_init(void)
 	netlink_register_notifier(&nfulnl_rtnl_notifier);
 	status = nfnetlink_subsys_register(&nfulnl_subsys);
 	if (status < 0) {
-		pr_err("log: failed to create netlink socket\n");
+		pr_err("failed to create netlink socket\n");
 		goto cleanup_netlink_notifier;
 	}
 
 	status = nf_log_register(NFPROTO_UNSPEC, &nfulnl_logger);
 	if (status < 0) {
-		pr_err("log: failed to register logger\n");
+		pr_err("failed to register logger\n");
 		goto cleanup_subsys;
 	}
 
 	status = register_pernet_subsys(&nfnl_log_net_ops);
 	if (status < 0) {
-		pr_err("log: failed to register pernet ops\n");
+		pr_err("failed to register pernet ops\n");
 		goto cleanup_logger;
 	}
 
 	return status;
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 8892b7b6184aaf9beafac3da8a665f5f4b696247..1e316ce4cb5dedc6d4a97c6589cc4b6d19866dcd 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -65,7 +65,7 @@ static int nft_hash_insert(const struct nft_set *set,
 	if (set->flags & NFT_SET_MAP)
 		nft_data_copy(he->data, &elem->data);
 
-	rhashtable_insert(priv, &he->node, GFP_KERNEL);
+	rhashtable_insert(priv, &he->node);
 
 	return 0;
 }
@@ -88,7 +88,7 @@ static void nft_hash_remove(const struct nft_set *set,
 	pprev = elem->cookie;
 	he = rht_dereference((*pprev), priv);
 
-	rhashtable_remove_pprev(priv, he, pprev, GFP_KERNEL);
+	rhashtable_remove_pprev(priv, he, pprev);
 
 	synchronize_rcu();
 	kfree(he);
@@ -153,10 +153,12 @@ static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
 	return sizeof(struct rhashtable);
 }
 
-static int lockdep_nfnl_lock_is_held(void)
+#ifdef CONFIG_PROVE_LOCKING
+static int lockdep_nfnl_lock_is_held(void *parent)
 {
 	return lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES);
 }
+#endif
 
 static int nft_hash_init(const struct nft_set *set,
 			 const struct nft_set_desc *desc,
@@ -171,7 +173,9 @@ static int nft_hash_init(const struct nft_set *set,
 		.hashfn = jhash,
 		.grow_decision = rht_grow_above_75,
 		.shrink_decision = rht_shrink_below_30,
+#ifdef CONFIG_PROVE_LOCKING
 		.mutex_is_held = lockdep_nfnl_lock_is_held,
+#endif
 	};
 
 	return rhashtable_init(priv, &params);
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 1e7c076ca63ab92e062c56e68910a47b8b1dffc6..e99911eda91594a6b0f9ea9bcce0aa01b9fc334a 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -165,6 +165,12 @@ void nft_meta_get_eval(const struct nft_expr *expr,
 			goto err;
 		dest->data[0] = out->group;
 		break;
+	case NFT_META_CGROUP:
+		if (skb->sk == NULL)
+			break;
+
+		dest->data[0] = skb->sk->sk_classid;
+		break;
 	default:
 		WARN_ON(1);
 		goto err;
@@ -240,6 +246,7 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
 	case NFT_META_CPU:
 	case NFT_META_IIFGROUP:
 	case NFT_META_OIFGROUP:
+	case NFT_META_CGROUP:
 		break;
 	default:
 		return -EOPNOTSUPP;
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
new file mode 100644
index 0000000000000000000000000000000000000000..9e8093f283113117ac7618c742624742e618ed3a
--- /dev/null
+++ b/net/netfilter/nft_redir.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2014 Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nft_redir.h>
+
+const struct nla_policy nft_redir_policy[NFTA_REDIR_MAX + 1] = {
+	[NFTA_REDIR_REG_PROTO_MIN]	= { .type = NLA_U32 },
+	[NFTA_REDIR_REG_PROTO_MAX]	= { .type = NLA_U32 },
+	[NFTA_REDIR_FLAGS]		= { .type = NLA_U32 },
+};
+EXPORT_SYMBOL_GPL(nft_redir_policy);
+
+int nft_redir_init(const struct nft_ctx *ctx,
+		   const struct nft_expr *expr,
+		   const struct nlattr * const tb[])
+{
+	struct nft_redir *priv = nft_expr_priv(expr);
+	int err;
+
+	err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+	if (err < 0)
+		return err;
+
+	if (tb[NFTA_REDIR_REG_PROTO_MIN]) {
+		priv->sreg_proto_min =
+			ntohl(nla_get_be32(tb[NFTA_REDIR_REG_PROTO_MIN]));
+
+		err = nft_validate_input_register(priv->sreg_proto_min);
+		if (err < 0)
+			return err;
+
+		if (tb[NFTA_REDIR_REG_PROTO_MAX]) {
+			priv->sreg_proto_max =
+				ntohl(nla_get_be32(tb[NFTA_REDIR_REG_PROTO_MAX]));
+
+			err = nft_validate_input_register(priv->sreg_proto_max);
+			if (err < 0)
+				return err;
+		} else {
+			priv->sreg_proto_max = priv->sreg_proto_min;
+		}
+	}
+
+	if (tb[NFTA_REDIR_FLAGS]) {
+		priv->flags = ntohl(nla_get_be32(tb[NFTA_REDIR_FLAGS]));
+		if (priv->flags & ~NF_NAT_RANGE_MASK)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nft_redir_init);
+
+int nft_redir_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	const struct nft_redir *priv = nft_expr_priv(expr);
+
+	if (priv->sreg_proto_min) {
+		if (nla_put_be32(skb, NFTA_REDIR_REG_PROTO_MIN,
+				 htonl(priv->sreg_proto_min)))
+			goto nla_put_failure;
+		if (nla_put_be32(skb, NFTA_REDIR_REG_PROTO_MAX,
+				 htonl(priv->sreg_proto_max)))
+			goto nla_put_failure;
+	}
+
+	if (priv->flags != 0 &&
+	    nla_put_be32(skb, NFTA_REDIR_FLAGS, htonl(priv->flags)))
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+EXPORT_SYMBOL_GPL(nft_redir_dump);
+
+int nft_redir_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
+		       const struct nft_data **data)
+{
+	return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+}
+EXPORT_SYMBOL_GPL(nft_redir_validate);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
diff --git a/net/netfilter/xt_DSCP.c b/net/netfilter/xt_DSCP.c
index ae8271652efa9cf5a6f939fcedc3238ca6c865c7..3f83d38c4e5bb975f80a6834965a18c92f254dcc 100644
--- a/net/netfilter/xt_DSCP.c
+++ b/net/netfilter/xt_DSCP.c
@@ -37,7 +37,8 @@ dscp_tg(struct sk_buff *skb, const struct xt_action_param *par)
 		if (!skb_make_writable(skb, sizeof(struct iphdr)))
 			return NF_DROP;
 
-		ipv4_change_dsfield(ip_hdr(skb), (__u8)(~XT_DSCP_MASK),
+		ipv4_change_dsfield(ip_hdr(skb),
+				    (__force __u8)(~XT_DSCP_MASK),
 				    dinfo->dscp << XT_DSCP_SHIFT);
 	}
 
@@ -54,7 +55,8 @@ dscp_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 		if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
 			return NF_DROP;
 
-		ipv6_change_dsfield(ipv6_hdr(skb), (__u8)(~XT_DSCP_MASK),
+		ipv6_change_dsfield(ipv6_hdr(skb),
+				    (__force __u8)(~XT_DSCP_MASK),
 				    dinfo->dscp << XT_DSCP_SHIFT);
 	}
 	return XT_CONTINUE;
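
The two xt_DSCP hunks above change no behaviour; they only annotate a deliberate truncation for sparse. ~XT_DSCP_MASK is promoted to int (0xffffff03 for a 0xfc mask), so storing it back into a __u8 drops the upper bits, and the (__force __u8) cast tells sparse that this is intended. Below is a minimal userspace sketch of the same masking; the mask and shift mirror linux/netfilter/xt_dscp.h, but the function and values here are illustrative stand-ins, not the kernel implementation:

#include <stdint.h>
#include <stdio.h>

#define XT_DSCP_MASK	0xfc	/* upper six bits of the DS field */
#define XT_DSCP_SHIFT	2

/* Rewrite the DSCP bits of a DS field, preserving the two ECN bits. */
static uint8_t change_dsfield(uint8_t dsfield, uint8_t dscp)
{
	/* ~XT_DSCP_MASK is an int (0xffffff03); the explicit cast back
	 * to a byte truncates it to 0x03, which is what the patch's
	 * (__force __u8) annotation marks as intentional for sparse.
	 */
	return (dsfield & (uint8_t)~XT_DSCP_MASK) |
	       (uint8_t)(dscp << XT_DSCP_SHIFT);
}

int main(void)
{
	uint8_t ds = (0x2e << XT_DSCP_SHIFT) | 0x01; /* DSCP EF, ECT(1) */

	/* Rewrite to AF11 (DSCP 10): prints "0xb9 -> 0x29". */
	printf("0x%02x -> 0x%02x\n", ds, change_dsfield(ds, 0x0a));
	return 0;
}

In the kernel proper, ipv4_change_dsfield() and ipv6_change_dsfield() apply the same mask to the live packet header, and the IPv4 variant additionally fixes up the header checksum.
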
diff --git a/net/netfilter/xt_REDIRECT.c b/net/netfilter/xt_REDIRECT.c
index 22a10309297c33b257779618c54f2a95237d59d2..03f0b370e17876b5e1f4ed382260509bdf0f2235 100644
--- a/net/netfilter/xt_REDIRECT.c
+++ b/net/netfilter/xt_REDIRECT.c
@@ -26,48 +26,12 @@
 #include <net/checksum.h>
 #include <net/protocol.h>
 #include <net/netfilter/nf_nat.h>
-
-static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT;
+#include <net/netfilter/nf_nat_redirect.h>
 
 static unsigned int
 redirect_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
-	const struct nf_nat_range *range = par->targinfo;
-	struct nf_nat_range newrange;
-	struct in6_addr newdst;
-	enum ip_conntrack_info ctinfo;
-	struct nf_conn *ct;
-
-	ct = nf_ct_get(skb, &ctinfo);
-	if (par->hooknum == NF_INET_LOCAL_OUT)
-		newdst = loopback_addr;
-	else {
-		struct inet6_dev *idev;
-		struct inet6_ifaddr *ifa;
-		bool addr = false;
-
-		rcu_read_lock();
-		idev = __in6_dev_get(skb->dev);
-		if (idev != NULL) {
-			list_for_each_entry(ifa, &idev->addr_list, if_list) {
-				newdst = ifa->addr;
-				addr = true;
-				break;
-			}
-		}
-		rcu_read_unlock();
-
-		if (!addr)
-			return NF_DROP;
-	}
-
-	newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
-	newrange.min_addr.in6 = newdst;
-	newrange.max_addr.in6 = newdst;
-	newrange.min_proto = range->min_proto;
-	newrange.max_proto = range->max_proto;
-
-	return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
+	return nf_nat_redirect_ipv6(skb, par->targinfo, par->hooknum);
 }
 
 static int redirect_tg6_checkentry(const struct xt_tgchk_param *par)
@@ -98,48 +62,7 @@ static int redirect_tg4_check(const struct xt_tgchk_param *par)
 static unsigned int
 redirect_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 {
-	struct nf_conn *ct;
-	enum ip_conntrack_info ctinfo;
-	__be32 newdst;
-	const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
-	struct nf_nat_range newrange;
-
-	NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
-		     par->hooknum == NF_INET_LOCAL_OUT);
-
-	ct = nf_ct_get(skb, &ctinfo);
-	NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
-
-	/* Local packets: make them go to loopback */
-	if (par->hooknum == NF_INET_LOCAL_OUT)
-		newdst = htonl(0x7F000001);
-	else {
-		struct in_device *indev;
-		struct in_ifaddr *ifa;
-
-		newdst = 0;
-
-		rcu_read_lock();
-		indev = __in_dev_get_rcu(skb->dev);
-		if (indev && (ifa = indev->ifa_list))
-			newdst = ifa->ifa_local;
-		rcu_read_unlock();
-
-		if (!newdst)
-			return NF_DROP;
-	}
-
-	/* Transfer from original range. */
-	memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
-	memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
-	newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
-	newrange.min_addr.ip = newdst;
-	newrange.max_addr.ip = newdst;
-	newrange.min_proto = mr->range[0].min;
-	newrange.max_proto = mr->range[0].max;
-
-	/* Hand modified range to generic setup.
*/ - return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST); + return nf_nat_redirect_ipv4(skb, par->targinfo, par->hooknum); } static struct xt_target redirect_tg_reg[] __read_mostly = { diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c index fbc66bb250d54de3d8cac6254110fedf1af7dd01..29ba6218a820e7cc8e9363db91312cc27c09004e 100644 --- a/net/netfilter/xt_connlimit.c +++ b/net/netfilter/xt_connlimit.c @@ -134,6 +134,7 @@ static bool add_hlist(struct hlist_head *head, static unsigned int check_hlist(struct net *net, struct hlist_head *head, const struct nf_conntrack_tuple *tuple, + u16 zone, bool *addit) { const struct nf_conntrack_tuple_hash *found; @@ -147,8 +148,7 @@ static unsigned int check_hlist(struct net *net, /* check the saved connections */ hlist_for_each_entry_safe(conn, n, head, node) { - found = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE, - &conn->tuple); + found = nf_conntrack_find_get(net, zone, &conn->tuple); if (found == NULL) { hlist_del(&conn->node); kmem_cache_free(connlimit_conn_cachep, conn); @@ -201,7 +201,7 @@ static unsigned int count_tree(struct net *net, struct rb_root *root, const struct nf_conntrack_tuple *tuple, const union nf_inet_addr *addr, const union nf_inet_addr *mask, - u8 family) + u8 family, u16 zone) { struct xt_connlimit_rb *gc_nodes[CONNLIMIT_GC_MAX_NODES]; struct rb_node **rbnode, *parent; @@ -229,7 +229,7 @@ count_tree(struct net *net, struct rb_root *root, } else { /* same source network -> be counted! */ unsigned int count; - count = check_hlist(net, &rbconn->hhead, tuple, &addit); + count = check_hlist(net, &rbconn->hhead, tuple, zone, &addit); tree_nodes_free(root, gc_nodes, gc_count); if (!addit) @@ -245,7 +245,7 @@ count_tree(struct net *net, struct rb_root *root, continue; /* only used for GC on hhead, retval and 'addit' ignored */ - check_hlist(net, &rbconn->hhead, tuple, &addit); + check_hlist(net, &rbconn->hhead, tuple, zone, &addit); if (hlist_empty(&rbconn->hhead)) gc_nodes[gc_count++] = rbconn; } @@ -290,7 +290,7 @@ static int count_them(struct net *net, const struct nf_conntrack_tuple *tuple, const union nf_inet_addr *addr, const union nf_inet_addr *mask, - u_int8_t family) + u_int8_t family, u16 zone) { struct rb_root *root; int count; @@ -306,7 +306,7 @@ static int count_them(struct net *net, spin_lock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]); - count = count_tree(net, root, tuple, addr, mask, family); + count = count_tree(net, root, tuple, addr, mask, family, zone); spin_unlock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]); @@ -324,13 +324,16 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) enum ip_conntrack_info ctinfo; const struct nf_conn *ct; unsigned int connections; + u16 zone = NF_CT_DEFAULT_ZONE; ct = nf_ct_get(skb, &ctinfo); - if (ct != NULL) + if (ct != NULL) { tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; - else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), - par->family, &tuple)) + zone = nf_ct_zone(ct); + } else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), + par->family, &tuple)) { goto hotdrop; + } if (par->family == NFPROTO_IPV6) { const struct ipv6hdr *iph = ipv6_hdr(skb); @@ -343,7 +346,7 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) } connections = count_them(net, info->data, tuple_ptr, &addr, - &info->mask, par->family); + &info->mask, par->family, zone); if (connections == 0) /* kmalloc failed, drop it entirely */ goto hotdrop; diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c 
index a9faae89f95533a53da4651efb2f737e3a0952b4..30dbe34915ae2b1fcf4d0ca3149369bdf3913f0f 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -43,25 +43,29 @@ MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_recent"); MODULE_ALIAS("ip6t_recent"); -static unsigned int ip_list_tot = 100; -static unsigned int ip_pkt_list_tot = 20; -static unsigned int ip_list_hash_size = 0; -static unsigned int ip_list_perms = 0644; -static unsigned int ip_list_uid = 0; -static unsigned int ip_list_gid = 0; +static unsigned int ip_list_tot __read_mostly = 100; +static unsigned int ip_list_hash_size __read_mostly; +static unsigned int ip_list_perms __read_mostly = 0644; +static unsigned int ip_list_uid __read_mostly; +static unsigned int ip_list_gid __read_mostly; module_param(ip_list_tot, uint, 0400); -module_param(ip_pkt_list_tot, uint, 0400); module_param(ip_list_hash_size, uint, 0400); module_param(ip_list_perms, uint, 0400); module_param(ip_list_uid, uint, S_IRUGO | S_IWUSR); module_param(ip_list_gid, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list"); -MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)"); MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs"); MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files"); MODULE_PARM_DESC(ip_list_uid, "default owner of /proc/net/xt_recent/* files"); MODULE_PARM_DESC(ip_list_gid, "default owning group of /proc/net/xt_recent/* files"); +/* retained for backwards compatibility */ +static unsigned int ip_pkt_list_tot __read_mostly; +module_param(ip_pkt_list_tot, uint, 0400); +MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)"); + +#define XT_RECENT_MAX_NSTAMPS 256 + struct recent_entry { struct list_head list; struct list_head lru_list; @@ -79,6 +83,7 @@ struct recent_table { union nf_inet_addr mask; unsigned int refcnt; unsigned int entries; + u8 nstamps_max_mask; struct list_head lru_list; struct list_head iphash[0]; }; @@ -90,7 +95,8 @@ struct recent_net { #endif }; -static int recent_net_id; +static int recent_net_id __read_mostly; + static inline struct recent_net *recent_pernet(struct net *net) { return net_generic(net, recent_net_id); @@ -171,12 +177,15 @@ recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr, u_int16_t family, u_int8_t ttl) { struct recent_entry *e; + unsigned int nstamps_max = t->nstamps_max_mask; if (t->entries >= ip_list_tot) { e = list_entry(t->lru_list.next, struct recent_entry, lru_list); recent_entry_remove(t, e); } - e = kmalloc(sizeof(*e) + sizeof(e->stamps[0]) * ip_pkt_list_tot, + + nstamps_max += 1; + e = kmalloc(sizeof(*e) + sizeof(e->stamps[0]) * nstamps_max, GFP_ATOMIC); if (e == NULL) return NULL; @@ -197,7 +206,7 @@ recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr, static void recent_entry_update(struct recent_table *t, struct recent_entry *e) { - e->index %= ip_pkt_list_tot; + e->index &= t->nstamps_max_mask; e->stamps[e->index++] = jiffies; if (e->index > e->nstamps) e->nstamps = e->index; @@ -326,6 +335,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par, kuid_t uid; kgid_t gid; #endif + unsigned int nstamp_mask; unsigned int i; int ret = -EINVAL; size_t sz; @@ -349,19 +359,33 @@ static int recent_mt_check(const struct xt_mtchk_param *par, return -EINVAL; if ((info->check_set & XT_RECENT_REAP) && !info->seconds) return -EINVAL; - if (info->hit_count > ip_pkt_list_tot) { - 
pr_info("hitcount (%u) is larger than " - "packets to be remembered (%u)\n", - info->hit_count, ip_pkt_list_tot); + if (info->hit_count >= XT_RECENT_MAX_NSTAMPS) { + pr_info("hitcount (%u) is larger than allowed maximum (%u)\n", + info->hit_count, XT_RECENT_MAX_NSTAMPS - 1); return -EINVAL; } if (info->name[0] == '\0' || strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN) return -EINVAL; + if (ip_pkt_list_tot && info->hit_count < ip_pkt_list_tot) + nstamp_mask = roundup_pow_of_two(ip_pkt_list_tot) - 1; + else if (info->hit_count) + nstamp_mask = roundup_pow_of_two(info->hit_count) - 1; + else + nstamp_mask = 32 - 1; + mutex_lock(&recent_mutex); t = recent_table_lookup(recent_net, info->name); if (t != NULL) { + if (info->hit_count > t->nstamps_max_mask) { + pr_info("hitcount (%u) is larger than packets to be remembered (%u) for table %s\n", + info->hit_count, t->nstamps_max_mask + 1, + info->name); + ret = -EINVAL; + goto out; + } + t->refcnt++; ret = 0; goto out; @@ -377,6 +401,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par, goto out; } t->refcnt = 1; + t->nstamps_max_mask = nstamp_mask; memcpy(&t->mask, &info->mask, sizeof(t->mask)); strcpy(t->name, info->name); @@ -497,9 +522,12 @@ static void recent_seq_stop(struct seq_file *s, void *v) static int recent_seq_show(struct seq_file *seq, void *v) { const struct recent_entry *e = v; + struct recent_iter_state *st = seq->private; + const struct recent_table *t = st->table; unsigned int i; - i = (e->index - 1) % ip_pkt_list_tot; + i = (e->index - 1) & t->nstamps_max_mask; + if (e->family == NFPROTO_IPV4) seq_printf(seq, "src=%pI4 ttl: %u last_seen: %lu oldest_pkt: %u", &e->addr.ip, e->ttl, e->stamps[i], e->index); @@ -717,7 +745,9 @@ static int __init recent_mt_init(void) { int err; - if (!ip_list_tot || !ip_pkt_list_tot || ip_pkt_list_tot > 255) + BUILD_BUG_ON_NOT_POWER_OF_2(XT_RECENT_MAX_NSTAMPS); + + if (!ip_list_tot || ip_pkt_list_tot >= XT_RECENT_MAX_NSTAMPS) return -EINVAL; ip_list_hash_size = 1 << fls(ip_list_tot); diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c index 5732cd64acc0d579dd423b0ef23d9b65d77ba5f4..0d47afea968240623ae4486d56c9a87fe2b7b121 100644 --- a/net/netfilter/xt_set.c +++ b/net/netfilter/xt_set.c @@ -157,7 +157,7 @@ set_match_v1_destroy(const struct xt_mtdtor_param *par) /* Revision 3 match */ static bool -match_counter(u64 counter, const struct ip_set_counter_match *info) +match_counter0(u64 counter, const struct ip_set_counter_match0 *info) { switch (info->op) { case IPSET_COUNTER_NONE: @@ -192,14 +192,60 @@ set_match_v3(const struct sk_buff *skb, struct xt_action_param *par) if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS)) return ret; - if (!match_counter(opt.ext.packets, &info->packets)) + if (!match_counter0(opt.ext.packets, &info->packets)) return 0; - return match_counter(opt.ext.bytes, &info->bytes); + return match_counter0(opt.ext.bytes, &info->bytes); } #define set_match_v3_checkentry set_match_v1_checkentry #define set_match_v3_destroy set_match_v1_destroy +/* Revision 4 match */ + +static bool +match_counter(u64 counter, const struct ip_set_counter_match *info) +{ + switch (info->op) { + case IPSET_COUNTER_NONE: + return true; + case IPSET_COUNTER_EQ: + return counter == info->value; + case IPSET_COUNTER_NE: + return counter != info->value; + case IPSET_COUNTER_LT: + return counter < info->value; + case IPSET_COUNTER_GT: + return counter > info->value; + } + return false; +} + +static bool +set_match_v4(const struct sk_buff *skb, struct xt_action_param *par) 
+{ + const struct xt_set_info_match_v4 *info = par->matchinfo; + ADT_OPT(opt, par->family, info->match_set.dim, + info->match_set.flags, info->flags, UINT_MAX); + int ret; + + if (info->packets.op != IPSET_COUNTER_NONE || + info->bytes.op != IPSET_COUNTER_NONE) + opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS; + + ret = match_set(info->match_set.index, skb, par, &opt, + info->match_set.flags & IPSET_INV_MATCH); + + if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS)) + return ret; + + if (!match_counter(opt.ext.packets, &info->packets)) + return 0; + return match_counter(opt.ext.bytes, &info->bytes); +} + +#define set_match_v4_checkentry set_match_v1_checkentry +#define set_match_v4_destroy set_match_v1_destroy + /* Revision 0 interface: backward compatible with netfilter/iptables */ static unsigned int @@ -573,6 +619,27 @@ static struct xt_match set_matches[] __read_mostly = { .destroy = set_match_v3_destroy, .me = THIS_MODULE }, + /* new revision for counters support: update, match */ + { + .name = "set", + .family = NFPROTO_IPV4, + .revision = 4, + .match = set_match_v4, + .matchsize = sizeof(struct xt_set_info_match_v4), + .checkentry = set_match_v4_checkentry, + .destroy = set_match_v4_destroy, + .me = THIS_MODULE + }, + { + .name = "set", + .family = NFPROTO_IPV6, + .revision = 4, + .match = set_match_v4, + .matchsize = sizeof(struct xt_set_info_match_v4), + .checkentry = set_match_v4_checkentry, + .destroy = set_match_v4_destroy, + .me = THIS_MODULE + }, }; static struct xt_target set_targets[] __read_mostly = { diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 0007b818039708bf447d74252712d93f9a8889e0..ef5f77b44ec72131e45b86456c83be7462da224e 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -114,14 +114,14 @@ static atomic_t nl_table_users = ATOMIC_INIT(0); DEFINE_MUTEX(nl_sk_hash_lock); EXPORT_SYMBOL_GPL(nl_sk_hash_lock); -static int lockdep_nl_sk_hash_is_held(void) +#ifdef CONFIG_PROVE_LOCKING +static int lockdep_nl_sk_hash_is_held(void *parent) { -#ifdef CONFIG_LOCKDEP if (debug_locks) return lockdep_is_held(&nl_sk_hash_lock) || lockdep_is_held(&nl_table_lock); -#endif return 1; } +#endif static ATOMIC_NOTIFIER_HEAD(netlink_chain); @@ -142,8 +142,7 @@ int netlink_add_tap(struct netlink_tap *nt) list_add_rcu(&nt->list, &netlink_tap_all); spin_unlock(&netlink_tap_lock); - if (nt->module) - __module_get(nt->module); + __module_get(nt->module); return 0; } @@ -1092,7 +1091,7 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 portid) nlk_sk(sk)->portid = portid; sock_hold(sk); - rhashtable_insert(&table->hash, &nlk_sk(sk)->node, GFP_KERNEL); + rhashtable_insert(&table->hash, &nlk_sk(sk)->node); err = 0; err: mutex_unlock(&nl_sk_hash_lock); @@ -1105,7 +1104,7 @@ static void netlink_remove(struct sock *sk) mutex_lock(&nl_sk_hash_lock); table = &nl_table[sk->sk_protocol]; - if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node, GFP_KERNEL)) { + if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) { WARN_ON(atomic_read(&sk->sk_refcnt) == 1); __sock_put(sk); } @@ -2306,7 +2305,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, } if (netlink_tx_is_mmaped(sk) && - msg->msg_iov->iov_base == NULL) { + msg->msg_iter.iov->iov_base == NULL) { err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb); goto out; @@ -2326,7 +2325,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, NETLINK_CB(skb).flags = netlink_skb_flags; err = -EFAULT; - if (memcpy_fromiovec(skb_put(skb, len), 
msg->msg_iov, len)) { + if (memcpy_from_msg(skb_put(skb, len), msg, len)) { kfree_skb(skb); goto out; } @@ -2401,7 +2400,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, } skb_reset_transport_header(data_skb); - err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(data_skb, 0, msg, copied); if (msg->msg_name) { DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name); @@ -3130,11 +3129,13 @@ static int __init netlink_proto_init(void) .head_offset = offsetof(struct netlink_sock, node), .key_offset = offsetof(struct netlink_sock, portid), .key_len = sizeof(u32), /* portid */ - .hashfn = arch_fast_hash, + .hashfn = jhash, .max_shift = 16, /* 64K */ .grow_decision = rht_grow_above_75, .shrink_decision = rht_shrink_below_30, +#ifdef CONFIG_PROVE_LOCKING .mutex_is_held = lockdep_nl_sk_hash_is_held, +#endif }; if (err != 0) diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 1b06a1fcf3e80acfe26a67e31dec1648ce6104f5..69f1d5e9959f23646e82686cf5d17e1685c34c4f 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -1113,7 +1113,7 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock, skb_put(skb, len); /* User data follows immediately after the NET/ROM transport header */ - if (memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len)) { + if (memcpy_from_msg(skb_transport_header(skb), msg, len)) { kfree_skb(skb); err = -EFAULT; goto out; @@ -1167,7 +1167,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock, msg->msg_flags |= MSG_TRUNC; } - er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + er = skb_copy_datagram_msg(skb, 0, msg, copied); if (er < 0) { skb_free_datagram(sk, skb); release_sock(sk); diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c index b60aa35c074f5220f0b939b347881cab8e6c9db8..f72be7433df35fc1acfa3105a151b40876c0d47f 100644 --- a/net/nfc/digital_dep.c +++ b/net/nfc/digital_dep.c @@ -17,6 +17,9 @@ #include "digital.h" +#define DIGITAL_NFC_DEP_N_RETRY_NACK 2 +#define DIGITAL_NFC_DEP_N_RETRY_ATN 2 + #define DIGITAL_NFC_DEP_FRAME_DIR_OUT 0xD4 #define DIGITAL_NFC_DEP_FRAME_DIR_IN 0xD5 @@ -32,20 +35,32 @@ #define DIGITAL_ATR_REQ_MIN_SIZE 16 #define DIGITAL_ATR_REQ_MAX_SIZE 64 -#define DIGITAL_LR_BITS_PAYLOAD_SIZE_254B 0x30 -#define DIGITAL_FSL_BITS_PAYLOAD_SIZE_254B \ - (DIGITAL_LR_BITS_PAYLOAD_SIZE_254B >> 4) +#define DIGITAL_DID_MAX 14 + +#define DIGITAL_PAYLOAD_SIZE_MAX 254 +#define DIGITAL_PAYLOAD_BITS_TO_PP(s) (((s) & 0x3) << 4) +#define DIGITAL_PAYLOAD_PP_TO_BITS(s) (((s) >> 4) & 0x3) +#define DIGITAL_PAYLOAD_BITS_TO_FSL(s) ((s) & 0x3) +#define DIGITAL_PAYLOAD_FSL_TO_BITS(s) ((s) & 0x3) + #define DIGITAL_GB_BIT 0x02 +#define DIGITAL_NFC_DEP_REQ_RES_HEADROOM 2 /* SoD: [SB (NFC-A)] + LEN */ +#define DIGITAL_NFC_DEP_REQ_RES_TAILROOM 2 /* EoD: 2-byte CRC */ + #define DIGITAL_NFC_DEP_PFB_TYPE(pfb) ((pfb) & 0xE0) #define DIGITAL_NFC_DEP_PFB_TIMEOUT_BIT 0x10 +#define DIGITAL_NFC_DEP_PFB_MI_BIT 0x10 +#define DIGITAL_NFC_DEP_PFB_NACK_BIT 0x10 +#define DIGITAL_NFC_DEP_PFB_DID_BIT 0x04 #define DIGITAL_NFC_DEP_PFB_IS_TIMEOUT(pfb) \ ((pfb) & DIGITAL_NFC_DEP_PFB_TIMEOUT_BIT) -#define DIGITAL_NFC_DEP_MI_BIT_SET(pfb) ((pfb) & 0x10) +#define DIGITAL_NFC_DEP_MI_BIT_SET(pfb) ((pfb) & DIGITAL_NFC_DEP_PFB_MI_BIT) +#define DIGITAL_NFC_DEP_NACK_BIT_SET(pfb) ((pfb) & DIGITAL_NFC_DEP_PFB_NACK_BIT) #define DIGITAL_NFC_DEP_NAD_BIT_SET(pfb) ((pfb) & 0x08) -#define DIGITAL_NFC_DEP_DID_BIT_SET(pfb) ((pfb) & 0x04) +#define DIGITAL_NFC_DEP_DID_BIT_SET(pfb) ((pfb) & 
DIGITAL_NFC_DEP_PFB_DID_BIT) #define DIGITAL_NFC_DEP_PFB_PNI(pfb) ((pfb) & 0x03) #define DIGITAL_NFC_DEP_PFB_I_PDU 0x00 @@ -97,6 +112,34 @@ struct digital_dep_req_res { static void digital_in_recv_dep_res(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp); +static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg, + struct sk_buff *resp); + +static const u8 digital_payload_bits_map[4] = { + [0] = 64, + [1] = 128, + [2] = 192, + [3] = 254 +}; + +static u8 digital_payload_bits_to_size(u8 payload_bits) +{ + if (payload_bits >= ARRAY_SIZE(digital_payload_bits_map)) + return 0; + + return digital_payload_bits_map[payload_bits]; +} + +static u8 digital_payload_size_to_bits(u8 payload_size) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(digital_payload_bits_map); i++) + if (digital_payload_bits_map[i] == payload_size) + return i; + + return 0xff; +} static void digital_skb_push_dep_sod(struct nfc_digital_dev *ddev, struct sk_buff *skb) @@ -129,6 +172,106 @@ static int digital_skb_pull_dep_sod(struct nfc_digital_dev *ddev, return 0; } +static struct sk_buff * +digital_send_dep_data_prep(struct nfc_digital_dev *ddev, struct sk_buff *skb, + struct digital_dep_req_res *dep_req_res, + struct digital_data_exch *data_exch) +{ + struct sk_buff *new_skb; + + if (skb->len > ddev->remote_payload_max) { + dep_req_res->pfb |= DIGITAL_NFC_DEP_PFB_MI_BIT; + + new_skb = digital_skb_alloc(ddev, ddev->remote_payload_max); + if (!new_skb) { + kfree_skb(ddev->chaining_skb); + ddev->chaining_skb = NULL; + + return ERR_PTR(-ENOMEM); + } + + skb_reserve(new_skb, ddev->tx_headroom + NFC_HEADER_SIZE + + DIGITAL_NFC_DEP_REQ_RES_HEADROOM); + memcpy(skb_put(new_skb, ddev->remote_payload_max), skb->data, + ddev->remote_payload_max); + skb_pull(skb, ddev->remote_payload_max); + + ddev->chaining_skb = skb; + ddev->data_exch = data_exch; + } else { + ddev->chaining_skb = NULL; + new_skb = skb; + } + + return new_skb; +} + +static struct sk_buff * +digital_recv_dep_data_gather(struct nfc_digital_dev *ddev, u8 pfb, + struct sk_buff *resp, + int (*send_ack)(struct nfc_digital_dev *ddev, + struct digital_data_exch + *data_exch), + struct digital_data_exch *data_exch) +{ + struct sk_buff *new_skb; + int rc; + + if (DIGITAL_NFC_DEP_MI_BIT_SET(pfb) && (!ddev->chaining_skb)) { + ddev->chaining_skb = + nfc_alloc_recv_skb(8 * ddev->local_payload_max, + GFP_KERNEL); + if (!ddev->chaining_skb) { + rc = -ENOMEM; + goto error; + } + } + + if (ddev->chaining_skb) { + if (resp->len > skb_tailroom(ddev->chaining_skb)) { + new_skb = skb_copy_expand(ddev->chaining_skb, + skb_headroom( + ddev->chaining_skb), + 8 * ddev->local_payload_max, + GFP_KERNEL); + if (!new_skb) { + rc = -ENOMEM; + goto error; + } + + kfree_skb(ddev->chaining_skb); + ddev->chaining_skb = new_skb; + } + + memcpy(skb_put(ddev->chaining_skb, resp->len), resp->data, + resp->len); + + kfree_skb(resp); + resp = NULL; + + if (DIGITAL_NFC_DEP_MI_BIT_SET(pfb)) { + rc = send_ack(ddev, data_exch); + if (rc) + goto error; + + return NULL; + } + + resp = ddev->chaining_skb; + ddev->chaining_skb = NULL; + } + + return resp; + +error: + kfree_skb(resp); + + kfree_skb(ddev->chaining_skb); + ddev->chaining_skb = NULL; + + return ERR_PTR(rc); +} + static void digital_in_recv_psl_res(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { @@ -198,6 +341,8 @@ static int digital_in_send_psl_req(struct nfc_digital_dev *ddev, { struct sk_buff *skb; struct digital_psl_req *psl_req; + int rc; + u8 payload_size, payload_bits; skb = digital_skb_alloc(ddev, 
sizeof(*psl_req)); if (!skb) @@ -211,14 +356,24 @@ static int digital_in_send_psl_req(struct nfc_digital_dev *ddev, psl_req->cmd = DIGITAL_CMD_PSL_REQ; psl_req->did = 0; psl_req->brs = (0x2 << 3) | 0x2; /* 424F both directions */ - psl_req->fsl = DIGITAL_FSL_BITS_PAYLOAD_SIZE_254B; + + payload_size = min(ddev->local_payload_max, ddev->remote_payload_max); + payload_bits = digital_payload_size_to_bits(payload_size); + psl_req->fsl = DIGITAL_PAYLOAD_BITS_TO_FSL(payload_bits); + + ddev->local_payload_max = payload_size; + ddev->remote_payload_max = payload_size; digital_skb_push_dep_sod(ddev, skb); ddev->skb_add_crc(skb); - return digital_in_send_cmd(ddev, skb, 500, digital_in_recv_psl_res, - target); + rc = digital_in_send_cmd(ddev, skb, 500, digital_in_recv_psl_res, + target); + if (rc) + kfree_skb(skb); + + return rc; } static void digital_in_recv_atr_res(struct nfc_digital_dev *ddev, void *arg, @@ -226,7 +381,7 @@ static void digital_in_recv_atr_res(struct nfc_digital_dev *ddev, void *arg, { struct nfc_target *target = arg; struct digital_atr_res *atr_res; - u8 gb_len; + u8 gb_len, payload_bits; int rc; if (IS_ERR(resp)) { @@ -256,6 +411,14 @@ static void digital_in_recv_atr_res(struct nfc_digital_dev *ddev, void *arg, atr_res = (struct digital_atr_res *)resp->data; + payload_bits = DIGITAL_PAYLOAD_PP_TO_BITS(atr_res->pp); + ddev->remote_payload_max = digital_payload_bits_to_size(payload_bits); + + if (!ddev->remote_payload_max) { + rc = -EINVAL; + goto exit; + } + rc = nfc_set_remote_general_bytes(ddev->nfc_dev, atr_res->gb, gb_len); if (rc) goto exit; @@ -286,6 +449,8 @@ int digital_in_send_atr_req(struct nfc_digital_dev *ddev, struct sk_buff *skb; struct digital_atr_req *atr_req; uint size; + int rc; + u8 payload_bits; size = DIGITAL_ATR_REQ_MIN_SIZE + gb_len; @@ -314,7 +479,9 @@ int digital_in_send_atr_req(struct nfc_digital_dev *ddev, atr_req->bs = 0; atr_req->br = 0; - atr_req->pp = DIGITAL_LR_BITS_PAYLOAD_SIZE_254B; + ddev->local_payload_max = DIGITAL_PAYLOAD_SIZE_MAX; + payload_bits = digital_payload_size_to_bits(ddev->local_payload_max); + atr_req->pp = DIGITAL_PAYLOAD_BITS_TO_PP(payload_bits); if (gb_len) { atr_req->pp |= DIGITAL_GB_BIT; @@ -325,8 +492,113 @@ int digital_in_send_atr_req(struct nfc_digital_dev *ddev, ddev->skb_add_crc(skb); - return digital_in_send_cmd(ddev, skb, 500, digital_in_recv_atr_res, - target); + rc = digital_in_send_cmd(ddev, skb, 500, digital_in_recv_atr_res, + target); + if (rc) + kfree_skb(skb); + + return rc; +} + +static int digital_in_send_ack(struct nfc_digital_dev *ddev, + struct digital_data_exch *data_exch) +{ + struct digital_dep_req_res *dep_req; + struct sk_buff *skb; + int rc; + + skb = digital_skb_alloc(ddev, 1); + if (!skb) + return -ENOMEM; + + skb_push(skb, sizeof(struct digital_dep_req_res)); + + dep_req = (struct digital_dep_req_res *)skb->data; + + dep_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT; + dep_req->cmd = DIGITAL_CMD_DEP_REQ; + dep_req->pfb = DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU | + ddev->curr_nfc_dep_pni; + + digital_skb_push_dep_sod(ddev, skb); + + ddev->skb_add_crc(skb); + + ddev->saved_skb = skb_get(skb); + ddev->saved_skb_len = skb->len; + + rc = digital_in_send_cmd(ddev, skb, 1500, digital_in_recv_dep_res, + data_exch); + if (rc) { + kfree_skb(skb); + kfree_skb(ddev->saved_skb); + ddev->saved_skb = NULL; + } + + return rc; +} + +static int digital_in_send_nack(struct nfc_digital_dev *ddev, + struct digital_data_exch *data_exch) +{ + struct digital_dep_req_res *dep_req; + struct sk_buff *skb; + int rc; + + skb = 
digital_skb_alloc(ddev, 1); + if (!skb) + return -ENOMEM; + + skb_push(skb, sizeof(struct digital_dep_req_res)); + + dep_req = (struct digital_dep_req_res *)skb->data; + + dep_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT; + dep_req->cmd = DIGITAL_CMD_DEP_REQ; + dep_req->pfb = DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU | + DIGITAL_NFC_DEP_PFB_NACK_BIT | ddev->curr_nfc_dep_pni; + + digital_skb_push_dep_sod(ddev, skb); + + ddev->skb_add_crc(skb); + + rc = digital_in_send_cmd(ddev, skb, 1500, digital_in_recv_dep_res, + data_exch); + if (rc) + kfree_skb(skb); + + return rc; +} + +static int digital_in_send_atn(struct nfc_digital_dev *ddev, + struct digital_data_exch *data_exch) +{ + struct digital_dep_req_res *dep_req; + struct sk_buff *skb; + int rc; + + skb = digital_skb_alloc(ddev, 1); + if (!skb) + return -ENOMEM; + + skb_push(skb, sizeof(struct digital_dep_req_res)); + + dep_req = (struct digital_dep_req_res *)skb->data; + + dep_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT; + dep_req->cmd = DIGITAL_CMD_DEP_REQ; + dep_req->pfb = DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU; + + digital_skb_push_dep_sod(ddev, skb); + + ddev->skb_add_crc(skb); + + rc = digital_in_send_cmd(ddev, skb, 1500, digital_in_recv_dep_res, + data_exch); + if (rc) + kfree_skb(skb); + + return rc; } static int digital_in_send_rtox(struct nfc_digital_dev *ddev, @@ -355,12 +627,30 @@ static int digital_in_send_rtox(struct nfc_digital_dev *ddev, ddev->skb_add_crc(skb); + ddev->saved_skb = skb_get(skb); + ddev->saved_skb_len = skb->len; + rc = digital_in_send_cmd(ddev, skb, 1500, digital_in_recv_dep_res, data_exch); + if (rc) { + kfree_skb(skb); + kfree_skb(ddev->saved_skb); + ddev->saved_skb = NULL; + } return rc; } +static int digital_in_send_saved_skb(struct nfc_digital_dev *ddev, + struct digital_data_exch *data_exch) +{ + skb_get(ddev->saved_skb); + skb_push(ddev->saved_skb, ddev->saved_skb_len); + + return digital_in_send_cmd(ddev, ddev->saved_skb, 1500, + digital_in_recv_dep_res, data_exch); +} + static void digital_in_recv_dep_res(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { @@ -373,25 +663,67 @@ static void digital_in_recv_dep_res(struct nfc_digital_dev *ddev, void *arg, if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; + + if (((rc != -ETIMEDOUT) || ddev->nack_count) && + (ddev->nack_count++ < DIGITAL_NFC_DEP_N_RETRY_NACK)) { + ddev->atn_count = 0; + + rc = digital_in_send_nack(ddev, data_exch); + if (rc) + goto error; + + return; + } else if ((rc == -ETIMEDOUT) && + (ddev->atn_count++ < DIGITAL_NFC_DEP_N_RETRY_ATN)) { + ddev->nack_count = 0; + + rc = digital_in_send_atn(ddev, data_exch); + if (rc) + goto error; + + return; + } + + goto exit; + } + + rc = digital_skb_pull_dep_sod(ddev, resp); + if (rc) { + PROTOCOL_ERR("14.4.1.2"); goto exit; } rc = ddev->skb_check_crc(resp); if (rc) { + if ((resp->len >= 4) && + (ddev->nack_count++ < DIGITAL_NFC_DEP_N_RETRY_NACK)) { + ddev->atn_count = 0; + + rc = digital_in_send_nack(ddev, data_exch); + if (rc) + goto error; + + kfree_skb(resp); + + return; + } + PROTOCOL_ERR("14.4.1.6"); goto error; } - rc = digital_skb_pull_dep_sod(ddev, resp); - if (rc) { - PROTOCOL_ERR("14.4.1.2"); + ddev->atn_count = 0; + ddev->nack_count = 0; + + if (resp->len > ddev->local_payload_max) { + rc = -EMSGSIZE; goto exit; } + size = sizeof(struct digital_dep_req_res); dep_res = (struct digital_dep_req_res *)resp->data; - if (resp->len < sizeof(struct digital_dep_req_res) || - dep_res->dir != DIGITAL_NFC_DEP_FRAME_DIR_IN || + if (resp->len < size || dep_res->dir != DIGITAL_NFC_DEP_FRAME_DIR_IN || 
dep_res->cmd != DIGITAL_CMD_DEP_RES) { rc = -EIO; goto error; @@ -399,6 +731,24 @@ static void digital_in_recv_dep_res(struct nfc_digital_dev *ddev, void *arg, pfb = dep_res->pfb; + if (DIGITAL_NFC_DEP_DID_BIT_SET(pfb)) { + PROTOCOL_ERR("14.8.2.1"); + rc = -EIO; + goto error; + } + + if (DIGITAL_NFC_DEP_NAD_BIT_SET(pfb)) { + rc = -EIO; + goto exit; + } + + if (size > resp->len) { + rc = -EIO; + goto error; + } + + skb_pull(resp, size); + switch (DIGITAL_NFC_DEP_PFB_TYPE(pfb)) { case DIGITAL_NFC_DEP_PFB_I_PDU: if (DIGITAL_NFC_DEP_PFB_PNI(pfb) != ddev->curr_nfc_dep_pni) { @@ -409,21 +759,71 @@ static void digital_in_recv_dep_res(struct nfc_digital_dev *ddev, void *arg, ddev->curr_nfc_dep_pni = DIGITAL_NFC_DEP_PFB_PNI(ddev->curr_nfc_dep_pni + 1); + + kfree_skb(ddev->saved_skb); + ddev->saved_skb = NULL; + + resp = digital_recv_dep_data_gather(ddev, pfb, resp, + digital_in_send_ack, + data_exch); + if (IS_ERR(resp)) { + rc = PTR_ERR(resp); + resp = NULL; + goto error; + } + + /* If resp is NULL then we're still chaining so return and + * wait for the next part of the PDU. Else, the PDU is + * complete so pass it up. + */ + if (!resp) + return; + rc = 0; break; case DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU: + if (DIGITAL_NFC_DEP_PFB_PNI(pfb) != ddev->curr_nfc_dep_pni) { + PROTOCOL_ERR("14.12.3.3"); + rc = -EIO; + goto exit; + } + + ddev->curr_nfc_dep_pni = + DIGITAL_NFC_DEP_PFB_PNI(ddev->curr_nfc_dep_pni + 1); + + if (ddev->chaining_skb && !DIGITAL_NFC_DEP_NACK_BIT_SET(pfb)) { + kfree_skb(ddev->saved_skb); + ddev->saved_skb = NULL; + + rc = digital_in_send_dep_req(ddev, NULL, + ddev->chaining_skb, + ddev->data_exch); + if (rc) + goto error; + + return; + } + pr_err("Received a ACK/NACK PDU\n"); - rc = -EIO; - goto error; + rc = -EINVAL; + goto exit; case DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU: - if (!DIGITAL_NFC_DEP_PFB_IS_TIMEOUT(pfb)) { - rc = -EINVAL; - goto error; + if (!DIGITAL_NFC_DEP_PFB_IS_TIMEOUT(pfb)) { /* ATN */ + rc = digital_in_send_saved_skb(ddev, data_exch); + if (rc) { + kfree_skb(ddev->saved_skb); + goto error; + } + + return; } - rc = digital_in_send_rtox(ddev, data_exch, resp->data[3]); + kfree_skb(ddev->saved_skb); + ddev->saved_skb = NULL; + + rc = digital_in_send_rtox(ddev, data_exch, resp->data[0]); if (rc) goto error; @@ -431,30 +831,18 @@ static void digital_in_recv_dep_res(struct nfc_digital_dev *ddev, void *arg, return; } - if (DIGITAL_NFC_DEP_MI_BIT_SET(pfb)) { - pr_err("MI bit set. 
Chained PDU not supported\n"); - rc = -EIO; - goto error; - } - - size = sizeof(struct digital_dep_req_res); - - if (DIGITAL_NFC_DEP_DID_BIT_SET(pfb)) - size++; - - if (size > resp->len) { - rc = -EIO; - goto error; - } - - skb_pull(resp, size); - exit: data_exch->cb(data_exch->cb_context, resp, rc); error: kfree(data_exch); + kfree_skb(ddev->chaining_skb); + ddev->chaining_skb = NULL; + + kfree_skb(ddev->saved_skb); + ddev->saved_skb = NULL; + if (rc) kfree_skb(resp); } @@ -464,20 +852,47 @@ int digital_in_send_dep_req(struct nfc_digital_dev *ddev, struct digital_data_exch *data_exch) { struct digital_dep_req_res *dep_req; + struct sk_buff *chaining_skb, *tmp_skb; + int rc; skb_push(skb, sizeof(struct digital_dep_req_res)); dep_req = (struct digital_dep_req_res *)skb->data; + dep_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT; dep_req->cmd = DIGITAL_CMD_DEP_REQ; dep_req->pfb = ddev->curr_nfc_dep_pni; - digital_skb_push_dep_sod(ddev, skb); + ddev->atn_count = 0; + ddev->nack_count = 0; - ddev->skb_add_crc(skb); + chaining_skb = ddev->chaining_skb; + + tmp_skb = digital_send_dep_data_prep(ddev, skb, dep_req, data_exch); + if (IS_ERR(tmp_skb)) + return PTR_ERR(tmp_skb); + + digital_skb_push_dep_sod(ddev, tmp_skb); + + ddev->skb_add_crc(tmp_skb); - return digital_in_send_cmd(ddev, skb, 1500, digital_in_recv_dep_res, - data_exch); + ddev->saved_skb = skb_get(tmp_skb); + ddev->saved_skb_len = tmp_skb->len; + + rc = digital_in_send_cmd(ddev, tmp_skb, 1500, digital_in_recv_dep_res, + data_exch); + if (rc) { + if (tmp_skb != skb) + kfree_skb(tmp_skb); + + kfree_skb(chaining_skb); + ddev->chaining_skb = NULL; + + kfree_skb(ddev->saved_skb); + ddev->saved_skb = NULL; + } + + return rc; } static void digital_tg_set_rf_tech(struct nfc_digital_dev *ddev, u8 rf_tech) @@ -507,11 +922,106 @@ static void digital_tg_set_rf_tech(struct nfc_digital_dev *ddev, u8 rf_tech) } } +static int digital_tg_send_ack(struct nfc_digital_dev *ddev, + struct digital_data_exch *data_exch) +{ + struct digital_dep_req_res *dep_res; + struct sk_buff *skb; + int rc; + + skb = digital_skb_alloc(ddev, 1); + if (!skb) + return -ENOMEM; + + skb_push(skb, sizeof(struct digital_dep_req_res)); + + dep_res = (struct digital_dep_req_res *)skb->data; + + dep_res->dir = DIGITAL_NFC_DEP_FRAME_DIR_IN; + dep_res->cmd = DIGITAL_CMD_DEP_RES; + dep_res->pfb = DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU | + ddev->curr_nfc_dep_pni; + + if (ddev->did) { + dep_res->pfb |= DIGITAL_NFC_DEP_PFB_DID_BIT; + + memcpy(skb_put(skb, sizeof(ddev->did)), &ddev->did, + sizeof(ddev->did)); + } + + ddev->curr_nfc_dep_pni = + DIGITAL_NFC_DEP_PFB_PNI(ddev->curr_nfc_dep_pni + 1); + + digital_skb_push_dep_sod(ddev, skb); + + ddev->skb_add_crc(skb); + + ddev->saved_skb = skb_get(skb); + ddev->saved_skb_len = skb->len; + + rc = digital_tg_send_cmd(ddev, skb, 1500, digital_tg_recv_dep_req, + data_exch); + if (rc) { + kfree_skb(skb); + kfree_skb(ddev->saved_skb); + ddev->saved_skb = NULL; + } + + return rc; +} + +static int digital_tg_send_atn(struct nfc_digital_dev *ddev) +{ + struct digital_dep_req_res *dep_res; + struct sk_buff *skb; + int rc; + + skb = digital_skb_alloc(ddev, 1); + if (!skb) + return -ENOMEM; + + skb_push(skb, sizeof(struct digital_dep_req_res)); + + dep_res = (struct digital_dep_req_res *)skb->data; + + dep_res->dir = DIGITAL_NFC_DEP_FRAME_DIR_IN; + dep_res->cmd = DIGITAL_CMD_DEP_RES; + dep_res->pfb = DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU; + + if (ddev->did) { + dep_res->pfb |= DIGITAL_NFC_DEP_PFB_DID_BIT; + + memcpy(skb_put(skb, sizeof(ddev->did)), &ddev->did, + 
sizeof(ddev->did)); + } + + digital_skb_push_dep_sod(ddev, skb); + + ddev->skb_add_crc(skb); + + rc = digital_tg_send_cmd(ddev, skb, 1500, digital_tg_recv_dep_req, + NULL); + if (rc) + kfree_skb(skb); + + return rc; +} + +static int digital_tg_send_saved_skb(struct nfc_digital_dev *ddev) +{ + skb_get(ddev->saved_skb); + skb_push(ddev->saved_skb, ddev->saved_skb_len); + + return digital_tg_send_cmd(ddev, ddev->saved_skb, 1500, + digital_tg_recv_dep_req, NULL); +} + static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { int rc; struct digital_dep_req_res *dep_req; + u8 pfb; size_t size; if (IS_ERR(resp)) { @@ -532,6 +1042,11 @@ static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg, goto exit; } + if (resp->len > ddev->local_payload_max) { + rc = -EMSGSIZE; + goto exit; + } + size = sizeof(struct digital_dep_req_res); dep_req = (struct digital_dep_req_res *)resp->data; @@ -541,34 +1056,147 @@ static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg, goto exit; } - if (DIGITAL_NFC_DEP_DID_BIT_SET(dep_req->pfb)) - size++; + pfb = dep_req->pfb; - if (resp->len < size) { + if (DIGITAL_NFC_DEP_DID_BIT_SET(pfb)) { + if (ddev->did && (ddev->did == resp->data[3])) { + size++; + } else { + rc = -EIO; + goto exit; + } + } else if (ddev->did) { rc = -EIO; goto exit; } - switch (DIGITAL_NFC_DEP_PFB_TYPE(dep_req->pfb)) { + if (DIGITAL_NFC_DEP_NAD_BIT_SET(pfb)) { + rc = -EIO; + goto exit; + } + + if (size > resp->len) { + rc = -EIO; + goto exit; + } + + skb_pull(resp, size); + + switch (DIGITAL_NFC_DEP_PFB_TYPE(pfb)) { case DIGITAL_NFC_DEP_PFB_I_PDU: pr_debug("DIGITAL_NFC_DEP_PFB_I_PDU\n"); - ddev->curr_nfc_dep_pni = DIGITAL_NFC_DEP_PFB_PNI(dep_req->pfb); + + if ((ddev->atn_count && (DIGITAL_NFC_DEP_PFB_PNI(pfb - 1) != + ddev->curr_nfc_dep_pni)) || + (DIGITAL_NFC_DEP_PFB_PNI(pfb) != ddev->curr_nfc_dep_pni)) { + PROTOCOL_ERR("14.12.3.4"); + rc = -EIO; + goto exit; + } + + if (ddev->atn_count) { + ddev->atn_count = 0; + + rc = digital_tg_send_saved_skb(ddev); + if (rc) + goto exit; + + return; + } + + kfree_skb(ddev->saved_skb); + ddev->saved_skb = NULL; + + resp = digital_recv_dep_data_gather(ddev, pfb, resp, + digital_tg_send_ack, NULL); + if (IS_ERR(resp)) { + rc = PTR_ERR(resp); + resp = NULL; + goto exit; + } + + /* If resp is NULL then we're still chaining so return and + * wait for the next part of the PDU. Else, the PDU is + * complete so pass it up. 
+ */ + if (!resp) + return; + + rc = 0; break; case DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU: - pr_err("Received a ACK/NACK PDU\n"); - rc = -EINVAL; - goto exit; + if (!DIGITAL_NFC_DEP_NACK_BIT_SET(pfb)) { /* ACK */ + if ((ddev->atn_count && + (DIGITAL_NFC_DEP_PFB_PNI(pfb - 1) != + ddev->curr_nfc_dep_pni)) || + (DIGITAL_NFC_DEP_PFB_PNI(pfb) != + ddev->curr_nfc_dep_pni) || + !ddev->chaining_skb || !ddev->saved_skb) { + rc = -EIO; + goto exit; + } + + if (ddev->atn_count) { + ddev->atn_count = 0; + + rc = digital_tg_send_saved_skb(ddev); + if (rc) + goto exit; + + return; + } + + kfree_skb(ddev->saved_skb); + ddev->saved_skb = NULL; + + rc = digital_tg_send_dep_res(ddev, ddev->chaining_skb); + if (rc) + goto exit; + } else { /* NACK */ + if ((DIGITAL_NFC_DEP_PFB_PNI(pfb + 1) != + ddev->curr_nfc_dep_pni) || + !ddev->saved_skb) { + rc = -EIO; + goto exit; + } + + ddev->atn_count = 0; + + rc = digital_tg_send_saved_skb(ddev); + if (rc) { + kfree_skb(ddev->saved_skb); + goto exit; + } + } + + return; case DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU: - pr_err("Received a SUPERVISOR PDU\n"); - rc = -EINVAL; - goto exit; - } + if (DIGITAL_NFC_DEP_PFB_IS_TIMEOUT(pfb)) { + rc = -EINVAL; + goto exit; + } - skb_pull(resp, size); + rc = digital_tg_send_atn(ddev); + if (rc) + goto exit; + + ddev->atn_count++; + + kfree_skb(resp); + return; + } rc = nfc_tm_data_received(ddev->nfc_dev, resp); exit: + kfree_skb(ddev->chaining_skb); + ddev->chaining_skb = NULL; + + ddev->atn_count = 0; + + kfree_skb(ddev->saved_skb); + ddev->saved_skb = NULL; + if (rc) kfree_skb(resp); } @@ -576,20 +1204,54 @@ exit: int digital_tg_send_dep_res(struct nfc_digital_dev *ddev, struct sk_buff *skb) { struct digital_dep_req_res *dep_res; + struct sk_buff *chaining_skb, *tmp_skb; + int rc; skb_push(skb, sizeof(struct digital_dep_req_res)); + dep_res = (struct digital_dep_req_res *)skb->data; dep_res->dir = DIGITAL_NFC_DEP_FRAME_DIR_IN; dep_res->cmd = DIGITAL_CMD_DEP_RES; dep_res->pfb = ddev->curr_nfc_dep_pni; - digital_skb_push_dep_sod(ddev, skb); + if (ddev->did) { + dep_res->pfb |= DIGITAL_NFC_DEP_PFB_DID_BIT; - ddev->skb_add_crc(skb); + memcpy(skb_put(skb, sizeof(ddev->did)), &ddev->did, + sizeof(ddev->did)); + } + + ddev->curr_nfc_dep_pni = + DIGITAL_NFC_DEP_PFB_PNI(ddev->curr_nfc_dep_pni + 1); + + chaining_skb = ddev->chaining_skb; + + tmp_skb = digital_send_dep_data_prep(ddev, skb, dep_res, NULL); + if (IS_ERR(tmp_skb)) + return PTR_ERR(tmp_skb); + + digital_skb_push_dep_sod(ddev, tmp_skb); + + ddev->skb_add_crc(tmp_skb); + + ddev->saved_skb = skb_get(tmp_skb); + ddev->saved_skb_len = tmp_skb->len; + + rc = digital_tg_send_cmd(ddev, tmp_skb, 1500, digital_tg_recv_dep_req, + NULL); + if (rc) { + if (tmp_skb != skb) + kfree_skb(tmp_skb); + + kfree_skb(chaining_skb); + ddev->chaining_skb = NULL; - return digital_tg_send_cmd(ddev, skb, 1500, digital_tg_recv_dep_req, - NULL); + kfree_skb(ddev->saved_skb); + ddev->saved_skb = NULL; + } + + return rc; } static void digital_tg_send_psl_res_complete(struct nfc_digital_dev *ddev, @@ -632,9 +1294,10 @@ static int digital_tg_send_psl_res(struct nfc_digital_dev *ddev, u8 did, ddev->skb_add_crc(skb); + ddev->curr_nfc_dep_pni = 0; + rc = digital_tg_send_cmd(ddev, skb, 0, digital_tg_send_psl_res_complete, (void *)(unsigned long)rf_tech); - if (rc) kfree_skb(skb); @@ -647,7 +1310,7 @@ static void digital_tg_recv_psl_req(struct nfc_digital_dev *ddev, void *arg, int rc; struct digital_psl_req *psl_req; u8 rf_tech; - u8 dsi; + u8 dsi, payload_size, payload_bits; if (IS_ERR(resp)) { rc = PTR_ERR(resp); @@ -692,6 
+1355,18 @@ static void digital_tg_recv_psl_req(struct nfc_digital_dev *ddev, void *arg, goto exit; } + payload_bits = DIGITAL_PAYLOAD_FSL_TO_BITS(psl_req->fsl); + payload_size = digital_payload_bits_to_size(payload_bits); + + if (!payload_size || (payload_size > min(ddev->local_payload_max, + ddev->remote_payload_max))) { + rc = -EINVAL; + goto exit; + } + + ddev->local_payload_max = payload_size; + ddev->remote_payload_max = payload_size; + rc = digital_tg_send_psl_res(ddev, psl_req->did, rf_tech); exit: @@ -712,6 +1387,8 @@ static void digital_tg_send_atr_res_complete(struct nfc_digital_dev *ddev, if (resp->data[0] == DIGITAL_NFC_DEP_NFCA_SOD_SB) offset++; + ddev->atn_count = 0; + if (resp->data[offset] == DIGITAL_CMD_PSL_REQ) digital_tg_recv_psl_req(ddev, arg, resp); else @@ -723,7 +1400,7 @@ static int digital_tg_send_atr_res(struct nfc_digital_dev *ddev, { struct digital_atr_res *atr_res; struct sk_buff *skb; - u8 *gb; + u8 *gb, payload_bits; size_t gb_len; int rc; @@ -744,7 +1421,11 @@ static int digital_tg_send_atr_res(struct nfc_digital_dev *ddev, atr_res->cmd = DIGITAL_CMD_ATR_RES; memcpy(atr_res->nfcid3, atr_req->nfcid3, sizeof(atr_req->nfcid3)); atr_res->to = 8; - atr_res->pp = DIGITAL_LR_BITS_PAYLOAD_SIZE_254B; + + ddev->local_payload_max = DIGITAL_PAYLOAD_SIZE_MAX; + payload_bits = digital_payload_size_to_bits(ddev->local_payload_max); + atr_res->pp = DIGITAL_PAYLOAD_BITS_TO_PP(payload_bits); + if (gb_len) { skb_put(skb, gb_len); @@ -756,12 +1437,12 @@ static int digital_tg_send_atr_res(struct nfc_digital_dev *ddev, ddev->skb_add_crc(skb); + ddev->curr_nfc_dep_pni = 0; + rc = digital_tg_send_cmd(ddev, skb, 999, digital_tg_send_atr_res_complete, NULL); - if (rc) { + if (rc) kfree_skb(skb); - return rc; - } return rc; } @@ -772,7 +1453,7 @@ void digital_tg_recv_atr_req(struct nfc_digital_dev *ddev, void *arg, int rc; struct digital_atr_req *atr_req; size_t gb_len, min_size; - u8 poll_tech_count; + u8 poll_tech_count, payload_bits; if (IS_ERR(resp)) { rc = PTR_ERR(resp); @@ -815,11 +1496,22 @@ void digital_tg_recv_atr_req(struct nfc_digital_dev *ddev, void *arg, atr_req = (struct digital_atr_req *)resp->data; if (atr_req->dir != DIGITAL_NFC_DEP_FRAME_DIR_OUT || - atr_req->cmd != DIGITAL_CMD_ATR_REQ) { + atr_req->cmd != DIGITAL_CMD_ATR_REQ || + atr_req->did > DIGITAL_DID_MAX) { rc = -EINVAL; goto exit; } + payload_bits = DIGITAL_PAYLOAD_PP_TO_BITS(atr_req->pp); + ddev->remote_payload_max = digital_payload_bits_to_size(payload_bits); + + if (!ddev->remote_payload_max) { + rc = -EINVAL; + goto exit; + } + + ddev->did = atr_req->did; + rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED); if (rc) diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c index 677d24bb70f8af24569ef574404ba4b1b9e1c9f0..91df487aa0a9cf2a69f368a9211e80efcc06b012 100644 --- a/net/nfc/hci/command.c +++ b/net/nfc/hci/command.c @@ -345,6 +345,9 @@ int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate, pr_debug("\n"); + if (hdev->gate2pipe[dest_gate] == NFC_HCI_DO_NOT_CREATE_PIPE) + return 0; + if (hdev->gate2pipe[dest_gate] != NFC_HCI_INVALID_PIPE) return -EADDRINUSE; diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c index 117708263ced38dfa690e1d49a6c19d661751289..ef50e7716c4a8742840731db126351fee16bc57b 100644 --- a/net/nfc/hci/core.c +++ b/net/nfc/hci/core.c @@ -167,6 +167,48 @@ exit: void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, struct sk_buff *skb) { + int r = 0; + u8 gate = nfc_hci_pipe2gate(hdev, pipe); 
+ u8 local_gate, new_pipe; + u8 gate_opened = 0x00; + + pr_debug("from gate %x pipe %x cmd %x\n", gate, pipe, cmd); + + switch (cmd) { + case NFC_HCI_ADM_NOTIFY_PIPE_CREATED: + if (skb->len != 5) { + r = -EPROTO; + break; + } + + local_gate = skb->data[3]; + new_pipe = skb->data[4]; + nfc_hci_send_response(hdev, gate, NFC_HCI_ANY_OK, NULL, 0); + + /* save the new created pipe and bind with local gate, + * the description for skb->data[3] is destination gate id + * but since we received this cmd from host controller, we + * are the destination and it is our local gate + */ + hdev->gate2pipe[local_gate] = new_pipe; + break; + case NFC_HCI_ANY_OPEN_PIPE: + /* if the pipe is already created, we allow remote host to + * open it + */ + if (gate != 0xff) + nfc_hci_send_response(hdev, gate, NFC_HCI_ANY_OK, + &gate_opened, 1); + break; + case NFC_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED: + nfc_hci_send_response(hdev, gate, NFC_HCI_ANY_OK, NULL, 0); + break; + default: + pr_info("Discarded unknown cmd %x to gate %x\n", cmd, gate); + r = -EINVAL; + break; + } + kfree_skb(skb); } @@ -717,6 +759,19 @@ static int hci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx) return 0; } +static int hci_se_io(struct nfc_dev *nfc_dev, u32 se_idx, + u8 *apdu, size_t apdu_length, + se_io_cb_t cb, void *cb_context) +{ + struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); + + if (hdev->ops->se_io) + return hdev->ops->se_io(hdev, se_idx, apdu, + apdu_length, cb, cb_context); + + return 0; +} + static void nfc_hci_failure(struct nfc_hci_dev *hdev, int err) { mutex_lock(&hdev->msg_tx_mutex); @@ -830,6 +885,7 @@ static struct nfc_ops hci_nfc_ops = { .discover_se = hci_discover_se, .enable_se = hci_enable_se, .disable_se = hci_disable_se, + .se_io = hci_se_io, }; struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops, diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c index a3ad69a4c648c76779bf496153bfa7eb01f5354c..3621a902cb6e36e3399848694a8ab6bfc0a962a8 100644 --- a/net/nfc/llcp_commands.c +++ b/net/nfc/llcp_commands.c @@ -401,7 +401,8 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock) u8 *miux_tlv = NULL, miux_tlv_length; u8 *rw_tlv = NULL, rw_tlv_length, rw; int err; - u16 size = 0, miux; + u16 size = 0; + __be16 miux; pr_debug("Sending CONNECT\n"); @@ -465,7 +466,8 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock) u8 *miux_tlv = NULL, miux_tlv_length; u8 *rw_tlv = NULL, rw_tlv_length, rw; int err; - u16 size = 0, miux; + u16 size = 0; + __be16 miux; pr_debug("Sending CC\n"); @@ -665,7 +667,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock, if (msg_data == NULL) return -ENOMEM; - if (memcpy_fromiovec(msg_data, msg->msg_iov, len)) { + if (memcpy_from_msg(msg_data, msg, len)) { kfree(msg_data); return -EFAULT; } @@ -731,7 +733,7 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap, if (msg_data == NULL) return -ENOMEM; - if (memcpy_fromiovec(msg_data, msg->msg_iov, len)) { + if (memcpy_from_msg(msg_data, msg, len)) { kfree(msg_data); return -EFAULT; } diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c index 51e7887973171f999eb7c77984d7dddf081a5b75..b18f07ccb504c0f8c2aea36be80069a0376def76 100644 --- a/net/nfc/llcp_core.c +++ b/net/nfc/llcp_core.c @@ -1,5 +1,6 @@ /* * Copyright (C) 2011 Intel Corporation. All rights reserved. + * Copyright (C) 2014 Marvell International Ltd. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -1511,8 +1512,10 @@ int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb) struct nfc_llcp_local *local; local = nfc_llcp_find_local(dev); - if (local == NULL) + if (local == NULL) { + kfree_skb(skb); return -ENODEV; + } __nfc_llcp_recv(local, skb); diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index 51f077a92fa9220eb987b7b9185efcc75312328a..e181e290427cd9727d59ddb1989d2974937f1754 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -524,13 +524,13 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *uaddr, static inline unsigned int llcp_accept_poll(struct sock *parent) { - struct nfc_llcp_sock *llcp_sock, *n, *parent_sock; + struct nfc_llcp_sock *llcp_sock, *parent_sock; struct sock *sk; parent_sock = nfc_llcp_sock(parent); - list_for_each_entry_safe(llcp_sock, n, &parent_sock->accept_queue, - accept_queue) { + list_for_each_entry(llcp_sock, &parent_sock->accept_queue, + accept_queue) { sk = &llcp_sock->sk; if (sk->sk_state == LLCP_CONNECTED) @@ -832,7 +832,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock, copied = min_t(unsigned int, rlen, len); cskb = skb; - if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) { + if (skb_copy_datagram_msg(cskb, 0, msg, copied)) { if (!(flags & MSG_PEEK)) skb_queue_head(&sk->sk_receive_queue, skb); return -EFAULT; diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 90b16cb4005880214f1eca87cc5e72f2f75c7c34..51feb5e630082a78d67c8342c0883168df9b2fb2 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -3,6 +3,7 @@ * NFC Controller (NFCC) and a Device Host (DH). * * Copyright (C) 2011 Texas Instruments, Inc. + * Copyright (C) 2014 Marvell International Ltd. 
* * Written by Ilan Elias * @@ -196,18 +197,24 @@ static void nci_set_config_req(struct nci_dev *ndev, unsigned long opt) nci_send_cmd(ndev, NCI_OP_CORE_SET_CONFIG_CMD, (3 + param->len), &cmd); } +struct nci_rf_discover_param { + __u32 im_protocols; + __u32 tm_protocols; +}; + static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt) { + struct nci_rf_discover_param *param = + (struct nci_rf_discover_param *)opt; struct nci_rf_disc_cmd cmd; - __u32 protocols = opt; cmd.num_disc_configs = 0; if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && - (protocols & NFC_PROTO_JEWEL_MASK || - protocols & NFC_PROTO_MIFARE_MASK || - protocols & NFC_PROTO_ISO14443_MASK || - protocols & NFC_PROTO_NFC_DEP_MASK)) { + (param->im_protocols & NFC_PROTO_JEWEL_MASK || + param->im_protocols & NFC_PROTO_MIFARE_MASK || + param->im_protocols & NFC_PROTO_ISO14443_MASK || + param->im_protocols & NFC_PROTO_NFC_DEP_MASK)) { cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = NCI_NFC_A_PASSIVE_POLL_MODE; cmd.disc_configs[cmd.num_disc_configs].frequency = 1; @@ -215,7 +222,7 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt) } if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && - (protocols & NFC_PROTO_ISO14443_B_MASK)) { + (param->im_protocols & NFC_PROTO_ISO14443_B_MASK)) { cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = NCI_NFC_B_PASSIVE_POLL_MODE; cmd.disc_configs[cmd.num_disc_configs].frequency = 1; @@ -223,8 +230,8 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt) } if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && - (protocols & NFC_PROTO_FELICA_MASK || - protocols & NFC_PROTO_NFC_DEP_MASK)) { + (param->im_protocols & NFC_PROTO_FELICA_MASK || + param->im_protocols & NFC_PROTO_NFC_DEP_MASK)) { cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = NCI_NFC_F_PASSIVE_POLL_MODE; cmd.disc_configs[cmd.num_disc_configs].frequency = 1; @@ -232,13 +239,25 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt) } if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && - (protocols & NFC_PROTO_ISO15693_MASK)) { + (param->im_protocols & NFC_PROTO_ISO15693_MASK)) { cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = NCI_NFC_V_PASSIVE_POLL_MODE; cmd.disc_configs[cmd.num_disc_configs].frequency = 1; cmd.num_disc_configs++; } + if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS - 1) && + (param->tm_protocols & NFC_PROTO_NFC_DEP_MASK)) { + cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = + NCI_NFC_A_PASSIVE_LISTEN_MODE; + cmd.disc_configs[cmd.num_disc_configs].frequency = 1; + cmd.num_disc_configs++; + cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = + NCI_NFC_F_PASSIVE_LISTEN_MODE; + cmd.disc_configs[cmd.num_disc_configs].frequency = 1; + cmd.num_disc_configs++; + } + nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD, (1 + (cmd.num_disc_configs * sizeof(struct disc_config))), &cmd); @@ -280,7 +299,7 @@ static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt) { struct nci_rf_deactivate_cmd cmd; - cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE; + cmd.type = opt; nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD, sizeof(struct nci_rf_deactivate_cmd), &cmd); @@ -441,6 +460,7 @@ static int nci_set_local_general_bytes(struct nfc_dev *nfc_dev) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); struct nci_set_config_param param; + int rc; param.val = nfc_get_local_general_bytes(nfc_dev, ¶m.len); if ((param.val == NULL) || (param.len == 0)) @@ -451,14 +471,45 @@ static int 
nci_set_local_general_bytes(struct nfc_dev *nfc_dev) param.id = NCI_PN_ATR_REQ_GEN_BYTES; + rc = nci_request(ndev, nci_set_config_req, (unsigned long)¶m, + msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT)); + if (rc) + return rc; + + param.id = NCI_LN_ATR_RES_GEN_BYTES; + return nci_request(ndev, nci_set_config_req, (unsigned long)¶m, msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT)); } +static int nci_set_listen_parameters(struct nfc_dev *nfc_dev) +{ + struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); + int rc; + __u8 val; + + val = NCI_LA_SEL_INFO_NFC_DEP_MASK; + + rc = nci_set_config(ndev, NCI_LA_SEL_INFO, 1, &val); + if (rc) + return rc; + + val = NCI_LF_PROTOCOL_TYPE_NFC_DEP_MASK; + + rc = nci_set_config(ndev, NCI_LF_PROTOCOL_TYPE, 1, &val); + if (rc) + return rc; + + val = NCI_LF_CON_BITR_F_212 | NCI_LF_CON_BITR_F_424; + + return nci_set_config(ndev, NCI_LF_CON_BITR_F, 1, &val); +} + static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 im_protocols, __u32 tm_protocols) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); + struct nci_rf_discover_param param; int rc; if ((atomic_read(&ndev->state) == NCI_DISCOVERY) || @@ -476,13 +527,14 @@ static int nci_start_poll(struct nfc_dev *nfc_dev, (atomic_read(&ndev->state) == NCI_POLL_ACTIVE)) { pr_debug("target active or w4 select, implicitly deactivate\n"); - rc = nci_request(ndev, nci_rf_deactivate_req, 0, + rc = nci_request(ndev, nci_rf_deactivate_req, + NCI_DEACTIVATE_TYPE_IDLE_MODE, msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); if (rc) return -EBUSY; } - if (im_protocols & NFC_PROTO_NFC_DEP_MASK) { + if ((im_protocols | tm_protocols) & NFC_PROTO_NFC_DEP_MASK) { rc = nci_set_local_general_bytes(nfc_dev); if (rc) { pr_err("failed to set local general bytes\n"); @@ -490,7 +542,15 @@ static int nci_start_poll(struct nfc_dev *nfc_dev, } } - rc = nci_request(ndev, nci_rf_discover_req, im_protocols, + if (tm_protocols & NFC_PROTO_NFC_DEP_MASK) { + rc = nci_set_listen_parameters(nfc_dev); + if (rc) + pr_err("failed to set listen parameters\n"); + } + + param.im_protocols = im_protocols; + param.tm_protocols = tm_protocols; + rc = nci_request(ndev, nci_rf_discover_req, (unsigned long)¶m, msecs_to_jiffies(NCI_RF_DISC_TIMEOUT)); if (!rc) @@ -509,7 +569,7 @@ static void nci_stop_poll(struct nfc_dev *nfc_dev) return; } - nci_request(ndev, nci_rf_deactivate_req, 0, + nci_request(ndev, nci_rf_deactivate_req, NCI_DEACTIVATE_TYPE_IDLE_MODE, msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); } @@ -594,7 +654,8 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev, ndev->target_active_prot = 0; if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) { - nci_request(ndev, nci_rf_deactivate_req, 0, + nci_request(ndev, nci_rf_deactivate_req, + NCI_DEACTIVATE_TYPE_SLEEP_MODE, msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); } } @@ -622,9 +683,24 @@ static int nci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, static int nci_dep_link_down(struct nfc_dev *nfc_dev) { + struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); + int rc; + pr_debug("entry\n"); - nci_deactivate_target(nfc_dev, NULL); + if (nfc_dev->rf_mode == NFC_RF_INITIATOR) { + nci_deactivate_target(nfc_dev, NULL); + } else { + if (atomic_read(&ndev->state) == NCI_LISTEN_ACTIVE || + atomic_read(&ndev->state) == NCI_DISCOVERY) { + nci_request(ndev, nci_rf_deactivate_req, 0, + msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); + } + + rc = nfc_tm_deactivated(nfc_dev); + if (rc) + pr_err("error when signaling tm deactivation\n"); + } return 0; } @@ -658,18 +734,58 @@ static int nci_transceive(struct nfc_dev 
*nfc_dev, struct nfc_target *target, return rc; } +static int nci_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb) +{ + struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); + int rc; + + rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb); + if (rc) + pr_err("unable to send data\n"); + + return rc; +} + static int nci_enable_se(struct nfc_dev *nfc_dev, u32 se_idx) { + struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); + + if (ndev->ops->enable_se) + return ndev->ops->enable_se(ndev, se_idx); + return 0; } static int nci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx) { + struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); + + if (ndev->ops->disable_se) + return ndev->ops->disable_se(ndev, se_idx); + return 0; } static int nci_discover_se(struct nfc_dev *nfc_dev) { + struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); + + if (ndev->ops->discover_se) + return ndev->ops->discover_se(ndev); + + return 0; +} + +static int nci_se_io(struct nfc_dev *nfc_dev, u32 se_idx, + u8 *apdu, size_t apdu_length, + se_io_cb_t cb, void *cb_context) +{ + struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); + + if (ndev->ops->se_io) + return ndev->ops->se_io(ndev, se_idx, apdu, + apdu_length, cb, cb_context); + return 0; } @@ -683,9 +799,11 @@ static struct nfc_ops nci_nfc_ops = { .activate_target = nci_activate_target, .deactivate_target = nci_deactivate_target, .im_transceive = nci_transceive, + .tm_send = nci_tm_send, .enable_se = nci_enable_se, .disable_se = nci_disable_se, .discover_se = nci_discover_se, + .se_io = nci_se_io, }; /* ---- Interface to NCI drivers ---- */ diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c index 427ef2c7ab681245c8d392e76ccaace41cab10e3..a2de2a8cb00e2db62b46dc237f3538bc574b18b1 100644 --- a/net/nfc/nci/data.c +++ b/net/nfc/nci/data.c @@ -3,6 +3,7 @@ * NFC Controller (NFCC) and a Device Host (DH). * * Copyright (C) 2011 Texas Instruments, Inc. + * Copyright (C) 2014 Marvell International Ltd. 
* * Written by Ilan Elias * @@ -184,11 +185,16 @@ exit: static void nci_add_rx_data_frag(struct nci_dev *ndev, struct sk_buff *skb, - __u8 pbf) + __u8 pbf, __u8 status) { int reassembly_len; int err = 0; + if (status) { + err = status; + goto exit; + } + if (ndev->rx_data_reassembly) { reassembly_len = ndev->rx_data_reassembly->len; @@ -223,13 +229,24 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev, } exit: - nci_data_exchange_complete(ndev, skb, err); + if (ndev->nfc_dev->rf_mode == NFC_RF_INITIATOR) { + nci_data_exchange_complete(ndev, skb, err); + } else if (ndev->nfc_dev->rf_mode == NFC_RF_TARGET) { + /* Data received in Target mode, forward to nfc core */ + err = nfc_tm_data_received(ndev->nfc_dev, skb); + if (err) + pr_err("unable to handle received data\n"); + } else { + pr_err("rf mode unknown\n"); + kfree_skb(skb); + } } /* Rx Data packet */ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb) { __u8 pbf = nci_pbf(skb->data); + __u8 status = 0; pr_debug("len %d\n", skb->len); @@ -247,8 +264,9 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb) ndev->target_active_prot == NFC_PROTO_ISO15693) { /* frame I/F => remove the status byte */ pr_debug("frame I/F => remove the status byte\n"); + status = skb->data[skb->len - 1]; skb_trim(skb, (skb->len - 1)); } - nci_add_rx_data_frag(ndev, skb, pbf); + nci_add_rx_data_frag(ndev, skb, pbf, nci_to_errno(status)); } diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c index 205b35f666dba827d4daafd6d29c33483e73a609..22e453cb787d4d62e3246919f32ae5db14557ef3 100644 --- a/net/nfc/nci/ntf.c +++ b/net/nfc/nci/ntf.c @@ -103,7 +103,7 @@ static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev, struct rf_tech_specific_params_nfca_poll *nfca_poll, __u8 *data) { - nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data)); + nfca_poll->sens_res = __le16_to_cpu(*((__le16 *)data)); data += 2; nfca_poll->nfcid1_len = min_t(__u8, *data++, NFC_NFCID1_MAXSIZE); @@ -167,7 +167,19 @@ static __u8 *nci_extract_rf_params_nfcv_passive_poll(struct nci_dev *ndev, return data; } -__u32 nci_get_prop_rf_protocol(struct nci_dev *ndev, __u8 rf_protocol) +static __u8 *nci_extract_rf_params_nfcf_passive_listen(struct nci_dev *ndev, + struct rf_tech_specific_params_nfcf_listen *nfcf_listen, + __u8 *data) +{ + nfcf_listen->local_nfcid2_len = min_t(__u8, *data++, + NFC_NFCID2_MAXSIZE); + memcpy(nfcf_listen->local_nfcid2, data, nfcf_listen->local_nfcid2_len); + data += nfcf_listen->local_nfcid2_len; + + return data; +} + +static __u32 nci_get_prop_rf_protocol(struct nci_dev *ndev, __u8 rf_protocol) { if (ndev->ops->get_rfprotocol) return ndev->ops->get_rfprotocol(ndev, rf_protocol); @@ -401,17 +413,29 @@ static int nci_extract_activation_params_nfc_dep(struct nci_dev *ndev, struct nci_rf_intf_activated_ntf *ntf, __u8 *data) { struct activation_params_poll_nfc_dep *poll; + struct activation_params_listen_nfc_dep *listen; switch (ntf->activation_rf_tech_and_mode) { case NCI_NFC_A_PASSIVE_POLL_MODE: case NCI_NFC_F_PASSIVE_POLL_MODE: poll = &ntf->activation_params.poll_nfc_dep; - poll->atr_res_len = min_t(__u8, *data++, 63); + poll->atr_res_len = min_t(__u8, *data++, + NFC_ATR_RES_MAXSIZE - 2); pr_debug("atr_res_len %d\n", poll->atr_res_len); if (poll->atr_res_len > 0) memcpy(poll->atr_res, data, poll->atr_res_len); break; + case NCI_NFC_A_PASSIVE_LISTEN_MODE: + case NCI_NFC_F_PASSIVE_LISTEN_MODE: + listen = &ntf->activation_params.listen_nfc_dep; + listen->atr_req_len = min_t(__u8, *data++, + NFC_ATR_REQ_MAXSIZE - 2); + 
pr_debug("atr_req_len %d\n", listen->atr_req_len); + if (listen->atr_req_len > 0) + memcpy(listen->atr_req, data, listen->atr_req_len); + break; + default: pr_err("unsupported activation_rf_tech_and_mode 0x%x\n", ntf->activation_rf_tech_and_mode); @@ -444,6 +468,48 @@ static void nci_target_auto_activated(struct nci_dev *ndev, nfc_targets_found(ndev->nfc_dev, ndev->targets, ndev->n_targets); } +static int nci_store_general_bytes_nfc_dep(struct nci_dev *ndev, + struct nci_rf_intf_activated_ntf *ntf) +{ + ndev->remote_gb_len = 0; + + if (ntf->activation_params_len <= 0) + return NCI_STATUS_OK; + + switch (ntf->activation_rf_tech_and_mode) { + case NCI_NFC_A_PASSIVE_POLL_MODE: + case NCI_NFC_F_PASSIVE_POLL_MODE: + ndev->remote_gb_len = min_t(__u8, + (ntf->activation_params.poll_nfc_dep.atr_res_len + - NFC_ATR_RES_GT_OFFSET), + NFC_ATR_RES_GB_MAXSIZE); + memcpy(ndev->remote_gb, + (ntf->activation_params.poll_nfc_dep.atr_res + + NFC_ATR_RES_GT_OFFSET), + ndev->remote_gb_len); + break; + + case NCI_NFC_A_PASSIVE_LISTEN_MODE: + case NCI_NFC_F_PASSIVE_LISTEN_MODE: + ndev->remote_gb_len = min_t(__u8, + (ntf->activation_params.listen_nfc_dep.atr_req_len + - NFC_ATR_REQ_GT_OFFSET), + NFC_ATR_REQ_GB_MAXSIZE); + memcpy(ndev->remote_gb, + (ntf->activation_params.listen_nfc_dep.atr_req + + NFC_ATR_REQ_GT_OFFSET), + ndev->remote_gb_len); + break; + + default: + pr_err("unsupported activation_rf_tech_and_mode 0x%x\n", + ntf->activation_rf_tech_and_mode); + return NCI_STATUS_RF_PROTOCOL_ERROR; + } + + return NCI_STATUS_OK; +} + static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { @@ -493,6 +559,16 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev, &(ntf.rf_tech_specific_params.nfcv_poll), data); break; + case NCI_NFC_A_PASSIVE_LISTEN_MODE: + /* no RF technology specific parameters */ + break; + + case NCI_NFC_F_PASSIVE_LISTEN_MODE: + data = nci_extract_rf_params_nfcf_passive_listen(ndev, + &(ntf.rf_tech_specific_params.nfcf_listen), + data); + break; + default: pr_err("unsupported activation_rf_tech_and_mode 0x%x\n", ntf.activation_rf_tech_and_mode); @@ -546,32 +622,39 @@ exit: /* store general bytes to be reported later in dep_link_up */ if (ntf.rf_interface == NCI_RF_INTERFACE_NFC_DEP) { - ndev->remote_gb_len = 0; - - if (ntf.activation_params_len > 0) { - /* ATR_RES general bytes at offset 15 */ - ndev->remote_gb_len = min_t(__u8, - (ntf.activation_params - .poll_nfc_dep.atr_res_len - - NFC_ATR_RES_GT_OFFSET), - NFC_MAX_GT_LEN); - memcpy(ndev->remote_gb, - (ntf.activation_params.poll_nfc_dep - .atr_res + NFC_ATR_RES_GT_OFFSET), - ndev->remote_gb_len); - } + err = nci_store_general_bytes_nfc_dep(ndev, &ntf); + if (err != NCI_STATUS_OK) + pr_err("unable to store general bytes\n"); } } - if (atomic_read(&ndev->state) == NCI_DISCOVERY) { - /* A single target was found and activated automatically */ - atomic_set(&ndev->state, NCI_POLL_ACTIVE); - if (err == NCI_STATUS_OK) - nci_target_auto_activated(ndev, &ntf); - } else { /* ndev->state == NCI_W4_HOST_SELECT */ - /* A selected target was activated, so complete the request */ - atomic_set(&ndev->state, NCI_POLL_ACTIVE); - nci_req_complete(ndev, err); + if (!(ntf.activation_rf_tech_and_mode & NCI_RF_TECH_MODE_LISTEN_MASK)) { + /* Poll mode */ + if (atomic_read(&ndev->state) == NCI_DISCOVERY) { + /* A single target was found and activated + * automatically */ + atomic_set(&ndev->state, NCI_POLL_ACTIVE); + if (err == NCI_STATUS_OK) + nci_target_auto_activated(ndev, &ntf); + } else { /* ndev->state == 
NCI_W4_HOST_SELECT */ + /* A selected target was activated, so complete the + * request */ + atomic_set(&ndev->state, NCI_POLL_ACTIVE); + nci_req_complete(ndev, err); + } + } else { + /* Listen mode */ + atomic_set(&ndev->state, NCI_LISTEN_ACTIVE); + if (err == NCI_STATUS_OK && + ntf.rf_protocol == NCI_RF_PROTOCOL_NFC_DEP) { + err = nfc_tm_activated(ndev->nfc_dev, + NFC_PROTO_NFC_DEP_MASK, + NFC_COMM_PASSIVE, + ndev->remote_gb, + ndev->remote_gb_len); + if (err != NCI_STATUS_OK) + pr_err("error when signaling tm activation\n"); + } } } @@ -595,8 +678,21 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev, if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags)) nci_data_exchange_complete(ndev, NULL, -EIO); - nci_clear_target_list(ndev); - atomic_set(&ndev->state, NCI_IDLE); + switch (ntf->type) { + case NCI_DEACTIVATE_TYPE_IDLE_MODE: + nci_clear_target_list(ndev); + atomic_set(&ndev->state, NCI_IDLE); + break; + case NCI_DEACTIVATE_TYPE_SLEEP_MODE: + case NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE: + atomic_set(&ndev->state, NCI_W4_HOST_SELECT); + break; + case NCI_DEACTIVATE_TYPE_DISCOVERY: + nci_clear_target_list(ndev); + atomic_set(&ndev->state, NCI_DISCOVERY); + break; + } + nci_req_complete(ndev, NCI_STATUS_OK); } diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 43cb1c17e267d0f614029bad9d3eebcf7b6f5e4c..44989fc8cddf19550a7a525503f9d267099470b2 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -810,6 +810,33 @@ out: return rc; } +static int nfc_genl_activate_target(struct sk_buff *skb, struct genl_info *info) +{ + struct nfc_dev *dev; + u32 device_idx, target_idx, protocol; + int rc; + + if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || + !info->attrs[NFC_ATTR_TARGET_INDEX] || + !info->attrs[NFC_ATTR_PROTOCOLS]) + return -EINVAL; + + device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); + + dev = nfc_get_device(device_idx); + if (!dev) + return -ENODEV; + + target_idx = nla_get_u32(info->attrs[NFC_ATTR_TARGET_INDEX]); + protocol = nla_get_u32(info->attrs[NFC_ATTR_PROTOCOLS]); + + nfc_deactivate_target(dev, target_idx); + rc = nfc_activate_target(dev, target_idx, protocol); + + nfc_put_device(dev); + return rc; +} + static int nfc_genl_dep_link_up(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; @@ -1285,6 +1312,51 @@ static int nfc_genl_dump_ses_done(struct netlink_callback *cb) return 0; } +static int nfc_se_io(struct nfc_dev *dev, u32 se_idx, + u8 *apdu, size_t apdu_length, + se_io_cb_t cb, void *cb_context) +{ + struct nfc_se *se; + int rc; + + pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx); + + device_lock(&dev->dev); + + if (!device_is_registered(&dev->dev)) { + rc = -ENODEV; + goto error; + } + + if (!dev->dev_up) { + rc = -ENODEV; + goto error; + } + + if (!dev->ops->se_io) { + rc = -EOPNOTSUPP; + goto error; + } + + se = nfc_find_se(dev, se_idx); + if (!se) { + rc = -EINVAL; + goto error; + } + + if (se->state != NFC_SE_ENABLED) { + rc = -ENODEV; + goto error; + } + + rc = dev->ops->se_io(dev, se_idx, apdu, + apdu_length, cb, cb_context); + +error: + device_unlock(&dev->dev); + return rc; +} + struct se_io_ctx { u32 dev_idx; u32 se_idx; @@ -1367,7 +1439,7 @@ static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info) ctx->dev_idx = dev_idx; ctx->se_idx = se_idx; - return dev->ops->se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx); + return nfc_se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx); } static const struct genl_ops nfc_genl_ops[] = { @@ -1455,6 +1527,11 @@ static const struct genl_ops nfc_genl_ops[] = { .doit = nfc_genl_se_io, .policy = nfc_genl_policy, }, + { + .cmd = NFC_CMD_ACTIVATE_TARGET, +
.doit = nfc_genl_activate_target, + .policy = nfc_genl_policy, + }, }; diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c index 11c3544ea5466f54f8038ca19f500a831d1ca79f..373e138c0ab6687686c084ac6daad55af7b1dc72 100644 --- a/net/nfc/rawsock.c +++ b/net/nfc/rawsock.c @@ -231,7 +231,7 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock, if (skb == NULL) return rc; - rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); + rc = memcpy_from_msg(skb_put(skb, len), msg, len); if (rc < 0) { kfree_skb(skb); return rc; @@ -269,7 +269,7 @@ static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock, copied = len; } - rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + rc = skb_copy_datagram_msg(skb, 0, msg, copied); skb_free_datagram(sk, skb); diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig index ba3bb8203b999bce68dba7e5d158465e1cab9e02..b7d818c594234bae63e2b603cc2ee3afe10fa1c2 100644 --- a/net/openvswitch/Kconfig +++ b/net/openvswitch/Kconfig @@ -4,7 +4,9 @@ config OPENVSWITCH tristate "Open vSwitch" + depends on INET select LIBCRC32C + select NET_MPLS_GSO ---help--- Open vSwitch is a multilayer Ethernet switch targeted at virtualized environments. In addition to supporting a variety of features @@ -29,11 +31,10 @@ config OPENVSWITCH If unsure, say N. config OPENVSWITCH_GRE - bool "Open vSwitch GRE tunneling support" - depends on INET + tristate "Open vSwitch GRE tunneling support" depends on OPENVSWITCH - depends on NET_IPGRE_DEMUX && !(OPENVSWITCH=y && NET_IPGRE_DEMUX=m) - default y + depends on NET_IPGRE_DEMUX + default OPENVSWITCH ---help--- If you say Y here, then the Open vSwitch will be able to create GRE vports. @@ -43,11 +44,10 @@ config OPENVSWITCH_GRE If unsure, say Y. config OPENVSWITCH_VXLAN - bool "Open vSwitch VXLAN tunneling support" - depends on INET + tristate "Open vSwitch VXLAN tunneling support" depends on OPENVSWITCH - depends on VXLAN && !(OPENVSWITCH=y && VXLAN=m) - default y + depends on VXLAN + default OPENVSWITCH ---help--- If you say Y here, then the Open vSwitch will be able to create vxlan vports. @@ -56,11 +56,10 @@ config OPENVSWITCH_VXLAN If unsure, say Y. config OPENVSWITCH_GENEVE - bool "Open vSwitch Geneve tunneling support" - depends on INET + tristate "Open vSwitch Geneve tunneling support" depends on OPENVSWITCH - depends on GENEVE && !(OPENVSWITCH=y && GENEVE=m) - default y + depends on GENEVE + default OPENVSWITCH ---help--- If you say Y here, then the Open vSwitch will be able to create geneve vports.
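Turning these tunnel vports into tristate symbols means each one is built as a standalone module (vport-gre.ko, vport-vxlan.ko, vport-geneve.ko) rather than being linked into openvswitch.ko, and "default OPENVSWITCH" keeps the old behaviour by defaulting each symbol to however OPENVSWITCH itself is configured (y/m/n). A minimal sketch of the per-module boilerplate this implies, assuming vport_ops registration helpers along the lines of ovs_vport_ops_register()/ovs_vport_ops_unregister() and the vport-type-%d module alias convention used for autoloading (callback wiring elided):

#include <linux/module.h>
#include "vport.h"

/* Skeleton of a modular tunnel vport; real callbacks are left out. */
static struct vport_ops ovs_gre_vport_ops = {
	.type		= OVS_VPORT_TYPE_GRE,
	/* .create, .destroy, .send, ... elided in this sketch */
	.owner		= THIS_MODULE,
};

static int __init ovs_gre_tnl_init(void)
{
	/* Make the GRE vport type known to the openvswitch core. */
	return ovs_vport_ops_register(&ovs_gre_vport_ops);
}

static void __exit ovs_gre_tnl_exit(void)
{
	ovs_vport_ops_unregister(&ovs_gre_vport_ops);
}

module_init(ovs_gre_tnl_init);
module_exit(ovs_gre_tnl_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS("vport-type-3");	/* OVS_VPORT_TYPE_GRE */

With the Makefile converted to obj-$(CONFIG_...) rules below, an =m configuration then simply produces the three .ko files instead of conditionally folding the objects into openvswitch.ko.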
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile index 9a33a273c375bdad208f5fa6b36d73743c791d00..91b9478413ef1ee97602666142c65e9a2fad5228 100644 --- a/net/openvswitch/Makefile +++ b/net/openvswitch/Makefile @@ -15,14 +15,6 @@ openvswitch-y := \ vport-internal_dev.o \ vport-netdev.o -ifneq ($(CONFIG_OPENVSWITCH_GENEVE),) -openvswitch-y += vport-geneve.o -endif - -ifneq ($(CONFIG_OPENVSWITCH_VXLAN),) -openvswitch-y += vport-vxlan.o -endif - -ifneq ($(CONFIG_OPENVSWITCH_GRE),) -openvswitch-y += vport-gre.o -endif +obj-$(CONFIG_OPENVSWITCH_GENEVE) += vport-geneve.o +obj-$(CONFIG_OPENVSWITCH_VXLAN) += vport-vxlan.o +obj-$(CONFIG_OPENVSWITCH_GRE) += vport-gre.o diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 8c4229b11c34c617a81e58dcdb1a4a6034fd064c..764fdc39c63b6565d29d2c4a1d1ed7629924c412 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -28,10 +28,12 @@ #include <linux/in6.h> #include <linux/if_arp.h> #include <linux/if_vlan.h> + #include <net/ip.h> #include <net/ipv6.h> #include <net/checksum.h> #include <net/dsfield.h> +#include <net/mpls.h> #include <net/sctp/checksum.h> #include "datapath.h" @@ -67,7 +69,7 @@ static void action_fifo_init(struct action_fifo *fifo) fifo->tail = 0; } -static bool action_fifo_is_empty(struct action_fifo *fifo) +static bool action_fifo_is_empty(const struct action_fifo *fifo) { return (fifo->head == fifo->tail); } @@ -90,7 +92,7 @@ static struct deferred_action *action_fifo_put(struct action_fifo *fifo) /* Return true if fifo is not full */ static struct deferred_action *add_deferred_actions(struct sk_buff *skb, - struct sw_flow_key *key, + const struct sw_flow_key *key, const struct nlattr *attr) { struct action_fifo *fifo; @@ -107,100 +109,131 @@ static struct deferred_action *add_deferred_actions(struct sk_buff *skb, return da; } -static int make_writable(struct sk_buff *skb, int write_len) +static void invalidate_flow_key(struct sw_flow_key *key) +{ + key->eth.type = htons(0); +} + +static bool is_flow_key_valid(const struct sw_flow_key *key) { - if (!pskb_may_pull(skb, write_len)) + return !!key->eth.type; +} + +static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, + const struct ovs_action_push_mpls *mpls) +{ + __be32 *new_mpls_lse; + struct ethhdr *hdr; + + /* The networking stack does not allow simultaneous tunnel and MPLS GSO. */ + if (skb->encapsulation) + return -ENOTSUPP; + + if (skb_cow_head(skb, MPLS_HLEN) < 0) return -ENOMEM; - if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) - return 0; + skb_push(skb, MPLS_HLEN); + memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), + skb->mac_len); + skb_reset_mac_header(skb); + + new_mpls_lse = (__be32 *)skb_mpls_header(skb); + *new_mpls_lse = mpls->mpls_lse; - return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse, + MPLS_HLEN, 0)); + + hdr = eth_hdr(skb); + hdr->h_proto = mpls->mpls_ethertype; + + skb_set_inner_protocol(skb, skb->protocol); + skb->protocol = mpls->mpls_ethertype; + + invalidate_flow_key(key); + return 0; } -/* remove VLAN header from packet and update csum accordingly.
*/ -static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci) +static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key, + const __be16 ethertype) { - struct vlan_hdr *vhdr; + struct ethhdr *hdr; int err; - err = make_writable(skb, VLAN_ETH_HLEN); + err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); if (unlikely(err)) return err; - if (skb->ip_summed == CHECKSUM_COMPLETE) - skb->csum = csum_sub(skb->csum, csum_partial(skb->data - + (2 * ETH_ALEN), VLAN_HLEN, 0)); + skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN); - vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); - *current_tci = vhdr->h_vlan_TCI; + memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), + skb->mac_len); - memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); - __skb_pull(skb, VLAN_HLEN); + __skb_pull(skb, MPLS_HLEN); + skb_reset_mac_header(skb); - vlan_set_encap_proto(skb, vhdr); - skb->mac_header += VLAN_HLEN; - if (skb_network_offset(skb) < ETH_HLEN) - skb_set_network_header(skb, ETH_HLEN); - skb_reset_mac_len(skb); + /* skb_mpls_header() is used to locate the ethertype + * field correctly in the presence of VLAN tags. + */ + hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN); + hdr->h_proto = ethertype; + if (eth_p_mpls(skb->protocol)) + skb->protocol = ethertype; + invalidate_flow_key(key); return 0; } -static int pop_vlan(struct sk_buff *skb) +static int set_mpls(struct sk_buff *skb, struct sw_flow_key *key, + const __be32 *mpls_lse) { - __be16 tci; + __be32 *stack; int err; - if (likely(vlan_tx_tag_present(skb))) { - skb->vlan_tci = 0; - } else { - if (unlikely(skb->protocol != htons(ETH_P_8021Q) || - skb->len < VLAN_ETH_HLEN)) - return 0; - - err = __pop_vlan_tci(skb, &tci); - if (err) - return err; - } - /* move next vlan tag to hw accel tag */ - if (likely(skb->protocol != htons(ETH_P_8021Q) || - skb->len < VLAN_ETH_HLEN)) - return 0; - - err = __pop_vlan_tci(skb, &tci); + err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); if (unlikely(err)) return err; - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci)); + stack = (__be32 *)skb_mpls_header(skb); + if (skb->ip_summed == CHECKSUM_COMPLETE) { + __be32 diff[] = { ~(*stack), *mpls_lse }; + skb->csum = ~csum_partial((char *)diff, sizeof(diff), + ~skb->csum); + } + + *stack = *mpls_lse; + key->mpls.top_lse = *mpls_lse; return 0; } -static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan) +static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key) { - if (unlikely(vlan_tx_tag_present(skb))) { - u16 current_tag; - - /* push down current VLAN tag */ - current_tag = vlan_tx_tag_get(skb); - - if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag)) - return -ENOMEM; + int err; - if (skb->ip_summed == CHECKSUM_COMPLETE) - skb->csum = csum_add(skb->csum, csum_partial(skb->data - + (2 * ETH_ALEN), VLAN_HLEN, 0)); + err = skb_vlan_pop(skb); + if (vlan_tx_tag_present(skb)) + invalidate_flow_key(key); + else + key->eth.tci = 0; + return err; +} - } - __vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); - return 0; +static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key, + const struct ovs_action_push_vlan *vlan) +{ + if (vlan_tx_tag_present(skb)) + invalidate_flow_key(key); + else + key->eth.tci = vlan->vlan_tci; + return skb_vlan_push(skb, vlan->vlan_tpid, + ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); } -static int set_eth_addr(struct sk_buff *skb, +static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *key, const struct 
ovs_key_ethernet *eth_key) { int err; - err = make_writable(skb, ETH_HLEN); + err = skb_ensure_writable(skb, ETH_HLEN); if (unlikely(err)) return err; @@ -211,11 +244,13 @@ static int set_eth_addr(struct sk_buff *skb, ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2); + ether_addr_copy(key->eth.src, eth_key->eth_src); + ether_addr_copy(key->eth.dst, eth_key->eth_dst); return 0; } static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, - __be32 *addr, __be32 new_addr) + __be32 *addr, __be32 new_addr) { int transport_len = skb->len - skb_transport_offset(skb); @@ -298,42 +333,52 @@ static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl) nh->ttl = new_ttl; } -static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key) +static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *key, + const struct ovs_key_ipv4 *ipv4_key) { struct iphdr *nh; int err; - err = make_writable(skb, skb_network_offset(skb) + - sizeof(struct iphdr)); + err = skb_ensure_writable(skb, skb_network_offset(skb) + + sizeof(struct iphdr)); if (unlikely(err)) return err; nh = ip_hdr(skb); - if (ipv4_key->ipv4_src != nh->saddr) + if (ipv4_key->ipv4_src != nh->saddr) { set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src); + key->ipv4.addr.src = ipv4_key->ipv4_src; + } - if (ipv4_key->ipv4_dst != nh->daddr) + if (ipv4_key->ipv4_dst != nh->daddr) { set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst); + key->ipv4.addr.dst = ipv4_key->ipv4_dst; + } - if (ipv4_key->ipv4_tos != nh->tos) + if (ipv4_key->ipv4_tos != nh->tos) { ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos); + key->ip.tos = nh->tos; + } - if (ipv4_key->ipv4_ttl != nh->ttl) + if (ipv4_key->ipv4_ttl != nh->ttl) { set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl); + key->ip.ttl = ipv4_key->ipv4_ttl; + } return 0; } -static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key) +static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *key, + const struct ovs_key_ipv6 *ipv6_key) { struct ipv6hdr *nh; int err; __be32 *saddr; __be32 *daddr; - err = make_writable(skb, skb_network_offset(skb) + - sizeof(struct ipv6hdr)); + err = skb_ensure_writable(skb, skb_network_offset(skb) + + sizeof(struct ipv6hdr)); if (unlikely(err)) return err; @@ -341,9 +386,12 @@ static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key) saddr = (__be32 *)&nh->saddr; daddr = (__be32 *)&nh->daddr; - if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src))) + if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src))) { set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr, ipv6_key->ipv6_src, true); + memcpy(&key->ipv6.addr.src, ipv6_key->ipv6_src, + sizeof(ipv6_key->ipv6_src)); + } if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) { unsigned int offset = 0; @@ -357,16 +405,22 @@ static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key) set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr, ipv6_key->ipv6_dst, recalc_csum); + memcpy(&key->ipv6.addr.dst, ipv6_key->ipv6_dst, + sizeof(ipv6_key->ipv6_dst)); } set_ipv6_tc(nh, ipv6_key->ipv6_tclass); + key->ip.tos = ipv6_get_dsfield(nh); + set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label)); - nh->hop_limit = ipv6_key->ipv6_hlimit; + key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL); + nh->hop_limit = ipv6_key->ipv6_hlimit; + key->ip.ttl = ipv6_key->ipv6_hlimit; return 0; } -/* Must follow make_writable() since that can move the skb data. */ +/* Must follow skb_ensure_writable() since that can move the skb data. 
*/ static void set_tp_port(struct sk_buff *skb, __be16 *port, __be16 new_port, __sum16 *check) { @@ -390,54 +444,64 @@ static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port) } } -static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key) +static int set_udp(struct sk_buff *skb, struct sw_flow_key *key, + const struct ovs_key_udp *udp_port_key) { struct udphdr *uh; int err; - err = make_writable(skb, skb_transport_offset(skb) + - sizeof(struct udphdr)); + err = skb_ensure_writable(skb, skb_transport_offset(skb) + + sizeof(struct udphdr)); if (unlikely(err)) return err; uh = udp_hdr(skb); - if (udp_port_key->udp_src != uh->source) + if (udp_port_key->udp_src != uh->source) { set_udp_port(skb, &uh->source, udp_port_key->udp_src); + key->tp.src = udp_port_key->udp_src; + } - if (udp_port_key->udp_dst != uh->dest) + if (udp_port_key->udp_dst != uh->dest) { set_udp_port(skb, &uh->dest, udp_port_key->udp_dst); + key->tp.dst = udp_port_key->udp_dst; + } return 0; } -static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key) +static int set_tcp(struct sk_buff *skb, struct sw_flow_key *key, + const struct ovs_key_tcp *tcp_port_key) { struct tcphdr *th; int err; - err = make_writable(skb, skb_transport_offset(skb) + - sizeof(struct tcphdr)); + err = skb_ensure_writable(skb, skb_transport_offset(skb) + + sizeof(struct tcphdr)); if (unlikely(err)) return err; th = tcp_hdr(skb); - if (tcp_port_key->tcp_src != th->source) + if (tcp_port_key->tcp_src != th->source) { set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check); + key->tp.src = tcp_port_key->tcp_src; + } - if (tcp_port_key->tcp_dst != th->dest) + if (tcp_port_key->tcp_dst != th->dest) { set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check); + key->tp.dst = tcp_port_key->tcp_dst; + } return 0; } -static int set_sctp(struct sk_buff *skb, - const struct ovs_key_sctp *sctp_port_key) +static int set_sctp(struct sk_buff *skb, struct sw_flow_key *key, + const struct ovs_key_sctp *sctp_port_key) { struct sctphdr *sh; int err; unsigned int sctphoff = skb_transport_offset(skb); - err = make_writable(skb, sctphoff + sizeof(struct sctphdr)); + err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr)); if (unlikely(err)) return err; @@ -458,39 +522,35 @@ static int set_sctp(struct sk_buff *skb, sh->checksum = old_csum ^ old_correct_csum ^ new_csum; skb_clear_hash(skb); + key->tp.src = sctp_port_key->sctp_src; + key->tp.dst = sctp_port_key->sctp_dst; } return 0; } -static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port) +static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port) { - struct vport *vport; - - if (unlikely(!skb)) - return -ENOMEM; + struct vport *vport = ovs_vport_rcu(dp, out_port); - vport = ovs_vport_rcu(dp, out_port); - if (unlikely(!vport)) { + if (likely(vport)) + ovs_vport_send(vport, skb); + else kfree_skb(skb); - return -ENODEV; - } - - ovs_vport_send(vport, skb); - return 0; } static int output_userspace(struct datapath *dp, struct sk_buff *skb, struct sw_flow_key *key, const struct nlattr *attr) { + struct ovs_tunnel_info info; struct dp_upcall_info upcall; const struct nlattr *a; int rem; upcall.cmd = OVS_PACKET_CMD_ACTION; - upcall.key = key; upcall.userdata = NULL; upcall.portid = 0; + upcall.egress_tun_info = NULL; for (a = nla_data(attr), rem = nla_len(attr); rem > 0; a = nla_next(a, &rem)) { @@ -502,15 +562,27 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb, case 
OVS_USERSPACE_ATTR_PID: upcall.portid = nla_get_u32(a); break; + + case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: { + /* Get out tunnel info. */ + struct vport *vport; + + vport = ovs_vport_rcu(dp, nla_get_u32(a)); + if (vport) { + int err; + + err = ovs_vport_get_egress_tun_info(vport, skb, + &info); + if (!err) + upcall.egress_tun_info = &info; + } + break; } - } - return ovs_dp_upcall(dp, skb, &upcall); -} + } /* End of switch. */ + } -static bool last_action(const struct nlattr *a, int rem) -{ - return a->nla_len == rem; + return ovs_dp_upcall(dp, skb, key, &upcall); } static int sample(struct datapath *dp, struct sk_buff *skb, @@ -547,7 +619,7 @@ static int sample(struct datapath *dp, struct sk_buff *skb, * user space. This skb will be consumed by its caller. */ if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE && - last_action(a, rem))) + nla_is_last(a, rem))) return output_userspace(dp, skb, key, a); skb = skb_clone(skb, GFP_ATOMIC); @@ -580,18 +652,20 @@ static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key, key->ovs_flow_hash = hash; } -static int execute_set_action(struct sk_buff *skb, - const struct nlattr *nested_attr) +static int execute_set_action(struct sk_buff *skb, struct sw_flow_key *key, + const struct nlattr *nested_attr) { int err = 0; switch (nla_type(nested_attr)) { case OVS_KEY_ATTR_PRIORITY: skb->priority = nla_get_u32(nested_attr); + key->phy.priority = skb->priority; break; case OVS_KEY_ATTR_SKB_MARK: skb->mark = nla_get_u32(nested_attr); + key->phy.skb_mark = skb->mark; break; case OVS_KEY_ATTR_TUNNEL_INFO: @@ -599,27 +673,31 @@ static int execute_set_action(struct sk_buff *skb, break; case OVS_KEY_ATTR_ETHERNET: - err = set_eth_addr(skb, nla_data(nested_attr)); + err = set_eth_addr(skb, key, nla_data(nested_attr)); break; case OVS_KEY_ATTR_IPV4: - err = set_ipv4(skb, nla_data(nested_attr)); + err = set_ipv4(skb, key, nla_data(nested_attr)); break; case OVS_KEY_ATTR_IPV6: - err = set_ipv6(skb, nla_data(nested_attr)); + err = set_ipv6(skb, key, nla_data(nested_attr)); break; case OVS_KEY_ATTR_TCP: - err = set_tcp(skb, nla_data(nested_attr)); + err = set_tcp(skb, key, nla_data(nested_attr)); break; case OVS_KEY_ATTR_UDP: - err = set_udp(skb, nla_data(nested_attr)); + err = set_udp(skb, key, nla_data(nested_attr)); break; case OVS_KEY_ATTR_SCTP: - err = set_sctp(skb, nla_data(nested_attr)); + err = set_sctp(skb, key, nla_data(nested_attr)); + break; + + case OVS_KEY_ATTR_MPLS: + err = set_mpls(skb, key, nla_data(nested_attr)); break; } @@ -631,13 +709,17 @@ static int execute_recirc(struct datapath *dp, struct sk_buff *skb, const struct nlattr *a, int rem) { struct deferred_action *da; - int err; - err = ovs_flow_key_update(skb, key); - if (err) - return err; + if (!is_flow_key_valid(key)) { + int err; + + err = ovs_flow_key_update(skb, key); + if (err) + return err; + } + BUG_ON(!is_flow_key_valid(key)); - if (!last_action(a, rem)) { + if (!nla_is_last(a, rem)) { /* Recirc action is the not the last action * of the action list, need to clone the skb. */ @@ -672,7 +754,8 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, /* Every output action needs a separate clone of 'skb', but the common * case is just a single output action, so that doing a clone and * then freeing the original skbuff is wasteful. So the following code - * is slightly obscure just to avoid that. */ + * is slightly obscure just to avoid that. 
+ */ int prev_port = -1; const struct nlattr *a; int rem; @@ -681,8 +764,12 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, a = nla_next(a, &rem)) { int err = 0; - if (prev_port != -1) { - do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port); + if (unlikely(prev_port != -1)) { + struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC); + + if (out_skb) + do_output(dp, out_skb, prev_port); + prev_port = -1; } @@ -699,19 +786,25 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, execute_hash(skb, key, a); break; + case OVS_ACTION_ATTR_PUSH_MPLS: + err = push_mpls(skb, key, nla_data(a)); + break; + + case OVS_ACTION_ATTR_POP_MPLS: + err = pop_mpls(skb, key, nla_get_be16(a)); + break; + case OVS_ACTION_ATTR_PUSH_VLAN: - err = push_vlan(skb, nla_data(a)); - if (unlikely(err)) /* skb already freed. */ - return err; + err = push_vlan(skb, key, nla_data(a)); break; case OVS_ACTION_ATTR_POP_VLAN: - err = pop_vlan(skb); + err = pop_vlan(skb, key); break; case OVS_ACTION_ATTR_RECIRC: err = execute_recirc(dp, skb, key, a, rem); - if (last_action(a, rem)) { + if (nla_is_last(a, rem)) { /* If this is the last action, the skb has * been consumed or freed. * Return immediately. @@ -721,7 +814,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, break; case OVS_ACTION_ATTR_SET: - err = execute_set_action(skb, nla_data(a)); + err = execute_set_action(skb, key, nla_data(a)); break; case OVS_ACTION_ATTR_SAMPLE: @@ -771,14 +864,12 @@ static void process_deferred_actions(struct datapath *dp) /* Execute a list of actions against 'skb'. */ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, + const struct sw_flow_actions *acts, struct sw_flow_key *key) { int level = this_cpu_read(exec_actions_level); - struct sw_flow_actions *acts; int err; - acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); - this_cpu_inc(exec_actions_level); OVS_CB(skb)->egress_tun_info = NULL; err = do_execute_actions(dp, skb, key, diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index f9e556b5608650e490ad1608f5a28280f55d3d08..332b5a0317392002a18b86dadfd4671e18fb6896 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -59,6 +59,7 @@ #include "vport-netdev.h" int ovs_net_id __read_mostly; +EXPORT_SYMBOL_GPL(ovs_net_id); static struct genl_family dp_packet_genl_family; static struct genl_family dp_flow_genl_family; @@ -130,27 +131,41 @@ int lockdep_ovsl_is_held(void) else return 1; } +EXPORT_SYMBOL_GPL(lockdep_ovsl_is_held); #endif static struct vport *new_vport(const struct vport_parms *); static int queue_gso_packets(struct datapath *dp, struct sk_buff *, + const struct sw_flow_key *, const struct dp_upcall_info *); static int queue_userspace_packet(struct datapath *dp, struct sk_buff *, + const struct sw_flow_key *, const struct dp_upcall_info *); -/* Must be called with rcu_read_lock or ovs_mutex. */ -static struct datapath *get_dp(struct net *net, int dp_ifindex) +/* Must be called with rcu_read_lock. */ +static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex) { - struct datapath *dp = NULL; - struct net_device *dev; + struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex); - rcu_read_lock(); - dev = dev_get_by_index_rcu(net, dp_ifindex); if (dev) { struct vport *vport = ovs_internal_dev_get_vport(dev); if (vport) - dp = vport->dp; + return vport->dp; } + + return NULL; +} + +/* The caller must hold either ovs_mutex or rcu_read_lock to keep the + * returned dp pointer valid. 
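+ * (get_dp() below takes rcu_read_lock() only around the lookup itself;
+ * it is the caller's ovs_mutex or RCU read section that must keep the
+ * returned datapath from being freed underneath it.)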
+ */ +static inline struct datapath *get_dp(struct net *net, int dp_ifindex) +{ + struct datapath *dp; + + WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held()); + rcu_read_lock(); + dp = get_dp_rcu(net, dp_ifindex); rcu_read_unlock(); return dp; @@ -163,7 +178,7 @@ const char *ovs_dp_name(const struct datapath *dp) return vport->ops->get_name(vport); } -static int get_dpifindex(struct datapath *dp) +static int get_dpifindex(const struct datapath *dp) { struct vport *local; int ifindex; @@ -185,6 +200,7 @@ static void destroy_dp_rcu(struct rcu_head *rcu) { struct datapath *dp = container_of(rcu, struct datapath, rcu); + ovs_flow_tbl_destroy(&dp->table); free_percpu(dp->stats_percpu); release_net(ovs_dp_get_net(dp)); kfree(dp->ports); @@ -243,6 +259,7 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) const struct vport *p = OVS_CB(skb)->input_vport; struct datapath *dp = p->dp; struct sw_flow *flow; + struct sw_flow_actions *sf_acts; struct dp_stats_percpu *stats; u64 *stats_counter; u32 n_mask_hit; @@ -256,10 +273,10 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) int error; upcall.cmd = OVS_PACKET_CMD_MISS; - upcall.key = key; upcall.userdata = NULL; upcall.portid = ovs_vport_find_upcall_portid(p, skb); - error = ovs_dp_upcall(dp, skb, &upcall); + upcall.egress_tun_info = NULL; + error = ovs_dp_upcall(dp, skb, key, &upcall); if (unlikely(error)) kfree_skb(skb); else @@ -268,10 +285,10 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) goto out; } - OVS_CB(skb)->flow = flow; + ovs_flow_stats_update(flow, key->tp.flags, skb); + sf_acts = rcu_dereference(flow->sf_acts); + ovs_execute_actions(dp, skb, sf_acts, key); - ovs_flow_stats_update(OVS_CB(skb)->flow, key->tp.flags, skb); - ovs_execute_actions(dp, skb, key); stats_counter = &stats->n_hit; out: @@ -283,6 +300,7 @@ out: } int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb, + const struct sw_flow_key *key, const struct dp_upcall_info *upcall_info) { struct dp_stats_percpu *stats; @@ -294,9 +312,9 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb, } if (!skb_is_gso(skb)) - err = queue_userspace_packet(dp, skb, upcall_info); + err = queue_userspace_packet(dp, skb, key, upcall_info); else - err = queue_gso_packets(dp, skb, upcall_info); + err = queue_gso_packets(dp, skb, key, upcall_info); if (err) goto err; @@ -313,39 +331,43 @@ err: } static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, + const struct sw_flow_key *key, const struct dp_upcall_info *upcall_info) { unsigned short gso_type = skb_shinfo(skb)->gso_type; - struct dp_upcall_info later_info; struct sw_flow_key later_key; struct sk_buff *segs, *nskb; + struct ovs_skb_cb ovs_cb; int err; + ovs_cb = *OVS_CB(skb); segs = __skb_gso_segment(skb, NETIF_F_SG, false); + *OVS_CB(skb) = ovs_cb; if (IS_ERR(segs)) return PTR_ERR(segs); if (segs == NULL) return -EINVAL; + if (gso_type & SKB_GSO_UDP) { + /* The initial flow key extracted by ovs_flow_key_extract() + * in this case is for a first fragment, so we need to + * properly mark later fragments. + */ + later_key = *key; + later_key.ip.frag = OVS_FRAG_TYPE_LATER; + } + /* Queue all of the segments. 
*/ skb = segs; do { - err = queue_userspace_packet(dp, skb, upcall_info); + *OVS_CB(skb) = ovs_cb; + if (gso_type & SKB_GSO_UDP && skb != segs) + key = &later_key; + + err = queue_userspace_packet(dp, skb, key, upcall_info); if (err) break; - if (skb == segs && gso_type & SKB_GSO_UDP) { - /* The initial flow key extracted by ovs_flow_extract() - * in this case is for a first fragment, so we need to - * properly mark later fragments. - */ - later_key = *upcall_info->key; - later_key.ip.frag = OVS_FRAG_TYPE_LATER; - - later_info = *upcall_info; - later_info.key = &later_key; - upcall_info = &later_info; - } } while ((skb = skb->next)); /* Free all of the segments. */ @@ -360,46 +382,26 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, return err; } -static size_t key_attr_size(void) -{ - return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */ - + nla_total_size(0) /* OVS_KEY_ATTR_TUNNEL */ - + nla_total_size(8) /* OVS_TUNNEL_KEY_ATTR_ID */ - + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */ - + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */ - + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TOS */ - + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TTL */ - + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */ - + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_CSUM */ - + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_OAM */ - + nla_total_size(256) /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */ - + nla_total_size(4) /* OVS_KEY_ATTR_IN_PORT */ - + nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */ - + nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */ - + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */ - + nla_total_size(4) /* OVS_KEY_ATTR_8021Q */ - + nla_total_size(0) /* OVS_KEY_ATTR_ENCAP */ - + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */ - + nla_total_size(40) /* OVS_KEY_ATTR_IPV6 */ - + nla_total_size(2) /* OVS_KEY_ATTR_ICMPV6 */ - + nla_total_size(28); /* OVS_KEY_ATTR_ND */ -} - -static size_t upcall_msg_size(const struct nlattr *userdata, +static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info, unsigned int hdrlen) { size_t size = NLMSG_ALIGN(sizeof(struct ovs_header)) + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */ - + nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */ + + nla_total_size(ovs_key_attr_size()); /* OVS_PACKET_ATTR_KEY */ /* OVS_PACKET_ATTR_USERDATA */ - if (userdata) - size += NLA_ALIGN(userdata->nla_len); + if (upcall_info->userdata) + size += NLA_ALIGN(upcall_info->userdata->nla_len); + + /* OVS_PACKET_ATTR_EGRESS_TUN_KEY */ + if (upcall_info->egress_tun_info) + size += nla_total_size(ovs_tun_key_attr_size()); return size; } static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, + const struct sw_flow_key *key, const struct dp_upcall_info *upcall_info) { struct ovs_header *upcall; @@ -423,11 +425,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, if (!nskb) return -ENOMEM; - nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb)); + nskb = __vlan_hwaccel_push_inside(nskb); if (!nskb) return -ENOMEM; - nskb->vlan_tci = 0; skb = nskb; } @@ -450,7 +451,7 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, else hlen = skb->len; - len = upcall_msg_size(upcall_info->userdata, hlen); + len = upcall_msg_size(upcall_info, hlen); user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC); if (!user_skb) { err = -ENOMEM; @@ -462,7 +463,7 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, upcall->dp_ifindex = dp_ifindex; nla = 
nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY); - err = ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb); + err = ovs_nla_put_flow(key, key, user_skb); BUG_ON(err); nla_nest_end(user_skb, nla); @@ -471,6 +472,14 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, nla_len(upcall_info->userdata), nla_data(upcall_info->userdata)); + if (upcall_info->egress_tun_info) { + nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY); + err = ovs_nla_put_egress_tunnel_key(user_skb, + upcall_info->egress_tun_info); + BUG_ON(err); + nla_nest_end(user_skb, nla); + } + /* Only reserve room for attribute header, packet data is added * in skb_zerocopy() */ if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) { @@ -510,11 +519,13 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) struct sw_flow_actions *acts; struct sk_buff *packet; struct sw_flow *flow; + struct sw_flow_actions *sf_acts; struct datapath *dp; struct ethhdr *eth; struct vport *input_vport; int len; int err; + bool log = !a[OVS_FLOW_ATTR_PROBE]; err = -EINVAL; if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] || @@ -548,29 +559,22 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) goto err_kfree_skb; err = ovs_flow_key_extract_userspace(a[OVS_PACKET_ATTR_KEY], packet, - &flow->key); + &flow->key, log); if (err) goto err_flow_free; - acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS])); - err = PTR_ERR(acts); - if (IS_ERR(acts)) - goto err_flow_free; - err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], - &flow->key, 0, &acts); + &flow->key, &acts, log); if (err) goto err_flow_free; rcu_assign_pointer(flow->sf_acts, acts); - OVS_CB(packet)->egress_tun_info = NULL; - OVS_CB(packet)->flow = flow; packet->priority = flow->key.phy.priority; packet->mark = flow->key.phy.skb_mark; rcu_read_lock(); - dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); + dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex); err = -ENODEV; if (!dp) goto err_unlock; @@ -583,9 +587,10 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) goto err_unlock; OVS_CB(packet)->input_vport = input_vport; + sf_acts = rcu_dereference(flow->sf_acts); local_bh_disable(); - err = ovs_execute_actions(dp, packet, &flow->key); + err = ovs_execute_actions(dp, packet, sf_acts, &flow->key); local_bh_enable(); rcu_read_unlock(); @@ -628,7 +633,7 @@ static struct genl_family dp_packet_genl_family = { .n_ops = ARRAY_SIZE(dp_packet_genl_ops), }; -static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats, +static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats, struct ovs_dp_megaflow_stats *mega_stats) { int i; @@ -662,8 +667,8 @@ static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats, static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts) { return NLMSG_ALIGN(sizeof(struct ovs_header)) - + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */ - + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */ + + nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_KEY */ + + nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_MASK */ + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */ + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */ + nla_total_size(8) /* OVS_FLOW_ATTR_USED */ @@ -671,58 +676,67 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts) } /* Called with ovs_mutex or RCU read lock. 
*/ -static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex, - struct sk_buff *skb, u32 portid, - u32 seq, u32 flags, u8 cmd) +static int ovs_flow_cmd_fill_match(const struct sw_flow *flow, + struct sk_buff *skb) { - const int skb_orig_len = skb->len; - struct nlattr *start; - struct ovs_flow_stats stats; - __be16 tcp_flags; - unsigned long used; - struct ovs_header *ovs_header; struct nlattr *nla; int err; - ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd); - if (!ovs_header) - return -EMSGSIZE; - - ovs_header->dp_ifindex = dp_ifindex; - /* Fill flow key. */ nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY); if (!nla) - goto nla_put_failure; + return -EMSGSIZE; err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb); if (err) - goto error; + return err; + nla_nest_end(skb, nla); + /* Fill flow mask. */ nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK); if (!nla) - goto nla_put_failure; + return -EMSGSIZE; err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb); if (err) - goto error; + return err; nla_nest_end(skb, nla); + return 0; +} + +/* Called with ovs_mutex or RCU read lock. */ +static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow, + struct sk_buff *skb) +{ + struct ovs_flow_stats stats; + __be16 tcp_flags; + unsigned long used; ovs_flow_stats_get(flow, &stats, &used, &tcp_flags); if (used && nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used))) - goto nla_put_failure; + return -EMSGSIZE; if (stats.n_packets && nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats)) - goto nla_put_failure; + return -EMSGSIZE; if ((u8)ntohs(tcp_flags) && nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags))) - goto nla_put_failure; + return -EMSGSIZE; + + return 0; +} + +/* Called with ovs_mutex or RCU read lock. */ +static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow, + struct sk_buff *skb, int skb_orig_len) +{ + struct nlattr *start; + int err; /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if * this is the first flow to be dumped into 'skb'. This is unusual for @@ -746,17 +760,47 @@ static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex, nla_nest_end(skb, start); else { if (skb_orig_len) - goto error; + return err; nla_nest_cancel(skb, start); } - } else if (skb_orig_len) - goto nla_put_failure; + } else if (skb_orig_len) { + return -EMSGSIZE; + } + + return 0; +} + +/* Called with ovs_mutex or RCU read lock. */ +static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex, + struct sk_buff *skb, u32 portid, + u32 seq, u32 flags, u8 cmd) +{ + const int skb_orig_len = skb->len; + struct ovs_header *ovs_header; + int err; + + ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, + flags, cmd); + if (!ovs_header) + return -EMSGSIZE; + + ovs_header->dp_ifindex = dp_ifindex; + + err = ovs_flow_cmd_fill_match(flow, skb); + if (err) + goto error; + + err = ovs_flow_cmd_fill_stats(flow, skb); + if (err) + goto error; + + err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len); + if (err) + goto error; return genlmsg_end(skb, ovs_header); -nla_put_failure: - err = -EMSGSIZE; error: genlmsg_cancel(skb, ovs_header); return err; @@ -811,13 +855,18 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) struct sw_flow_actions *acts; struct sw_flow_match match; int error; + bool log = !a[OVS_FLOW_ATTR_PROBE]; /* Must have key and actions. 
*/ error = -EINVAL; - if (!a[OVS_FLOW_ATTR_KEY]) + if (!a[OVS_FLOW_ATTR_KEY]) { + OVS_NLERR(log, "Flow key attr not present in new flow."); goto error; - if (!a[OVS_FLOW_ATTR_ACTIONS]) + } + if (!a[OVS_FLOW_ATTR_ACTIONS]) { + OVS_NLERR(log, "Flow actions attr not present in new flow."); goto error; + } /* Most of the time we need to allocate a new flow, do it before * locking. @@ -830,24 +879,19 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) /* Extract key. */ ovs_match_init(&match, &new_flow->unmasked_key, &mask); - error = ovs_nla_get_match(&match, - a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]); + error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], + a[OVS_FLOW_ATTR_MASK], log); if (error) goto err_kfree_flow; ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask); /* Validate actions. */ - acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS])); - error = PTR_ERR(acts); - if (IS_ERR(acts)) - goto err_kfree_flow; - error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key, - 0, &acts); + &acts, log); if (error) { - OVS_NLERR("Flow actions may not be safe on all matching packets.\n"); - goto err_kfree_acts; + OVS_NLERR(log, "Flow actions may not be safe on all matching packets."); + goto err_kfree_flow; } reply = ovs_flow_cmd_alloc_info(acts, info, false); @@ -899,6 +943,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) } /* The unmasked key has to be the same for flow updates. */ if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) { + /* Look for any overlapping flow. */ flow = ovs_flow_tbl_lookup_exact(&dp->table, &match); if (!flow) { error = -ENOENT; @@ -938,23 +983,21 @@ error: return error; } +/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */ static struct sw_flow_actions *get_flow_actions(const struct nlattr *a, const struct sw_flow_key *key, - const struct sw_flow_mask *mask) + const struct sw_flow_mask *mask, + bool log) { struct sw_flow_actions *acts; struct sw_flow_key masked_key; int error; - acts = ovs_nla_alloc_flow_actions(nla_len(a)); - if (IS_ERR(acts)) - return acts; - ovs_flow_mask_key(&masked_key, key, mask); - error = ovs_nla_copy_actions(a, &masked_key, 0, &acts); + error = ovs_nla_copy_actions(a, &masked_key, &acts, log); if (error) { - OVS_NLERR("Flow actions may not be safe on all matching packets.\n"); - kfree(acts); + OVS_NLERR(log, + "Actions may not be safe on all matching packets"); return ERR_PTR(error); } @@ -973,29 +1016,31 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info) struct sw_flow_actions *old_acts = NULL, *acts = NULL; struct sw_flow_match match; int error; + bool log = !a[OVS_FLOW_ATTR_PROBE]; /* Extract key. */ error = -EINVAL; - if (!a[OVS_FLOW_ATTR_KEY]) + if (!a[OVS_FLOW_ATTR_KEY]) { + OVS_NLERR(log, "Flow key attribute not present in set flow."); goto error; + } ovs_match_init(&match, &key, &mask); - error = ovs_nla_get_match(&match, - a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]); + error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], + a[OVS_FLOW_ATTR_MASK], log); if (error) goto error; /* Validate actions. */ if (a[OVS_FLOW_ATTR_ACTIONS]) { - acts = get_flow_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, &mask); + acts = get_flow_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, &mask, + log); if (IS_ERR(acts)) { error = PTR_ERR(acts); goto error; } - } - /* Can allocate before locking if have acts. */ - if (acts) { + /* Can allocate before locking if have acts. 
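+		 * Allocating the reply skb here, before ovs_lock() is
+		 * taken below, keeps the locked section as short as
+		 * possible.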
*/ reply = ovs_flow_cmd_alloc_info(acts, info, false); if (IS_ERR(reply)) { error = PTR_ERR(reply); @@ -1070,14 +1115,16 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info) struct datapath *dp; struct sw_flow_match match; int err; + bool log = !a[OVS_FLOW_ATTR_PROBE]; if (!a[OVS_FLOW_ATTR_KEY]) { - OVS_NLERR("Flow get message rejected, Key attribute missing.\n"); + OVS_NLERR(log, + "Flow get message rejected, Key attribute missing."); return -EINVAL; } ovs_match_init(&match, &key, NULL); - err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL); + err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL, log); if (err) return err; @@ -1118,10 +1165,12 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) struct datapath *dp; struct sw_flow_match match; int err; + bool log = !a[OVS_FLOW_ATTR_PROBE]; if (likely(a[OVS_FLOW_ATTR_KEY])) { ovs_match_init(&match, &key, NULL); - err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL); + err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL, + log); if (unlikely(err)) return err; } @@ -1179,7 +1228,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) struct datapath *dp; rcu_read_lock(); - dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); + dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex); if (!dp) { rcu_read_unlock(); return -ENODEV; @@ -1211,8 +1260,10 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = { [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED }, + [OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED }, [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED }, [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG }, + [OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG }, }; static const struct genl_ops dp_flow_genl_ops[] = { @@ -1313,7 +1364,7 @@ static struct sk_buff *ovs_dp_cmd_alloc_info(struct genl_info *info) /* Called with rcu_read_lock or ovs_mutex. */ static struct datapath *lookup_datapath(struct net *net, - struct ovs_header *ovs_header, + const struct ovs_header *ovs_header, struct nlattr *a[OVS_DP_ATTR_MAX + 1]) { struct datapath *dp; @@ -1341,7 +1392,7 @@ static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *in dp->user_features = 0; } -static void ovs_dp_change(struct datapath *dp, struct nlattr **a) +static void ovs_dp_change(struct datapath *dp, struct nlattr *a[]) { if (a[OVS_DP_ATTR_USER_FEATURES]) dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]); @@ -1442,7 +1493,7 @@ err_destroy_ports_array: err_destroy_percpu: free_percpu(dp->stats_percpu); err_destroy_table: - ovs_flow_tbl_destroy(&dp->table, false); + ovs_flow_tbl_destroy(&dp->table); err_free_dp: release_net(ovs_dp_get_net(dp)); kfree(dp); @@ -1474,8 +1525,6 @@ static void __dp_destroy(struct datapath *dp) ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL)); /* RCU destroy the flow table */ - ovs_flow_tbl_destroy(&dp->table, true); - call_rcu(&dp->rcu, destroy_dp_rcu); } @@ -1707,7 +1756,7 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid, /* Called with ovs_mutex or RCU read lock. 
*/ static struct vport *lookup_vport(struct net *net, - struct ovs_header *ovs_header, + const struct ovs_header *ovs_header, struct nlattr *a[OVS_VPORT_ATTR_MAX + 1]) { struct datapath *dp; @@ -1764,6 +1813,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) return -ENOMEM; ovs_lock(); +restart: dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); err = -ENODEV; if (!dp) @@ -1795,8 +1845,11 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) vport = new_vport(&parms); err = PTR_ERR(vport); - if (IS_ERR(vport)) + if (IS_ERR(vport)) { + if (err == -EAGAIN) + goto restart; goto exit_unlock_free; + } err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid, info->snd_seq, 0, OVS_VPORT_CMD_NEW); @@ -1939,7 +1992,7 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) int i, j = 0; rcu_read_lock(); - dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); + dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex); if (!dp) { rcu_read_unlock(); return -ENODEV; @@ -2112,12 +2165,18 @@ static int __init dp_init(void) if (err) goto error_netns_exit; + err = ovs_netdev_init(); + if (err) + goto error_unreg_notifier; + err = dp_register_genl(); if (err < 0) - goto error_unreg_notifier; + goto error_unreg_netdev; return 0; +error_unreg_netdev: + ovs_netdev_exit(); error_unreg_notifier: unregister_netdevice_notifier(&ovs_dp_device_notifier); error_netns_exit: @@ -2137,6 +2196,7 @@ error: static void dp_cleanup(void) { dp_unregister_genl(ARRAY_SIZE(dp_genl_families)); + ovs_netdev_exit(); unregister_netdevice_notifier(&ovs_dp_device_notifier); unregister_pernet_device(&ovs_net_ops); rcu_barrier(); diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index 974135439c5c77af2e7ee0684054589e1a81779c..3ece94563079fb154d437d75e90db23b055d6a0a 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -94,14 +94,12 @@ struct datapath { /** * struct ovs_skb_cb - OVS data in skb CB - * @flow: The flow associated with this packet. May be %NULL if no flow. * @egress_tun_key: Tunnel information about this packet on egress path. * NULL if the packet is not being tunneled. * @input_vport: The original vport packet came in on. This value is cached * when a packet is received by OVS. */ struct ovs_skb_cb { - struct sw_flow *flow; struct ovs_tunnel_info *egress_tun_info; struct vport *input_vport; }; @@ -110,18 +108,18 @@ struct ovs_skb_cb { /** * struct dp_upcall - metadata to include with a packet to send to userspace * @cmd: One of %OVS_PACKET_CMD_*. - * @key: Becomes %OVS_PACKET_ATTR_KEY. Must be nonnull. * @userdata: If nonnull, its variable-length value is passed to userspace as * %OVS_PACKET_ATTR_USERDATA. - * @pid: Netlink PID to which packet should be sent. If @pid is 0 then no - * packet is sent and the packet is accounted in the datapath's @n_lost + * @portid: Netlink portid to which packet should be sent. If @portid is 0 + * then no packet is sent and the packet is accounted in the datapath's @n_lost * counter. + * @egress_tun_info: If nonnull, becomes %OVS_PACKET_ATTR_EGRESS_TUN_KEY. 
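+ *
+ * The pointer members come first and the u8 @cmd last, which keeps
+ * the structure tightly packed.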
 */
 struct dp_upcall_info {
-	u8 cmd;
-	const struct sw_flow_key *key;
+	const struct ovs_tunnel_info *egress_tun_info;
 	const struct nlattr *userdata;
 	u32 portid;
+	u8 cmd;
 };

 /**
@@ -151,7 +149,7 @@ int lockdep_ovsl_is_held(void);
 #define rcu_dereference_ovsl(p) \
 	rcu_dereference_check(p, lockdep_ovsl_is_held())

-static inline struct net *ovs_dp_get_net(struct datapath *dp)
+static inline struct net *ovs_dp_get_net(const struct datapath *dp)
 {
 	return read_pnet(&dp->net);
 }
@@ -187,23 +185,23 @@ extern struct genl_family dp_vport_genl_family;
 void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key);
 void ovs_dp_detach_port(struct vport *);
 int ovs_dp_upcall(struct datapath *, struct sk_buff *,
-		  const struct dp_upcall_info *);
+		  const struct sw_flow_key *, const struct dp_upcall_info *);

 const char *ovs_dp_name(const struct datapath *dp);
 struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
					 u8 cmd);

 int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
-			struct sw_flow_key *);
+			const struct sw_flow_actions *, struct sw_flow_key *);

 void ovs_dp_notify_wq(struct work_struct *work);

 int action_fifos_init(void);
 void action_fifos_exit(void);

-#define OVS_NLERR(fmt, ...) \
-	do { \
-		if (net_ratelimit()) \
-			pr_info("netlink: " fmt, ##__VA_ARGS__); \
+#define OVS_NLERR(logging_allowed, fmt, ...) \
+	do { \
+		if (logging_allowed && net_ratelimit()) \
+			pr_info("netlink: " fmt "\n", ##__VA_ARGS__); \
 	} while (0)
 #endif /* datapath.h */
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 2b78789ea7c5aa93afce5fe923d1af849547428c..70bef2ab7f2bc6017081ad780a8c5cb1aa9feb67 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -32,6 +32,7 @@
 #include
 #include
 #include
+#include <linux/mpls.h>
 #include
 #include
 #include
@@ -42,6 +43,7 @@
 #include
 #include
 #include
+#include <net/mpls.h>
 #include

 #include "datapath.h"
@@ -64,7 +66,7 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
 #define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))

 void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
-			   struct sk_buff *skb)
+			   const struct sk_buff *skb)
 {
 	struct flow_stats *stats;
 	int node = numa_node_id();
@@ -480,6 +482,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 		return -ENOMEM;
 	skb_reset_network_header(skb);
+	skb_reset_mac_len(skb);
 	__skb_push(skb, skb->data - skb_mac_header(skb));

 	/* Network layer. */
@@ -584,6 +587,33 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 			memset(&key->ip, 0, sizeof(key->ip));
 			memset(&key->ipv4, 0, sizeof(key->ipv4));
 		}
+	} else if (eth_p_mpls(key->eth.type)) {
+		size_t stack_len = MPLS_HLEN;
+
+		/* In the presence of an MPLS label stack the end of the L2
+		 * header and the beginning of the L3 header differ.
+		 *
+		 * Advance network_header to the beginning of the L3
+		 * header.  mac_len corresponds to the end of the L2 header.
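+		 *
+		 * For example, for a frame carrying a two-entry label
+		 * stack, the loop below leaves the offsets as follows:
+		 *
+		 *   [ eth hdr | LSE 1 | LSE 2 | L3 header ... ]
+		 *             ^                ^
+		 *          mac_len       network_header
+		 *
+		 * key->mpls.top_lse records LSE 1, and the walk stops at
+		 * the first entry with the bottom-of-stack bit
+		 * (MPLS_LS_S_MASK) set.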
+		 */
+		while (1) {
+			__be32 lse;
+
+			error = check_header(skb, skb->mac_len + stack_len);
+			if (unlikely(error))
+				return 0;
+
+			memcpy(&lse, skb_network_header(skb), MPLS_HLEN);
+
+			if (stack_len == MPLS_HLEN)
+				memcpy(&key->mpls.top_lse, &lse, MPLS_HLEN);
+
+			skb_set_network_header(skb, skb->mac_len + stack_len);
+			if (lse & htonl(MPLS_LS_S_MASK))
+				break;
+
+			stack_len += MPLS_HLEN;
+		}
 	} else if (key->eth.type == htons(ETH_P_IPV6)) {
 		int nh_len;	/* IPv6 Header + Extensions */

@@ -649,7 +679,7 @@ int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
 	return key_extract(skb, key);
 }

-int ovs_flow_key_extract(struct ovs_tunnel_info *tun_info,
+int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info,
			 struct sk_buff *skb,
			 struct sw_flow_key *key)
 {
 	/* Extract metadata from packet. */
@@ -682,12 +712,12 @@ int ovs_flow_key_extract(struct ovs_tunnel_info *tun_info,

 int ovs_flow_key_extract_userspace(const struct nlattr *attr,
				   struct sk_buff *skb,
-				   struct sw_flow_key *key)
+				   struct sw_flow_key *key, bool log)
 {
 	int err;

 	/* Extract metadata from netlink attributes. */
-	err = ovs_nla_get_flow_metadata(attr, key);
+	err = ovs_nla_get_flow_metadata(attr, key, log);
 	if (err)
 		return err;
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 71813318c8c73ef1efb27f8a22bcb062ac81a89a..a8b30f334388e44aea96fecf077801e1deec4c0e 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -37,8 +37,8 @@ struct sk_buff;

 /* Used to memset ovs_key_ipv4_tunnel padding. */
 #define OVS_TUNNEL_KEY_SIZE \
-	(offsetof(struct ovs_key_ipv4_tunnel, ipv4_ttl) + \
-	 FIELD_SIZEOF(struct ovs_key_ipv4_tunnel, ipv4_ttl))
+	(offsetof(struct ovs_key_ipv4_tunnel, tp_dst) + \
+	 FIELD_SIZEOF(struct ovs_key_ipv4_tunnel, tp_dst))

 struct ovs_key_ipv4_tunnel {
 	__be64 tun_id;
@@ -47,11 +47,13 @@ struct ovs_key_ipv4_tunnel {
 	__be16 tun_flags;
 	u8   ipv4_tos;
 	u8   ipv4_ttl;
+	__be16 tp_src;
+	__be16 tp_dst;
 } __packed __aligned(4); /* Minimize padding. */

 struct ovs_tunnel_info {
 	struct ovs_key_ipv4_tunnel tunnel;
-	struct geneve_opt *options;
+	const struct geneve_opt *options;
 	u8 options_len;
 };

@@ -64,27 +66,59 @@ struct ovs_tunnel_info {
					 FIELD_SIZEOF(struct sw_flow_key, tun_opts) - \
					 opt_len))

-static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
-					  const struct iphdr *iph,
-					  __be64 tun_id, __be16 tun_flags,
-					  struct geneve_opt *opts,
-					  u8 opts_len)
+static inline void __ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
+					    __be32 saddr, __be32 daddr,
+					    u8 tos, u8 ttl,
+					    __be16 tp_src,
+					    __be16 tp_dst,
+					    __be64 tun_id,
+					    __be16 tun_flags,
+					    const struct geneve_opt *opts,
+					    u8 opts_len)
 {
 	tun_info->tunnel.tun_id = tun_id;
-	tun_info->tunnel.ipv4_src = iph->saddr;
-	tun_info->tunnel.ipv4_dst = iph->daddr;
-	tun_info->tunnel.ipv4_tos = iph->tos;
-	tun_info->tunnel.ipv4_ttl = iph->ttl;
+	tun_info->tunnel.ipv4_src = saddr;
+	tun_info->tunnel.ipv4_dst = daddr;
+	tun_info->tunnel.ipv4_tos = tos;
+	tun_info->tunnel.ipv4_ttl = ttl;
 	tun_info->tunnel.tun_flags = tun_flags;

-	/* clear struct padding. */
-	memset((unsigned char *)&tun_info->tunnel + OVS_TUNNEL_KEY_SIZE, 0,
-	       sizeof(tun_info->tunnel) - OVS_TUNNEL_KEY_SIZE);
+	/* For tunnel types layered on top of IPsec, the tp_src and tp_dst
+	 * of the upper tunnel are used.
+	 * E.g. for GRE over IPsec, tp_src and tp_dst are zero.
+	 */
+	tun_info->tunnel.tp_src = tp_src;
+	tun_info->tunnel.tp_dst = tp_dst;
+
+	/* Clear struct padding.
*/ + if (sizeof(tun_info->tunnel) != OVS_TUNNEL_KEY_SIZE) + memset((unsigned char *)&tun_info->tunnel + OVS_TUNNEL_KEY_SIZE, + 0, sizeof(tun_info->tunnel) - OVS_TUNNEL_KEY_SIZE); tun_info->options = opts; tun_info->options_len = opts_len; } +static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info, + const struct iphdr *iph, + __be16 tp_src, + __be16 tp_dst, + __be64 tun_id, + __be16 tun_flags, + const struct geneve_opt *opts, + u8 opts_len) +{ + __ovs_flow_tun_info_init(tun_info, iph->saddr, iph->daddr, + iph->tos, iph->ttl, + tp_src, tp_dst, + tun_id, tun_flags, + opts, opts_len); +} + +#define OVS_SW_FLOW_KEY_METADATA_SIZE \ + (offsetof(struct sw_flow_key, recirc_id) + \ + FIELD_SIZEOF(struct sw_flow_key, recirc_id)) + struct sw_flow_key { u8 tun_opts[255]; u8 tun_opts_len; @@ -102,12 +136,17 @@ struct sw_flow_key { __be16 tci; /* 0 if no VLAN, VLAN_TAG_PRESENT set otherwise. */ __be16 type; /* Ethernet frame type. */ } eth; - struct { - u8 proto; /* IP protocol or lower 8 bits of ARP opcode. */ - u8 tos; /* IP ToS. */ - u8 ttl; /* IP TTL/hop limit. */ - u8 frag; /* One of OVS_FRAG_TYPE_*. */ - } ip; + union { + struct { + __be32 top_lse; /* top label stack entry */ + } mpls; + struct { + u8 proto; /* IP protocol or lower 8 bits of ARP opcode. */ + u8 tos; /* IP ToS. */ + u8 ttl; /* IP TTL/hop limit. */ + u8 frag; /* One of OVS_FRAG_TYPE_*. */ + } ip; + }; struct { __be16 src; /* TCP/UDP/SCTP source port. */ __be16 dst; /* TCP/UDP/SCTP destination port. */ @@ -205,18 +244,19 @@ struct arp_eth_header { } __packed; void ovs_flow_stats_update(struct sw_flow *, __be16 tcp_flags, - struct sk_buff *); + const struct sk_buff *); void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *, unsigned long *used, __be16 *tcp_flags); void ovs_flow_stats_clear(struct sw_flow *); u64 ovs_flow_used_time(unsigned long flow_jiffies); int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key); -int ovs_flow_key_extract(struct ovs_tunnel_info *tun_info, struct sk_buff *skb, +int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info, + struct sk_buff *skb, struct sw_flow_key *key); /* Extract key from packet coming from userspace. 
*/ int ovs_flow_key_extract_userspace(const struct nlattr *attr, struct sk_buff *skb, - struct sw_flow_key *key); + struct sw_flow_key *key, bool log); #endif /* flow.h */ diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 089b195c064ae56445b458e52e0dcaefa90b615c..9645a21d9eaa316057c0ed137a43bce11f34b70a 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -46,24 +46,22 @@ #include #include #include +#include #include "flow_netlink.h" -static void update_range__(struct sw_flow_match *match, - size_t offset, size_t size, bool is_mask) +static void update_range(struct sw_flow_match *match, + size_t offset, size_t size, bool is_mask) { - struct sw_flow_key_range *range = NULL; + struct sw_flow_key_range *range; size_t start = rounddown(offset, sizeof(long)); size_t end = roundup(offset + size, sizeof(long)); if (!is_mask) range = &match->range; - else if (match->mask) + else range = &match->mask->range; - if (!range) - return; - if (range->start == range->end) { range->start = start; range->end = end; @@ -79,22 +77,20 @@ static void update_range__(struct sw_flow_match *match, #define SW_FLOW_KEY_PUT(match, field, value, is_mask) \ do { \ - update_range__(match, offsetof(struct sw_flow_key, field), \ - sizeof((match)->key->field), is_mask); \ - if (is_mask) { \ - if ((match)->mask) \ - (match)->mask->key.field = value; \ - } else { \ + update_range(match, offsetof(struct sw_flow_key, field), \ + sizeof((match)->key->field), is_mask); \ + if (is_mask) \ + (match)->mask->key.field = value; \ + else \ (match)->key->field = value; \ - } \ } while (0) #define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask) \ do { \ - update_range__(match, offset, len, is_mask); \ + update_range(match, offset, len, is_mask); \ if (is_mask) \ memcpy((u8 *)&(match)->mask->key + offset, value_p, \ - len); \ + len); \ else \ memcpy((u8 *)(match)->key + offset, value_p, len); \ } while (0) @@ -103,22 +99,20 @@ static void update_range__(struct sw_flow_match *match, SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \ value_p, len, is_mask) -#define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask) \ - do { \ - update_range__(match, offsetof(struct sw_flow_key, field), \ - sizeof((match)->key->field), is_mask); \ - if (is_mask) { \ - if ((match)->mask) \ - memset((u8 *)&(match)->mask->key.field, value,\ - sizeof((match)->mask->key.field)); \ - } else { \ +#define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask) \ + do { \ + update_range(match, offsetof(struct sw_flow_key, field), \ + sizeof((match)->key->field), is_mask); \ + if (is_mask) \ + memset((u8 *)&(match)->mask->key.field, value, \ + sizeof((match)->mask->key.field)); \ + else \ memset((u8 *)&(match)->key->field, value, \ sizeof((match)->key->field)); \ - } \ } while (0) static bool match_validate(const struct sw_flow_match *match, - u64 key_attrs, u64 mask_attrs) + u64 key_attrs, u64 mask_attrs, bool log) { u64 key_expected = 1 << OVS_KEY_ATTR_ETHERNET; u64 mask_allowed = key_attrs; /* At most allow all key attributes */ @@ -134,7 +128,8 @@ static bool match_validate(const struct sw_flow_match *match, | (1 << OVS_KEY_ATTR_ICMP) | (1 << OVS_KEY_ATTR_ICMPV6) | (1 << OVS_KEY_ATTR_ARP) - | (1 << OVS_KEY_ATTR_ND)); + | (1 << OVS_KEY_ATTR_ND) + | (1 << OVS_KEY_ATTR_MPLS)); /* Always allowed mask fields. 
*/ mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL) @@ -145,10 +140,16 @@ static bool match_validate(const struct sw_flow_match *match, if (match->key->eth.type == htons(ETH_P_ARP) || match->key->eth.type == htons(ETH_P_RARP)) { key_expected |= 1 << OVS_KEY_ATTR_ARP; - if (match->mask && (match->mask->key.tp.src == htons(0xff))) + if (match->mask && (match->mask->key.eth.type == htons(0xffff))) mask_allowed |= 1 << OVS_KEY_ATTR_ARP; } + if (eth_p_mpls(match->key->eth.type)) { + key_expected |= 1 << OVS_KEY_ATTR_MPLS; + if (match->mask && (match->mask->key.eth.type == htons(0xffff))) + mask_allowed |= 1 << OVS_KEY_ATTR_MPLS; + } + if (match->key->eth.type == htons(ETH_P_IP)) { key_expected |= 1 << OVS_KEY_ATTR_IPV4; if (match->mask && (match->mask->key.eth.type == htons(0xffff))) @@ -220,7 +221,7 @@ static bool match_validate(const struct sw_flow_match *match, htons(NDISC_NEIGHBOUR_SOLICITATION) || match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) { key_expected |= 1 << OVS_KEY_ATTR_ND; - if (match->mask && (match->mask->key.tp.src == htons(0xffff))) + if (match->mask && (match->mask->key.tp.src == htons(0xff))) mask_allowed |= 1 << OVS_KEY_ATTR_ND; } } @@ -229,21 +230,65 @@ static bool match_validate(const struct sw_flow_match *match, if ((key_attrs & key_expected) != key_expected) { /* Key attributes check failed. */ - OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n", - (unsigned long long)key_attrs, (unsigned long long)key_expected); + OVS_NLERR(log, "Missing key (keys=%llx, expected=%llx)", + (unsigned long long)key_attrs, + (unsigned long long)key_expected); return false; } if ((mask_attrs & mask_allowed) != mask_attrs) { /* Mask attributes check failed. */ - OVS_NLERR("Contain more than allowed mask fields (mask_attrs=%llx, mask_allowed=%llx).\n", - (unsigned long long)mask_attrs, (unsigned long long)mask_allowed); + OVS_NLERR(log, "Unexpected mask (mask=%llx, allowed=%llx)", + (unsigned long long)mask_attrs, + (unsigned long long)mask_allowed); return false; } return true; } +size_t ovs_tun_key_attr_size(void) +{ + /* Whenever adding new OVS_TUNNEL_KEY_ FIELDS, we should consider + * updating this function. + */ + return nla_total_size(8) /* OVS_TUNNEL_KEY_ATTR_ID */ + + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */ + + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */ + + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TOS */ + + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TTL */ + + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */ + + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_CSUM */ + + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_OAM */ + + nla_total_size(256) /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */ + + nla_total_size(2) /* OVS_TUNNEL_KEY_ATTR_TP_SRC */ + + nla_total_size(2); /* OVS_TUNNEL_KEY_ATTR_TP_DST */ +} + +size_t ovs_key_attr_size(void) +{ + /* Whenever adding new OVS_KEY_ FIELDS, we should consider + * updating this function. 
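+	 * The BUILD_BUG_ON() below pins the current highest attribute,
+	 * OVS_KEY_ATTR_TUNNEL_INFO (22), so that a build break forces
+	 * this estimate to be revisited when new key attributes are
+	 * added.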
+ */ + BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 22); + + return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */ + + nla_total_size(0) /* OVS_KEY_ATTR_TUNNEL */ + + ovs_tun_key_attr_size() + + nla_total_size(4) /* OVS_KEY_ATTR_IN_PORT */ + + nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */ + + nla_total_size(4) /* OVS_KEY_ATTR_DP_HASH */ + + nla_total_size(4) /* OVS_KEY_ATTR_RECIRC_ID */ + + nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */ + + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */ + + nla_total_size(4) /* OVS_KEY_ATTR_VLAN */ + + nla_total_size(0) /* OVS_KEY_ATTR_ENCAP */ + + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */ + + nla_total_size(40) /* OVS_KEY_ATTR_IPV6 */ + + nla_total_size(2) /* OVS_KEY_ATTR_ICMPV6 */ + + nla_total_size(28); /* OVS_KEY_ATTR_ND */ +} + /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */ static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { [OVS_KEY_ATTR_ENCAP] = -1, @@ -266,6 +311,7 @@ static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { [OVS_KEY_ATTR_RECIRC_ID] = sizeof(u32), [OVS_KEY_ATTR_DP_HASH] = sizeof(u32), [OVS_KEY_ATTR_TUNNEL] = -1, + [OVS_KEY_ATTR_MPLS] = sizeof(struct ovs_key_mpls), }; static bool is_all_zero(const u8 *fp, size_t size) @@ -284,7 +330,7 @@ static bool is_all_zero(const u8 *fp, size_t size) static int __parse_flow_nlattrs(const struct nlattr *attr, const struct nlattr *a[], - u64 *attrsp, bool nz) + u64 *attrsp, bool log, bool nz) { const struct nlattr *nla; u64 attrs; @@ -296,21 +342,20 @@ static int __parse_flow_nlattrs(const struct nlattr *attr, int expected_len; if (type > OVS_KEY_ATTR_MAX) { - OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n", + OVS_NLERR(log, "Key type %d is out of range max %d", type, OVS_KEY_ATTR_MAX); return -EINVAL; } if (attrs & (1 << type)) { - OVS_NLERR("Duplicate key attribute (type %d).\n", type); + OVS_NLERR(log, "Duplicate key (type %d).", type); return -EINVAL; } expected_len = ovs_key_lens[type]; if (nla_len(nla) != expected_len && expected_len != -1) { - OVS_NLERR("Key attribute has unexpected length (type=%d" - ", length=%d, expected=%d).\n", type, - nla_len(nla), expected_len); + OVS_NLERR(log, "Key %d has unexpected len %d expected %d", + type, nla_len(nla), expected_len); return -EINVAL; } @@ -320,7 +365,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr, } } if (rem) { - OVS_NLERR("Message has %d unknown bytes.\n", rem); + OVS_NLERR(log, "Message has %d unknown bytes.", rem); return -EINVAL; } @@ -329,28 +374,84 @@ static int __parse_flow_nlattrs(const struct nlattr *attr, } static int parse_flow_mask_nlattrs(const struct nlattr *attr, - const struct nlattr *a[], u64 *attrsp) + const struct nlattr *a[], u64 *attrsp, + bool log) { - return __parse_flow_nlattrs(attr, a, attrsp, true); + return __parse_flow_nlattrs(attr, a, attrsp, log, true); } static int parse_flow_nlattrs(const struct nlattr *attr, - const struct nlattr *a[], u64 *attrsp) + const struct nlattr *a[], u64 *attrsp, + bool log) { - return __parse_flow_nlattrs(attr, a, attrsp, false); + return __parse_flow_nlattrs(attr, a, attrsp, log, false); +} + +static int genev_tun_opt_from_nlattr(const struct nlattr *a, + struct sw_flow_match *match, bool is_mask, + bool log) +{ + unsigned long opt_key_offset; + + if (nla_len(a) > sizeof(match->key->tun_opts)) { + OVS_NLERR(log, "Geneve option length err (len %d, max %zu).", + nla_len(a), sizeof(match->key->tun_opts)); + return -EINVAL; + } + + if (nla_len(a) % 4 != 0) { + OVS_NLERR(log, "Geneve opt len %d is not a multiple of 4.", + nla_len(a)); 
+ return -EINVAL; + } + + /* We need to record the length of the options passed + * down, otherwise packets with the same format but + * additional options will be silently matched. + */ + if (!is_mask) { + SW_FLOW_KEY_PUT(match, tun_opts_len, nla_len(a), + false); + } else { + /* This is somewhat unusual because it looks at + * both the key and mask while parsing the + * attributes (and by extension assumes the key + * is parsed first). Normally, we would verify + * that each is the correct length and that the + * attributes line up in the validate function. + * However, that is difficult because this is + * variable length and we won't have the + * information later. + */ + if (match->key->tun_opts_len != nla_len(a)) { + OVS_NLERR(log, "Geneve option len %d != mask len %d", + match->key->tun_opts_len, nla_len(a)); + return -EINVAL; + } + + SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true); + } + + opt_key_offset = (unsigned long)GENEVE_OPTS((struct sw_flow_key *)0, + nla_len(a)); + SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, nla_data(a), + nla_len(a), is_mask); + return 0; } static int ipv4_tun_from_nlattr(const struct nlattr *attr, - struct sw_flow_match *match, bool is_mask) + struct sw_flow_match *match, bool is_mask, + bool log) { struct nlattr *a; int rem; bool ttl = false; __be16 tun_flags = 0; - unsigned long opt_key_offset; nla_for_each_nested(a, attr, rem) { int type = nla_type(a); + int err; + static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = { [OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64), [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32), @@ -359,20 +460,21 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr, [OVS_TUNNEL_KEY_ATTR_TTL] = 1, [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0, [OVS_TUNNEL_KEY_ATTR_CSUM] = 0, + [OVS_TUNNEL_KEY_ATTR_TP_SRC] = sizeof(u16), + [OVS_TUNNEL_KEY_ATTR_TP_DST] = sizeof(u16), [OVS_TUNNEL_KEY_ATTR_OAM] = 0, [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = -1, }; if (type > OVS_TUNNEL_KEY_ATTR_MAX) { - OVS_NLERR("Unknown IPv4 tunnel attribute (type=%d, max=%d).\n", - type, OVS_TUNNEL_KEY_ATTR_MAX); + OVS_NLERR(log, "Tunnel attr %d out of range max %d", + type, OVS_TUNNEL_KEY_ATTR_MAX); return -EINVAL; } if (ovs_tunnel_key_lens[type] != nla_len(a) && ovs_tunnel_key_lens[type] != -1) { - OVS_NLERR("IPv4 tunnel attribute type has unexpected " - " length (type=%d, length=%d, expected=%d).\n", + OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d", type, nla_len(a), ovs_tunnel_key_lens[type]); return -EINVAL; } @@ -406,62 +508,26 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr, case OVS_TUNNEL_KEY_ATTR_CSUM: tun_flags |= TUNNEL_CSUM; break; + case OVS_TUNNEL_KEY_ATTR_TP_SRC: + SW_FLOW_KEY_PUT(match, tun_key.tp_src, + nla_get_be16(a), is_mask); + break; + case OVS_TUNNEL_KEY_ATTR_TP_DST: + SW_FLOW_KEY_PUT(match, tun_key.tp_dst, + nla_get_be16(a), is_mask); + break; case OVS_TUNNEL_KEY_ATTR_OAM: tun_flags |= TUNNEL_OAM; break; case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS: - tun_flags |= TUNNEL_OPTIONS_PRESENT; - if (nla_len(a) > sizeof(match->key->tun_opts)) { - OVS_NLERR("Geneve option length exceeds maximum size (len %d, max %zu).\n", - nla_len(a), - sizeof(match->key->tun_opts)); - return -EINVAL; - } - - if (nla_len(a) % 4 != 0) { - OVS_NLERR("Geneve option length is not a multiple of 4 (len %d).\n", - nla_len(a)); - return -EINVAL; - } - - /* We need to record the length of the options passed - * down, otherwise packets with the same format but - * additional options will be silently matched. 
- */ - if (!is_mask) { - SW_FLOW_KEY_PUT(match, tun_opts_len, nla_len(a), - false); - } else { - /* This is somewhat unusual because it looks at - * both the key and mask while parsing the - * attributes (and by extension assumes the key - * is parsed first). Normally, we would verify - * that each is the correct length and that the - * attributes line up in the validate function. - * However, that is difficult because this is - * variable length and we won't have the - * information later. - */ - if (match->key->tun_opts_len != nla_len(a)) { - OVS_NLERR("Geneve option key length (%d) is different from mask length (%d).", - match->key->tun_opts_len, - nla_len(a)); - return -EINVAL; - } - - SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, - true); - } + err = genev_tun_opt_from_nlattr(a, match, is_mask, log); + if (err) + return err; - opt_key_offset = (unsigned long)GENEVE_OPTS( - (struct sw_flow_key *)0, - nla_len(a)); - SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, - nla_data(a), nla_len(a), - is_mask); + tun_flags |= TUNNEL_OPTIONS_PRESENT; break; default: - OVS_NLERR("Unknown IPv4 tunnel attribute (%d).\n", + OVS_NLERR(log, "Unknown IPv4 tunnel attribute %d", type); return -EINVAL; } @@ -470,18 +536,19 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr, SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask); if (rem > 0) { - OVS_NLERR("IPv4 tunnel attribute has %d unknown bytes.\n", rem); + OVS_NLERR(log, "IPv4 tunnel attribute has %d unknown bytes.", + rem); return -EINVAL; } if (!is_mask) { if (!match->key->tun_key.ipv4_dst) { - OVS_NLERR("IPv4 tunnel destination address is zero.\n"); + OVS_NLERR(log, "IPv4 tunnel dst address is zero"); return -EINVAL; } if (!ttl) { - OVS_NLERR("IPv4 tunnel TTL not specified.\n"); + OVS_NLERR(log, "IPv4 tunnel TTL not specified."); return -EINVAL; } } @@ -514,6 +581,12 @@ static int __ipv4_tun_to_nlattr(struct sk_buff *skb, if ((output->tun_flags & TUNNEL_CSUM) && nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM)) return -EMSGSIZE; + if (output->tp_src && + nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_SRC, output->tp_src)) + return -EMSGSIZE; + if (output->tp_dst && + nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_DST, output->tp_dst)) + return -EMSGSIZE; if ((output->tun_flags & TUNNEL_OAM) && nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM)) return -EMSGSIZE; @@ -525,7 +598,6 @@ static int __ipv4_tun_to_nlattr(struct sk_buff *skb, return 0; } - static int ipv4_tun_to_nlattr(struct sk_buff *skb, const struct ovs_key_ipv4_tunnel *output, const struct geneve_opt *tun_opts, @@ -546,8 +618,17 @@ static int ipv4_tun_to_nlattr(struct sk_buff *skb, return 0; } +int ovs_nla_put_egress_tunnel_key(struct sk_buff *skb, + const struct ovs_tunnel_info *egress_tun_info) +{ + return __ipv4_tun_to_nlattr(skb, &egress_tun_info->tunnel, + egress_tun_info->options, + egress_tun_info->options_len); +} + static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs, - const struct nlattr **a, bool is_mask) + const struct nlattr **a, bool is_mask, + bool log) { if (*attrs & (1 << OVS_KEY_ATTR_DP_HASH)) { u32 hash_val = nla_get_u32(a[OVS_KEY_ATTR_DP_HASH]); @@ -572,10 +653,13 @@ static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs, if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) { u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]); - if (is_mask) + if (is_mask) { in_port = 0xffffffff; /* Always exact match in_port. 
*/ - else if (in_port >= DP_MAX_PORTS) + } else if (in_port >= DP_MAX_PORTS) { + OVS_NLERR(log, "Port %d exceeds max allowable %d", + in_port, DP_MAX_PORTS); return -EINVAL; + } SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask); *attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT); @@ -591,7 +675,7 @@ static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs, } if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) { if (ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match, - is_mask)) + is_mask, log)) return -EINVAL; *attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL); } @@ -599,12 +683,12 @@ static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs, } static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, - const struct nlattr **a, bool is_mask) + const struct nlattr **a, bool is_mask, + bool log) { int err; - u64 orig_attrs = attrs; - err = metadata_from_nlattrs(match, &attrs, a, is_mask); + err = metadata_from_nlattrs(match, &attrs, a, is_mask, log); if (err) return err; @@ -625,17 +709,16 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]); if (!(tci & htons(VLAN_TAG_PRESENT))) { if (is_mask) - OVS_NLERR("VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.\n"); + OVS_NLERR(log, "VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit."); else - OVS_NLERR("VLAN TCI does not have VLAN_TAG_PRESENT bit set.\n"); + OVS_NLERR(log, "VLAN TCI does not have VLAN_TAG_PRESENT bit set."); return -EINVAL; } SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask); attrs &= ~(1 << OVS_KEY_ATTR_VLAN); - } else if (!is_mask) - SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true); + } if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) { __be16 eth_type; @@ -645,8 +728,8 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, /* Always exact match EtherType. 
*/ eth_type = htons(0xffff); } else if (ntohs(eth_type) < ETH_P_802_3_MIN) { - OVS_NLERR("EtherType is less than minimum (type=%x, min=%x).\n", - ntohs(eth_type), ETH_P_802_3_MIN); + OVS_NLERR(log, "EtherType %x is less than min %x", + ntohs(eth_type), ETH_P_802_3_MIN); return -EINVAL; } @@ -661,8 +744,8 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]); if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) { - OVS_NLERR("Unknown IPv4 fragment type (value=%d, max=%d).\n", - ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX); + OVS_NLERR(log, "IPv4 frag type %d is out of range max %d", + ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX); return -EINVAL; } SW_FLOW_KEY_PUT(match, ip.proto, @@ -685,13 +768,13 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]); if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) { - OVS_NLERR("Unknown IPv6 fragment type (value=%d, max=%d).\n", - ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX); + OVS_NLERR(log, "IPv6 frag type %d is out of range max %d", + ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX); return -EINVAL; } if (!is_mask && ipv6_key->ipv6_label & htonl(0xFFF00000)) { - OVS_NLERR("IPv6 flow label %x is out of range (max=%x).\n", + OVS_NLERR(log, "IPv6 flow label %x is out of range (max=%x).\n", ntohl(ipv6_key->ipv6_label), (1 << 20) - 1); return -EINVAL; } @@ -723,7 +806,7 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, arp_key = nla_data(a[OVS_KEY_ATTR_ARP]); if (!is_mask && (arp_key->arp_op & htons(0xff00))) { - OVS_NLERR("Unknown ARP opcode (opcode=%d).\n", + OVS_NLERR(log, "Unknown ARP opcode (opcode=%d).", arp_key->arp_op); return -EINVAL; } @@ -742,6 +825,16 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, attrs &= ~(1 << OVS_KEY_ATTR_ARP); } + if (attrs & (1 << OVS_KEY_ATTR_MPLS)) { + const struct ovs_key_mpls *mpls_key; + + mpls_key = nla_data(a[OVS_KEY_ATTR_MPLS]); + SW_FLOW_KEY_PUT(match, mpls.top_lse, + mpls_key->mpls_lse, is_mask); + + attrs &= ~(1 << OVS_KEY_ATTR_MPLS); + } + if (attrs & (1 << OVS_KEY_ATTR_TCP)) { const struct ovs_key_tcp *tcp_key; @@ -752,15 +845,9 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, } if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) { - if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) { - SW_FLOW_KEY_PUT(match, tp.flags, - nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]), - is_mask); - } else { - SW_FLOW_KEY_PUT(match, tp.flags, - nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]), - is_mask); - } + SW_FLOW_KEY_PUT(match, tp.flags, + nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]), + is_mask); attrs &= ~(1 << OVS_KEY_ATTR_TCP_FLAGS); } @@ -819,8 +906,11 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, attrs &= ~(1 << OVS_KEY_ATTR_ND); } - if (attrs != 0) + if (attrs != 0) { + OVS_NLERR(log, "Unknown key attributes %llx", + (unsigned long long)attrs); return -EINVAL; + } return 0; } @@ -858,10 +948,14 @@ static void mask_set_nlattr(struct nlattr *attr, u8 val) * of this flow. * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink * attribute specifies the mask field of the wildcarded flow. + * @log: Boolean to allow kernel error logging. Normally true, but when + * probing for feature compatibility this should be passed in as false to + * suppress unnecessary error logging. 
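+ * Callers derive @log from the OVS_FLOW_ATTR_PROBE flag, i.e.
+ * log = !a[OVS_FLOW_ATTR_PROBE].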
*/ int ovs_nla_get_match(struct sw_flow_match *match, - const struct nlattr *key, - const struct nlattr *mask) + const struct nlattr *nla_key, + const struct nlattr *nla_mask, + bool log) { const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; const struct nlattr *encap; @@ -871,7 +965,7 @@ int ovs_nla_get_match(struct sw_flow_match *match, bool encap_valid = false; int err; - err = parse_flow_nlattrs(key, a, &key_attrs); + err = parse_flow_nlattrs(nla_key, a, &key_attrs, log); if (err) return err; @@ -882,7 +976,7 @@ int ovs_nla_get_match(struct sw_flow_match *match, if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) && (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) { - OVS_NLERR("Invalid Vlan frame.\n"); + OVS_NLERR(log, "Invalid Vlan frame."); return -EINVAL; } @@ -893,61 +987,68 @@ int ovs_nla_get_match(struct sw_flow_match *match, encap_valid = true; if (tci & htons(VLAN_TAG_PRESENT)) { - err = parse_flow_nlattrs(encap, a, &key_attrs); + err = parse_flow_nlattrs(encap, a, &key_attrs, log); if (err) return err; } else if (!tci) { /* Corner case for truncated 802.1Q header. */ if (nla_len(encap)) { - OVS_NLERR("Truncated 802.1Q header has non-zero encap attribute.\n"); + OVS_NLERR(log, "Truncated 802.1Q header has non-zero encap attribute."); return -EINVAL; } } else { - OVS_NLERR("Encap attribute is set for a non-VLAN frame.\n"); + OVS_NLERR(log, "Encap attr is set for non-VLAN frame"); return -EINVAL; } } - err = ovs_key_from_nlattrs(match, key_attrs, a, false); + err = ovs_key_from_nlattrs(match, key_attrs, a, false, log); if (err) return err; - if (match->mask && !mask) { - /* Create an exact match mask. We need to set to 0xff all the - * 'match->mask' fields that have been touched in 'match->key'. - * We cannot simply memset 'match->mask', because padding bytes - * and fields not specified in 'match->key' should be left to 0. - * Instead, we use a stream of netlink attributes, copied from - * 'key' and set to 0xff: ovs_key_from_nlattrs() will take care - * of filling 'match->mask' appropriately. - */ - newmask = kmemdup(key, nla_total_size(nla_len(key)), - GFP_KERNEL); - if (!newmask) - return -ENOMEM; + if (match->mask) { + if (!nla_mask) { + /* Create an exact match mask. We need to set to 0xff + * all the 'match->mask' fields that have been touched + * in 'match->key'. We cannot simply memset + * 'match->mask', because padding bytes and fields not + * specified in 'match->key' should be left to 0. + * Instead, we use a stream of netlink attributes, + * copied from 'key' and set to 0xff. + * ovs_key_from_nlattrs() will take care of filling + * 'match->mask' appropriately. + */ + newmask = kmemdup(nla_key, + nla_total_size(nla_len(nla_key)), + GFP_KERNEL); + if (!newmask) + return -ENOMEM; - mask_set_nlattr(newmask, 0xff); + mask_set_nlattr(newmask, 0xff); - /* The userspace does not send tunnel attributes that are 0, - * but we should not wildcard them nonetheless. - */ - if (match->key->tun_key.ipv4_dst) - SW_FLOW_KEY_MEMSET_FIELD(match, tun_key, 0xff, true); + /* The userspace does not send tunnel attributes that + * are 0, but we should not wildcard them nonetheless. + */ + if (match->key->tun_key.ipv4_dst) + SW_FLOW_KEY_MEMSET_FIELD(match, tun_key, + 0xff, true); - mask = newmask; - } + nla_mask = newmask; + } - if (mask) { - err = parse_flow_mask_nlattrs(mask, a, &mask_attrs); + err = parse_flow_mask_nlattrs(nla_mask, a, &mask_attrs, log); if (err) goto free_newmask; + /* Always match on tci. 
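+		 * The exact-match TCI mask is installed first; any VLAN
+		 * mask attribute supplied by userspace is applied on top
+		 * of it by ovs_key_from_nlattrs() below.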
*/ + SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true); + if (mask_attrs & 1 << OVS_KEY_ATTR_ENCAP) { __be16 eth_type = 0; __be16 tci = 0; if (!encap_valid) { - OVS_NLERR("Encap mask attribute is set for non-VLAN frame.\n"); + OVS_NLERR(log, "Encap mask attribute is set for non-VLAN frame."); err = -EINVAL; goto free_newmask; } @@ -959,12 +1060,13 @@ int ovs_nla_get_match(struct sw_flow_match *match, if (eth_type == htons(0xffff)) { mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE); encap = a[OVS_KEY_ATTR_ENCAP]; - err = parse_flow_mask_nlattrs(encap, a, &mask_attrs); + err = parse_flow_mask_nlattrs(encap, a, + &mask_attrs, log); if (err) goto free_newmask; } else { - OVS_NLERR("VLAN frames must have an exact match on the TPID (mask=%x).\n", - ntohs(eth_type)); + OVS_NLERR(log, "VLAN frames must have an exact match on the TPID (mask=%x).", + ntohs(eth_type)); err = -EINVAL; goto free_newmask; } @@ -973,18 +1075,19 @@ int ovs_nla_get_match(struct sw_flow_match *match, tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]); if (!(tci & htons(VLAN_TAG_PRESENT))) { - OVS_NLERR("VLAN tag present bit must have an exact match (tci_mask=%x).\n", ntohs(tci)); + OVS_NLERR(log, "VLAN tag present bit must have an exact match (tci_mask=%x).", + ntohs(tci)); err = -EINVAL; goto free_newmask; } } - err = ovs_key_from_nlattrs(match, mask_attrs, a, true); + err = ovs_key_from_nlattrs(match, mask_attrs, a, true, log); if (err) goto free_newmask; } - if (!match_validate(match, key_attrs, mask_attrs)) + if (!match_validate(match, key_attrs, mask_attrs, log)) err = -EINVAL; free_newmask: @@ -997,6 +1100,9 @@ free_newmask: * @key: Receives extracted in_port, priority, tun_key and skb_mark. * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute * sequence. + * @log: Boolean to allow kernel error logging. Normally true, but when + * probing for feature compatibility this should be passed in as false to + * suppress unnecessary error logging. 
* * This parses a series of Netlink attributes that form a flow key, which must * take the same form accepted by flow_from_nlattrs(), but only enough of it to @@ -1005,14 +1111,15 @@ free_newmask: */ int ovs_nla_get_flow_metadata(const struct nlattr *attr, - struct sw_flow_key *key) + struct sw_flow_key *key, + bool log) { const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; struct sw_flow_match match; u64 attrs = 0; int err; - err = parse_flow_nlattrs(attr, a, &attrs); + err = parse_flow_nlattrs(attr, a, &attrs, log); if (err) return -EINVAL; @@ -1021,7 +1128,7 @@ int ovs_nla_get_flow_metadata(const struct nlattr *attr, key->phy.in_port = DP_MAX_PORTS; - return metadata_from_nlattrs(&match, &attrs, a, false); + return metadata_from_nlattrs(&match, &attrs, a, false, log); } int ovs_nla_put_flow(const struct sw_flow_key *swkey, @@ -1147,6 +1254,14 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey, arp_key->arp_op = htons(output->ip.proto); ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha); ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha); + } else if (eth_p_mpls(swkey->eth.type)) { + struct ovs_key_mpls *mpls_key; + + nla = nla_reserve(skb, OVS_KEY_ATTR_MPLS, sizeof(*mpls_key)); + if (!nla) + goto nla_put_failure; + mpls_key = nla_data(nla); + mpls_key->mpls_lse = output->mpls.top_lse; } if ((swkey->eth.type == htons(ETH_P_IP) || @@ -1233,12 +1348,14 @@ nla_put_failure: #define MAX_ACTIONS_BUFSIZE (32 * 1024) -struct sw_flow_actions *ovs_nla_alloc_flow_actions(int size) +static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log) { struct sw_flow_actions *sfa; - if (size > MAX_ACTIONS_BUFSIZE) + if (size > MAX_ACTIONS_BUFSIZE) { + OVS_NLERR(log, "Flow action size %u bytes exceeds max", size); return ERR_PTR(-EINVAL); + } sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL); if (!sfa) @@ -1256,7 +1373,7 @@ void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts) } static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, - int attr_len) + int attr_len, bool log) { struct sw_flow_actions *acts; @@ -1276,7 +1393,7 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, new_acts_size = MAX_ACTIONS_BUFSIZE; } - acts = ovs_nla_alloc_flow_actions(new_acts_size); + acts = nla_alloc_flow_actions(new_acts_size, log); if (IS_ERR(acts)) return (void *)acts; @@ -1291,11 +1408,11 @@ out: } static struct nlattr *__add_action(struct sw_flow_actions **sfa, - int attrtype, void *data, int len) + int attrtype, void *data, int len, bool log) { struct nlattr *a; - a = reserve_sfa_size(sfa, nla_attr_size(len)); + a = reserve_sfa_size(sfa, nla_attr_size(len), log); if (IS_ERR(a)) return a; @@ -1310,24 +1427,22 @@ static struct nlattr *__add_action(struct sw_flow_actions **sfa, } static int add_action(struct sw_flow_actions **sfa, int attrtype, - void *data, int len) + void *data, int len, bool log) { struct nlattr *a; - a = __add_action(sfa, attrtype, data, len); - if (IS_ERR(a)) - return PTR_ERR(a); + a = __add_action(sfa, attrtype, data, len, log); - return 0; + return PTR_ERR_OR_ZERO(a); } static inline int add_nested_action_start(struct sw_flow_actions **sfa, - int attrtype) + int attrtype, bool log) { int used = (*sfa)->actions_len; int err; - err = add_action(sfa, attrtype, NULL, 0); + err = add_action(sfa, attrtype, NULL, 0, log); if (err) return err; @@ -1343,9 +1458,15 @@ static inline void add_nested_action_end(struct sw_flow_actions *sfa, a->nla_len = sfa->actions_len - st_offset; } +static int __ovs_nla_copy_actions(const struct nlattr *attr, + 
const struct sw_flow_key *key, + int depth, struct sw_flow_actions **sfa, + __be16 eth_type, __be16 vlan_tci, bool log); + static int validate_and_copy_sample(const struct nlattr *attr, const struct sw_flow_key *key, int depth, - struct sw_flow_actions **sfa) + struct sw_flow_actions **sfa, + __be16 eth_type, __be16 vlan_tci, bool log) { const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1]; const struct nlattr *probability, *actions; @@ -1371,18 +1492,19 @@ static int validate_and_copy_sample(const struct nlattr *attr, return -EINVAL; /* validation done, copy sample action. */ - start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE); + start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE, log); if (start < 0) return start; err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY, - nla_data(probability), sizeof(u32)); + nla_data(probability), sizeof(u32), log); if (err) return err; - st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS); + st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS, log); if (st_acts < 0) return st_acts; - err = ovs_nla_copy_actions(actions, key, depth + 1, sfa); + err = __ovs_nla_copy_actions(actions, key, depth + 1, sfa, + eth_type, vlan_tci, log); if (err) return err; @@ -1392,10 +1514,10 @@ static int validate_and_copy_sample(const struct nlattr *attr, return 0; } -static int validate_tp_port(const struct sw_flow_key *flow_key) +static int validate_tp_port(const struct sw_flow_key *flow_key, + __be16 eth_type) { - if ((flow_key->eth.type == htons(ETH_P_IP) || - flow_key->eth.type == htons(ETH_P_IPV6)) && + if ((eth_type == htons(ETH_P_IP) || eth_type == htons(ETH_P_IPV6)) && (flow_key->tp.src || flow_key->tp.dst)) return 0; @@ -1419,7 +1541,7 @@ void ovs_match_init(struct sw_flow_match *match, } static int validate_and_copy_set_tun(const struct nlattr *attr, - struct sw_flow_actions **sfa) + struct sw_flow_actions **sfa, bool log) { struct sw_flow_match match; struct sw_flow_key key; @@ -1428,7 +1550,7 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, int err, start; ovs_match_init(&match, &key, NULL); - err = ipv4_tun_from_nlattr(nla_data(attr), &match, false); + err = ipv4_tun_from_nlattr(nla_data(attr), &match, false, log); if (err) return err; @@ -1457,12 +1579,12 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, key.tun_key.tun_flags |= crit_opt ? 
TUNNEL_CRIT_OPT : 0; }; - start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET); + start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET, log); if (start < 0) return start; a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL, - sizeof(*tun_info) + key.tun_opts_len); + sizeof(*tun_info) + key.tun_opts_len, log); if (IS_ERR(a)) return PTR_ERR(a); @@ -1490,7 +1612,7 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, static int validate_set(const struct nlattr *a, const struct sw_flow_key *flow_key, struct sw_flow_actions **sfa, - bool *set_tun) + bool *set_tun, __be16 eth_type, bool log) { const struct nlattr *ovs_key = nla_data(a); int key_type = nla_type(ovs_key); @@ -1515,14 +1637,17 @@ static int validate_set(const struct nlattr *a, break; case OVS_KEY_ATTR_TUNNEL: + if (eth_p_mpls(eth_type)) + return -EINVAL; + *set_tun = true; - err = validate_and_copy_set_tun(a, sfa); + err = validate_and_copy_set_tun(a, sfa, log); if (err) return err; break; case OVS_KEY_ATTR_IPV4: - if (flow_key->eth.type != htons(ETH_P_IP)) + if (eth_type != htons(ETH_P_IP)) return -EINVAL; if (!flow_key->ip.proto) @@ -1538,7 +1663,7 @@ static int validate_set(const struct nlattr *a, break; case OVS_KEY_ATTR_IPV6: - if (flow_key->eth.type != htons(ETH_P_IPV6)) + if (eth_type != htons(ETH_P_IPV6)) return -EINVAL; if (!flow_key->ip.proto) @@ -1560,19 +1685,24 @@ static int validate_set(const struct nlattr *a, if (flow_key->ip.proto != IPPROTO_TCP) return -EINVAL; - return validate_tp_port(flow_key); + return validate_tp_port(flow_key, eth_type); case OVS_KEY_ATTR_UDP: if (flow_key->ip.proto != IPPROTO_UDP) return -EINVAL; - return validate_tp_port(flow_key); + return validate_tp_port(flow_key, eth_type); + + case OVS_KEY_ATTR_MPLS: + if (!eth_p_mpls(eth_type)) + return -EINVAL; + break; case OVS_KEY_ATTR_SCTP: if (flow_key->ip.proto != IPPROTO_SCTP) return -EINVAL; - return validate_tp_port(flow_key); + return validate_tp_port(flow_key, eth_type); default: return -EINVAL; @@ -1586,6 +1716,7 @@ static int validate_userspace(const struct nlattr *attr) static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = { [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 }, [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC }, + [OVS_USERSPACE_ATTR_EGRESS_TUN_PORT] = {.type = NLA_U32 }, }; struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1]; int error; @@ -1603,12 +1734,12 @@ static int validate_userspace(const struct nlattr *attr) } static int copy_action(const struct nlattr *from, - struct sw_flow_actions **sfa) + struct sw_flow_actions **sfa, bool log) { int totlen = NLA_ALIGN(from->nla_len); struct nlattr *to; - to = reserve_sfa_size(sfa, from->nla_len); + to = reserve_sfa_size(sfa, from->nla_len, log); if (IS_ERR(to)) return PTR_ERR(to); @@ -1616,12 +1747,13 @@ static int copy_action(const struct nlattr *from, return 0; } -int ovs_nla_copy_actions(const struct nlattr *attr, - const struct sw_flow_key *key, - int depth, - struct sw_flow_actions **sfa) +static int __ovs_nla_copy_actions(const struct nlattr *attr, + const struct sw_flow_key *key, + int depth, struct sw_flow_actions **sfa, + __be16 eth_type, __be16 vlan_tci, bool log) { const struct nlattr *a; + bool out_tnl_port = false; int rem, err; if (depth >= SAMPLE_ACTION_DEPTH) @@ -1633,6 +1765,8 @@ int ovs_nla_copy_actions(const struct nlattr *attr, [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32), [OVS_ACTION_ATTR_RECIRC] = sizeof(u32), [OVS_ACTION_ATTR_USERSPACE] = (u32)-1, + [OVS_ACTION_ATTR_PUSH_MPLS] = sizeof(struct ovs_action_push_mpls), + 
			[OVS_ACTION_ATTR_POP_MPLS] = sizeof(__be16),
			[OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
			[OVS_ACTION_ATTR_POP_VLAN] = 0,
			[OVS_ACTION_ATTR_SET] = (u32)-1,
@@ -1662,6 +1796,8 @@ int ovs_nla_copy_actions(const struct nlattr *attr,
 		case OVS_ACTION_ATTR_OUTPUT:
 			if (nla_get_u32(a) >= DP_MAX_PORTS)
 				return -EINVAL;
+			out_tnl_port = false;
+
 			break;

 		case OVS_ACTION_ATTR_HASH: {
@@ -1678,6 +1814,7 @@ int ovs_nla_copy_actions(const struct nlattr *attr,
 		}

 		case OVS_ACTION_ATTR_POP_VLAN:
+			vlan_tci = htons(0);
 			break;

 		case OVS_ACTION_ATTR_PUSH_VLAN:
@@ -1686,29 +1823,77 @@ int ovs_nla_copy_actions(const struct nlattr *attr,
 				return -EINVAL;
 			if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
 				return -EINVAL;
+			vlan_tci = vlan->vlan_tci;
 			break;

 		case OVS_ACTION_ATTR_RECIRC:
 			break;

+		case OVS_ACTION_ATTR_PUSH_MPLS: {
+			const struct ovs_action_push_mpls *mpls = nla_data(a);
+
+			/* The networking stack does not allow simultaneous
+			 * tunnel and MPLS GSO.
+			 */
+			if (out_tnl_port)
+				return -EINVAL;
+
+			if (!eth_p_mpls(mpls->mpls_ethertype))
+				return -EINVAL;
+			/* Prohibit pushing MPLS except onto a whitelist of
+			 * ethertypes for which the tag order is known.
+			 */
+			if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
+			    (eth_type != htons(ETH_P_IP) &&
+			     eth_type != htons(ETH_P_IPV6) &&
+			     eth_type != htons(ETH_P_ARP) &&
+			     eth_type != htons(ETH_P_RARP) &&
+			     !eth_p_mpls(eth_type)))
+				return -EINVAL;
+			eth_type = mpls->mpls_ethertype;
+			break;
+		}
+
+		case OVS_ACTION_ATTR_POP_MPLS:
+			if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
+			    !eth_p_mpls(eth_type))
+				return -EINVAL;
+
+			/* Disallow subsequent L2.5+ set actions and
+			 * mpls_pop actions: there is no check here to
+			 * ensure that the new eth_type is valid, so set
+			 * actions could write off the end of the packet
+			 * or otherwise corrupt it.
+			 *
+			 * Support for these actions is planned using
+			 * packet recirculation.
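+			 *
+			 * Until then, the eth_type = htons(0) below makes
+			 * any later L2.5+ set action or MPLS action fail
+			 * its ethertype check.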
+ */ + eth_type = htons(0); + break; + case OVS_ACTION_ATTR_SET: - err = validate_set(a, key, sfa, &skip_copy); + err = validate_set(a, key, sfa, + &out_tnl_port, eth_type, log); if (err) return err; + + skip_copy = out_tnl_port; break; case OVS_ACTION_ATTR_SAMPLE: - err = validate_and_copy_sample(a, key, depth, sfa); + err = validate_and_copy_sample(a, key, depth, sfa, + eth_type, vlan_tci, log); if (err) return err; skip_copy = true; break; default: + OVS_NLERR(log, "Unknown Action type %d", type); return -EINVAL; } if (!skip_copy) { - err = copy_action(a, sfa); + err = copy_action(a, sfa, log); if (err) return err; } @@ -1720,6 +1905,24 @@ int ovs_nla_copy_actions(const struct nlattr *attr, return 0; } +int ovs_nla_copy_actions(const struct nlattr *attr, + const struct sw_flow_key *key, + struct sw_flow_actions **sfa, bool log) +{ + int err; + + *sfa = nla_alloc_flow_actions(nla_len(attr), log); + if (IS_ERR(*sfa)) + return PTR_ERR(*sfa); + + err = __ovs_nla_copy_actions(attr, key, 0, sfa, key->eth.type, + key->eth.tci, log); + if (err) + kfree(*sfa); + + return err; +} + static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb) { const struct nlattr *a; diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h index 206e45add8882540b56426c9e57fdca86a0e20a5..577f12be34592a5abbab4f142b5ab02ebda49179 100644 --- a/net/openvswitch/flow_netlink.h +++ b/net/openvswitch/flow_netlink.h @@ -37,24 +37,28 @@ #include "flow.h" +size_t ovs_tun_key_attr_size(void); +size_t ovs_key_attr_size(void); + void ovs_match_init(struct sw_flow_match *match, struct sw_flow_key *key, struct sw_flow_mask *mask); int ovs_nla_put_flow(const struct sw_flow_key *, const struct sw_flow_key *, struct sk_buff *); -int ovs_nla_get_flow_metadata(const struct nlattr *, struct sw_flow_key *); +int ovs_nla_get_flow_metadata(const struct nlattr *, struct sw_flow_key *, + bool log); -int ovs_nla_get_match(struct sw_flow_match *match, - const struct nlattr *, - const struct nlattr *); +int ovs_nla_get_match(struct sw_flow_match *, const struct nlattr *key, + const struct nlattr *mask, bool log); +int ovs_nla_put_egress_tunnel_key(struct sk_buff *, + const struct ovs_tunnel_info *); int ovs_nla_copy_actions(const struct nlattr *attr, - const struct sw_flow_key *key, int depth, - struct sw_flow_actions **sfa); + const struct sw_flow_key *key, + struct sw_flow_actions **sfa, bool log); int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb); -struct sw_flow_actions *ovs_nla_alloc_flow_actions(int actions_len); void ovs_nla_free_flow_actions(struct sw_flow_actions *); #endif /* flow_netlink.h */ diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index cf2d853646f05dc61a1c8f9093782196860ab9bc..5899bf161c617de67566f60ad86a23b8ee111d90 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2013 Nicira, Inc. + * Copyright (c) 2007-2014 Nicira, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -25,7 +25,7 @@ #include #include #include -#include <linux/hash.h> +#include <linux/jhash.h> #include #include #include @@ -107,7 +107,7 @@ err: return ERR_PTR(-ENOMEM); } -int ovs_flow_tbl_count(struct flow_table *table) +int ovs_flow_tbl_count(const struct flow_table *table) { return table->count; } @@ -250,11 +250,14 @@ skip_flows: __table_instance_destroy(ti); } -void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred) +/* No need for locking; this function is called from an RCU callback or + * the error path. + */ +void ovs_flow_tbl_destroy(struct flow_table *table) { - struct table_instance *ti = ovsl_dereference(table->ti); + struct table_instance *ti = rcu_dereference_raw(table->ti); - table_instance_destroy(ti, deferred); + table_instance_destroy(ti, false); } struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti, @@ -363,7 +366,7 @@ static u32 flow_hash(const struct sw_flow_key *key, int key_start, /* Make sure number of hash bytes are multiple of u32. */ BUILD_BUG_ON(sizeof(long) % sizeof(u32)); - return arch_fast_hash2(hash_key, hash_u32s, 0); + return jhash2(hash_key, hash_u32s, 0); } static int flow_key_start(const struct sw_flow_key *key) @@ -398,7 +401,7 @@ static bool flow_cmp_masked_key(const struct sw_flow *flow, } bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, - struct sw_flow_match *match) + const struct sw_flow_match *match) { struct sw_flow_key *key = match->key; int key_start = flow_key_start(key); @@ -409,7 +412,7 @@ bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, static struct sw_flow *masked_flow_lookup(struct table_instance *ti, const struct sw_flow_key *unmasked, - struct sw_flow_mask *mask) + const struct sw_flow_mask *mask) { struct sw_flow *flow; struct hlist_head *head; @@ -457,7 +460,7 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl, } struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl, - struct sw_flow_match *match) + const struct sw_flow_match *match) { struct table_instance *ti = rcu_dereference_ovsl(tbl->ti); struct sw_flow_mask *mask; @@ -560,7 +563,7 @@ static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl, /* Add 'mask' into the mask list, if it is not already there. */ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow, - struct sw_flow_mask *new) + const struct sw_flow_mask *new) { struct sw_flow_mask *mask; mask = flow_mask_find(tbl, new); @@ -583,7 +586,7 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow, /* Must be called with OVS mutex held.
*/ int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, - struct sw_flow_mask *mask) + const struct sw_flow_mask *mask) { struct table_instance *new_ti = NULL; struct table_instance *ti; diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h index 5918bff7f3f6cfee2fd58bcd15101446b44469d8..309fa6415689d89494af3762dc4c7caefa7db673 100644 --- a/net/openvswitch/flow_table.h +++ b/net/openvswitch/flow_table.h @@ -61,12 +61,12 @@ struct sw_flow *ovs_flow_alloc(void); void ovs_flow_free(struct sw_flow *, bool deferred); int ovs_flow_tbl_init(struct flow_table *); -int ovs_flow_tbl_count(struct flow_table *table); -void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred); +int ovs_flow_tbl_count(const struct flow_table *table); +void ovs_flow_tbl_destroy(struct flow_table *table); int ovs_flow_tbl_flush(struct flow_table *flow_table); int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, - struct sw_flow_mask *mask); + const struct sw_flow_mask *mask); void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow); int ovs_flow_tbl_num_masks(const struct flow_table *table); struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table, @@ -77,9 +77,9 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *, struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *, const struct sw_flow_key *); struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl, - struct sw_flow_match *match); + const struct sw_flow_match *match); bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, - struct sw_flow_match *match); + const struct sw_flow_match *match); void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, const struct sw_flow_mask *mask); diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c index 106a9d80b663e5929ddbce87dd175b995d3113e2..347fa2325b226309b8e3cfb239f2e1d0a6631a50 100644 --- a/net/openvswitch/vport-geneve.c +++ b/net/openvswitch/vport-geneve.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -28,6 +29,8 @@ #include "datapath.h" #include "vport.h" +static struct vport_ops ovs_geneve_vport_ops; + /** * struct geneve_port - Keeps track of open UDP ports * @gs: The socket created for this port number. @@ -65,7 +68,7 @@ static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni) } /* Convert 24 bit VNI to 64 bit tunnel ID. */ -static __be64 vni_to_tunnel_id(__u8 *vni) +static __be64 vni_to_tunnel_id(const __u8 *vni) { #ifdef __BIG_ENDIAN return (vni[0] << 16) | (vni[1] << 8) | vni[2]; @@ -94,7 +97,9 @@ static void geneve_rcv(struct geneve_sock *gs, struct sk_buff *skb) key = vni_to_tunnel_id(geneveh->vni); - ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), key, flags, + ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), + udp_hdr(skb)->source, udp_hdr(skb)->dest, + key, flags, geneveh->options, opts_len); ovs_vport_receive(vport, skb, &tun_info); @@ -225,11 +230,46 @@ static const char *geneve_get_name(const struct vport *vport) return geneve_port->name; } -const struct vport_ops ovs_geneve_vport_ops = { +static int geneve_get_egress_tun_info(struct vport *vport, struct sk_buff *skb, + struct ovs_tunnel_info *egress_tun_info) +{ + struct geneve_port *geneve_port = geneve_vport(vport); + struct net *net = ovs_dp_get_net(vport->dp); + __be16 dport = inet_sk(geneve_port->gs->sock->sk)->inet_sport; + __be16 sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true); + + /* Get tp_src and tp_dst, refer to geneve_build_header().
+ */ + return ovs_tunnel_get_egress_info(egress_tun_info, + ovs_dp_get_net(vport->dp), + OVS_CB(skb)->egress_tun_info, + IPPROTO_UDP, skb->mark, sport, dport); +} + +static struct vport_ops ovs_geneve_vport_ops = { .type = OVS_VPORT_TYPE_GENEVE, .create = geneve_tnl_create, .destroy = geneve_tnl_destroy, .get_name = geneve_get_name, .get_options = geneve_get_options, .send = geneve_tnl_send, + .owner = THIS_MODULE, + .get_egress_tun_info = geneve_get_egress_tun_info, }; + +static int __init ovs_geneve_tnl_init(void) +{ + return ovs_vport_ops_register(&ovs_geneve_vport_ops); +} + +static void __exit ovs_geneve_tnl_exit(void) +{ + ovs_vport_ops_unregister(&ovs_geneve_vport_ops); +} + +module_init(ovs_geneve_tnl_init); +module_exit(ovs_geneve_tnl_exit); + +MODULE_DESCRIPTION("OVS: Geneve switching port"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("vport-type-5"); diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c index 108b82da2fd94169bab809df9d771232fa0c53b5..6b69df545b1da3dba7ffedb92e106855106fd8d6 100644 --- a/net/openvswitch/vport-gre.c +++ b/net/openvswitch/vport-gre.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -45,6 +46,8 @@ #include "datapath.h" #include "vport.h" +static struct vport_ops ovs_gre_vport_ops; + /* Returns the least-significant 32 bits of a __be64. */ static __be32 be64_get_low32(__be64 x) { @@ -105,7 +108,7 @@ static int gre_rcv(struct sk_buff *skb, return PACKET_REJECT; key = key_to_tunnel_id(tpi->key, tpi->seq); - ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), key, + ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), 0, 0, key, filter_tnl_flags(tpi->flags), NULL, 0); ovs_vport_receive(vport, skb, &tun_info); @@ -172,14 +175,10 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) goto err_free_rt; } - if (vlan_tx_tag_present(skb)) { - if (unlikely(!__vlan_put_tag(skb, - skb->vlan_proto, - vlan_tx_tag_get(skb)))) { - err = -ENOMEM; - goto err_free_rt; - } - skb->vlan_tci = 0; + skb = vlan_hwaccel_push_inside(skb); + if (unlikely(!skb)) { + err = -ENOMEM; + goto err_free_rt; } /* Push Tunnel header.
*/ @@ -281,10 +280,38 @@ static void gre_tnl_destroy(struct vport *vport) gre_exit(); } -const struct vport_ops ovs_gre_vport_ops = { +static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb, + struct ovs_tunnel_info *egress_tun_info) +{ + return ovs_tunnel_get_egress_info(egress_tun_info, + ovs_dp_get_net(vport->dp), + OVS_CB(skb)->egress_tun_info, + IPPROTO_GRE, skb->mark, 0, 0); +} + +static struct vport_ops ovs_gre_vport_ops = { .type = OVS_VPORT_TYPE_GRE, .create = gre_create, .destroy = gre_tnl_destroy, .get_name = gre_get_name, .send = gre_tnl_send, + .get_egress_tun_info = gre_get_egress_tun_info, + .owner = THIS_MODULE, }; + +static int __init ovs_gre_tnl_init(void) +{ + return ovs_vport_ops_register(&ovs_gre_vport_ops); +} + +static void __exit ovs_gre_tnl_exit(void) +{ + ovs_vport_ops_unregister(&ovs_gre_vport_ops); +} + +module_init(ovs_gre_tnl_init); +module_exit(ovs_gre_tnl_exit); + +MODULE_DESCRIPTION("OVS: GRE switching port"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("vport-type-3"); diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index 84516126e5f3a74b4b88cd87e1dd316ec11b98d9..6a55f71055051957685b11e8950ba1e4197dce20 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c @@ -36,6 +36,8 @@ struct internal_dev { struct vport *vport; }; +static struct vport_ops ovs_internal_vport_ops; + static struct internal_dev *internal_dev_priv(struct net_device *netdev) { return netdev_priv(netdev); @@ -222,6 +224,11 @@ static int internal_dev_recv(struct vport *vport, struct sk_buff *skb) struct net_device *netdev = netdev_vport_priv(vport)->dev; int len; + if (unlikely(!(netdev->flags & IFF_UP))) { + kfree_skb(skb); + return 0; + } + len = skb->len; skb_dst_drop(skb); @@ -238,7 +245,7 @@ static int internal_dev_recv(struct vport *vport, struct sk_buff *skb) return len; } -const struct vport_ops ovs_internal_vport_ops = { +static struct vport_ops ovs_internal_vport_ops = { .type = OVS_VPORT_TYPE_INTERNAL, .create = internal_dev_create, .destroy = internal_dev_destroy, @@ -261,10 +268,21 @@ struct vport *ovs_internal_dev_get_vport(struct net_device *netdev) int ovs_internal_dev_rtnl_link_register(void) { - return rtnl_link_register(&internal_dev_link_ops); + int err; + + err = rtnl_link_register(&internal_dev_link_ops); + if (err < 0) + return err; + + err = ovs_vport_ops_register(&ovs_internal_vport_ops); + if (err < 0) + rtnl_link_unregister(&internal_dev_link_ops); + + return err; } void ovs_internal_dev_rtnl_link_unregister(void) { + ovs_vport_ops_unregister(&ovs_internal_vport_ops); rtnl_link_unregister(&internal_dev_link_ops); } diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index d21f77d875ba7b690a0c46231e0b539172488562..4776282c64175209924740fbd87a56de8e05b609 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -33,6 +33,8 @@ #include "vport-internal_dev.h" #include "vport-netdev.h" +static struct vport_ops ovs_netdev_vport_ops; + /* Must be called with rcu_read_lock. 
*/ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) { @@ -75,7 +77,7 @@ static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb) return RX_HANDLER_CONSUMED; } -static struct net_device *get_dpdev(struct datapath *dp) +static struct net_device *get_dpdev(const struct datapath *dp) { struct vport *local; @@ -224,10 +226,20 @@ struct vport *ovs_netdev_get_vport(struct net_device *dev) return NULL; } -const struct vport_ops ovs_netdev_vport_ops = { +static struct vport_ops ovs_netdev_vport_ops = { .type = OVS_VPORT_TYPE_NETDEV, .create = netdev_create, .destroy = netdev_destroy, .get_name = ovs_netdev_get_name, .send = netdev_send, }; + +int __init ovs_netdev_init(void) +{ + return ovs_vport_ops_register(&ovs_netdev_vport_ops); +} + +void ovs_netdev_exit(void) +{ + ovs_vport_ops_unregister(&ovs_netdev_vport_ops); +} diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h index 8df01c1127e546d06ba2f8c3fde44b51c58ad0de..6f7038e79c524cc66dc53188992b0ed9ff6c23ed 100644 --- a/net/openvswitch/vport-netdev.h +++ b/net/openvswitch/vport-netdev.h @@ -41,4 +41,7 @@ netdev_vport_priv(const struct vport *vport) const char *ovs_netdev_get_name(const struct vport *); void ovs_netdev_detach_dev(struct vport *); +int __init ovs_netdev_init(void); +void ovs_netdev_exit(void); + #endif /* vport_netdev.h */ diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c index 2735e01dca73a40e4f60027152a79efa435724a7..38f95a52241bd53af8de06b76079e403de8413cf 100644 --- a/net/openvswitch/vport-vxlan.c +++ b/net/openvswitch/vport-vxlan.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -50,6 +51,8 @@ struct vxlan_port { char name[IFNAMSIZ]; }; +static struct vport_ops ovs_vxlan_vport_ops; + static inline struct vxlan_port *vxlan_vport(const struct vport *vport) { return vport_priv(vport); @@ -66,7 +69,9 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni) /* Save outer tunnel values */ iph = ip_hdr(skb); key = cpu_to_be64(ntohl(vx_vni) >> 8); - ovs_flow_tun_info_init(&tun_info, iph, key, TUNNEL_KEY, NULL, 0); + ovs_flow_tun_info_init(&tun_info, iph, + udp_hdr(skb)->source, udp_hdr(skb)->dest, + key, TUNNEL_KEY, NULL, 0); ovs_vport_receive(vport, skb, &tun_info); } @@ -186,17 +191,55 @@ error: return err; } +static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb, + struct ovs_tunnel_info *egress_tun_info) +{ + struct net *net = ovs_dp_get_net(vport->dp); + struct vxlan_port *vxlan_port = vxlan_vport(vport); + __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport; + __be16 src_port; + int port_min; + int port_max; + + inet_get_local_port_range(net, &port_min, &port_max); + src_port = udp_flow_src_port(net, skb, 0, 0, true); + + return ovs_tunnel_get_egress_info(egress_tun_info, net, + OVS_CB(skb)->egress_tun_info, + IPPROTO_UDP, skb->mark, + src_port, dst_port); +} + static const char *vxlan_get_name(const struct vport *vport) { struct vxlan_port *vxlan_port = vxlan_vport(vport); return vxlan_port->name; } -const struct vport_ops ovs_vxlan_vport_ops = { +static struct vport_ops ovs_vxlan_vport_ops = { .type = OVS_VPORT_TYPE_VXLAN, .create = vxlan_tnl_create, .destroy = vxlan_tnl_destroy, .get_name = vxlan_get_name, .get_options = vxlan_get_options, .send = vxlan_tnl_send, + .get_egress_tun_info = vxlan_get_egress_tun_info, + .owner = THIS_MODULE, }; + +static int __init ovs_vxlan_tnl_init(void) +{ + return ovs_vport_ops_register(&ovs_vxlan_vport_ops); +} + 
+static void __exit ovs_vxlan_tnl_exit(void) +{ + ovs_vport_ops_unregister(&ovs_vxlan_vport_ops); +} + +module_init(ovs_vxlan_tnl_init); +module_exit(ovs_vxlan_tnl_exit); + +MODULE_DESCRIPTION("OVS: VXLAN switching port"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("vport-type-4"); diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index 6015802ebe6fee99b0b44c6f39da8f3bcd0f71f7..9584526c077804f21d22726d4bb5bd7cc4260d36 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "datapath.h" #include "vport.h" @@ -36,22 +37,7 @@ static void ovs_vport_record_error(struct vport *, enum vport_err_type err_type); -/* List of statically compiled vport implementations. Don't forget to also - * add yours to the list at the bottom of vport.h. */ -static const struct vport_ops *vport_ops_list[] = { - &ovs_netdev_vport_ops, - &ovs_internal_vport_ops, - -#ifdef CONFIG_OPENVSWITCH_GRE - &ovs_gre_vport_ops, -#endif -#ifdef CONFIG_OPENVSWITCH_VXLAN - &ovs_vxlan_vport_ops, -#endif -#ifdef CONFIG_OPENVSWITCH_GENEVE - &ovs_geneve_vport_ops, -#endif -}; +static LIST_HEAD(vport_ops_list); /* Protected by RCU read lock for reading, ovs_mutex for writing. */ static struct hlist_head *dev_table; @@ -82,12 +68,38 @@ void ovs_vport_exit(void) kfree(dev_table); } -static struct hlist_head *hash_bucket(struct net *net, const char *name) +static struct hlist_head *hash_bucket(const struct net *net, const char *name) { unsigned int hash = jhash(name, strlen(name), (unsigned long) net); return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)]; } +int ovs_vport_ops_register(struct vport_ops *ops) +{ + int err = -EEXIST; + struct vport_ops *o; + + ovs_lock(); + list_for_each_entry(o, &vport_ops_list, list) + if (ops->type == o->type) + goto errout; + + list_add_tail(&ops->list, &vport_ops_list); + err = 0; +errout: + ovs_unlock(); + return err; +} +EXPORT_SYMBOL_GPL(ovs_vport_ops_register); + +void ovs_vport_ops_unregister(struct vport_ops *ops) +{ + ovs_lock(); + list_del(&ops->list); + ovs_unlock(); +} +EXPORT_SYMBOL_GPL(ovs_vport_ops_unregister); + /** * ovs_vport_locate - find a port that has already been created * @@ -95,7 +107,7 @@ static struct hlist_head *hash_bucket(struct net *net, const char *name) * * Must be called with ovs or RCU read lock. 
*/ -struct vport *ovs_vport_locate(struct net *net, const char *name) +struct vport *ovs_vport_locate(const struct net *net, const char *name) { struct hlist_head *bucket = hash_bucket(net, name); struct vport *vport; @@ -153,6 +165,7 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops, return vport; } +EXPORT_SYMBOL_GPL(ovs_vport_alloc); /** * ovs_vport_free - uninitialize and free vport @@ -173,6 +186,18 @@ void ovs_vport_free(struct vport *vport) free_percpu(vport->percpu_stats); kfree(vport); } +EXPORT_SYMBOL_GPL(ovs_vport_free); + +static struct vport_ops *ovs_vport_lookup(const struct vport_parms *parms) +{ + struct vport_ops *ops; + + list_for_each_entry(ops, &vport_ops_list, list) + if (ops->type == parms->type) + return ops; + + return NULL; +} /** * ovs_vport_add - add vport device (for kernel callers) @@ -184,31 +209,40 @@ void ovs_vport_free(struct vport *vport) */ struct vport *ovs_vport_add(const struct vport_parms *parms) { + struct vport_ops *ops; struct vport *vport; - int err = 0; - int i; - for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) { - if (vport_ops_list[i]->type == parms->type) { - struct hlist_head *bucket; + ops = ovs_vport_lookup(parms); + if (ops) { + struct hlist_head *bucket; - vport = vport_ops_list[i]->create(parms); - if (IS_ERR(vport)) { - err = PTR_ERR(vport); - goto out; - } + if (!try_module_get(ops->owner)) + return ERR_PTR(-EAFNOSUPPORT); - bucket = hash_bucket(ovs_dp_get_net(vport->dp), - vport->ops->get_name(vport)); - hlist_add_head_rcu(&vport->hash_node, bucket); + vport = ops->create(parms); + if (IS_ERR(vport)) { + module_put(ops->owner); return vport; } + + bucket = hash_bucket(ovs_dp_get_net(vport->dp), + vport->ops->get_name(vport)); + hlist_add_head_rcu(&vport->hash_node, bucket); + return vport; } - err = -EAFNOSUPPORT; + /* Unlock to attempt module load; return -EAGAIN if the load + * succeeds, since the port addition workflow must then be + * restarted. + */ + ovs_unlock(); + request_module("vport-type-%d", parms->type); + ovs_lock(); -out: - return ERR_PTR(err); + if (!ovs_vport_lookup(parms)) + return ERR_PTR(-EAFNOSUPPORT); + else + return ERR_PTR(-EAGAIN); } /** @@ -242,6 +276,8 @@ void ovs_vport_del(struct vport *vport) hlist_del_rcu(&vport->hash_node); vport->ops->destroy(vport); + + module_put(vport->ops->owner); } /** @@ -344,7 +380,7 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb) * * Must be called with ovs_mutex. */ -int ovs_vport_set_upcall_portids(struct vport *vport, struct nlattr *ids) +int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids) { struct vport_portids *old, *vport_portids; @@ -435,7 +471,7 @@ u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb) * skb->data should point to the Ethernet header.
*/ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb, - struct ovs_tunnel_info *tun_info) + const struct ovs_tunnel_info *tun_info) { struct pcpu_sw_netstats *stats; struct sw_flow_key key; @@ -457,6 +493,7 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb, } ovs_dp_process_packet(skb, &key); } +EXPORT_SYMBOL_GPL(ovs_vport_receive); /** * ovs_vport_send - send a packet on a device @@ -535,3 +572,65 @@ void ovs_vport_deferred_free(struct vport *vport) call_rcu(&vport->rcu, free_vport_rcu); } +EXPORT_SYMBOL_GPL(ovs_vport_deferred_free); + +int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info, + struct net *net, + const struct ovs_tunnel_info *tun_info, + u8 ipproto, + u32 skb_mark, + __be16 tp_src, + __be16 tp_dst) +{ + const struct ovs_key_ipv4_tunnel *tun_key; + struct rtable *rt; + struct flowi4 fl; + + if (unlikely(!tun_info)) + return -EINVAL; + + tun_key = &tun_info->tunnel; + + /* Route lookup to get the source IP address. + * This may need to change if the corresponding logic in the + * vport ops changes. + */ + memset(&fl, 0, sizeof(fl)); + fl.daddr = tun_key->ipv4_dst; + fl.saddr = tun_key->ipv4_src; + fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos); + fl.flowi4_mark = skb_mark; + fl.flowi4_proto = ipproto; + + rt = ip_route_output_key(net, &fl); + if (IS_ERR(rt)) + return PTR_ERR(rt); + + ip_rt_put(rt); + + /* Generate egress_tun_info based on tun_info, + * saddr, tp_src and tp_dst. + */ + __ovs_flow_tun_info_init(egress_tun_info, + fl.saddr, tun_key->ipv4_dst, + tun_key->ipv4_tos, + tun_key->ipv4_ttl, + tp_src, tp_dst, + tun_key->tun_id, + tun_key->tun_flags, + tun_info->options, + tun_info->options_len); + + return 0; +} +EXPORT_SYMBOL_GPL(ovs_tunnel_get_egress_info); + +int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb, + struct ovs_tunnel_info *info) +{ + /* get_egress_tun_info() is only implemented on tunnel ports.
*/ + if (unlikely(!vport->ops->get_egress_tun_info)) + return -EINVAL; + + return vport->ops->get_egress_tun_info(vport, skb, info); +} diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index 8942125de3a6c051e9d8d818f747bd3cce023be4..99c8e71d9e6cb1d2d73bf76829b123811359ed39 100644 --- a/net/openvswitch/vport.h +++ b/net/openvswitch/vport.h @@ -45,19 +45,29 @@ void ovs_vport_exit(void); struct vport *ovs_vport_add(const struct vport_parms *); void ovs_vport_del(struct vport *); -struct vport *ovs_vport_locate(struct net *net, const char *name); +struct vport *ovs_vport_locate(const struct net *net, const char *name); void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *); int ovs_vport_set_options(struct vport *, struct nlattr *options); int ovs_vport_get_options(const struct vport *, struct sk_buff *); -int ovs_vport_set_upcall_portids(struct vport *, struct nlattr *pids); +int ovs_vport_set_upcall_portids(struct vport *, const struct nlattr *pids); int ovs_vport_get_upcall_portids(const struct vport *, struct sk_buff *); u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *); int ovs_vport_send(struct vport *, struct sk_buff *); +int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info, + struct net *net, + const struct ovs_tunnel_info *tun_info, + u8 ipproto, + u32 skb_mark, + __be16 tp_src, + __be16 tp_dst); +int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb, + struct ovs_tunnel_info *info); + /* The following definitions are for implementers of vport devices: */ struct vport_err_stats { @@ -146,6 +156,8 @@ struct vport_parms { * @get_name: Get the device's name. * @send: Send a packet on the device. Returns the length of the packet sent, * zero for dropped packets or negative for error. + * @get_egress_tun_info: Get the egress tunnel 5-tuple and other info for + * a packet. */ struct vport_ops { enum ovs_vport_type type; @@ -161,6 +173,11 @@ struct vport_ops { const char *(*get_name)(const struct vport *); int (*send)(struct vport *, struct sk_buff *); + int (*get_egress_tun_info)(struct vport *, struct sk_buff *, + struct ovs_tunnel_info *); + + struct module *owner; + struct list_head list; }; enum vport_err_type { @@ -207,15 +224,7 @@ static inline struct vport *vport_from_priv(void *priv) } void ovs_vport_receive(struct vport *, struct sk_buff *, - struct ovs_tunnel_info *); - -/* List of statically compiled vport implementations. Don't forget to also - * add yours to the list at the top of vport.c. 
*/ -extern const struct vport_ops ovs_netdev_vport_ops; -extern const struct vport_ops ovs_internal_vport_ops; -extern const struct vport_ops ovs_gre_vport_ops; -extern const struct vport_ops ovs_vxlan_vport_ops; -extern const struct vport_ops ovs_geneve_vport_ops; + const struct ovs_tunnel_info *); static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len) @@ -224,4 +233,7 @@ static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb, skb->csum = csum_add(skb->csum, csum_partial(start, len, 0)); } +int ovs_vport_ops_register(struct vport_ops *ops); +void ovs_vport_ops_unregister(struct vport_ops *ops); + #endif /* vport.h */ diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 586229a14ad3984f154753d45ac357613c5a442a..e52a4478568198e88d1d8f8ee6a8506284c36323 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1676,7 +1676,7 @@ retry: if (len < hhlen) skb_reset_network_header(skb); } - err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); + err = memcpy_from_msg(skb_put(skb, len), msg, len); if (err) goto out_free; goto retry; @@ -2095,6 +2095,18 @@ static void tpacket_destruct_skb(struct sk_buff *skb) sock_wfree(skb); } +static bool ll_header_truncated(const struct net_device *dev, int len) +{ + /* net device doesn't like empty head */ + if (unlikely(len <= dev->hard_header_len)) { + net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n", + current->comm, len, dev->hard_header_len); + return true; + } + + return false; +} + static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, void *frame, struct net_device *dev, int size_max, __be16 proto, unsigned char *addr, int hlen) @@ -2170,12 +2182,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, if (unlikely(err < 0)) return -EINVAL; } else if (dev->hard_header_len) { - /* net device doesn't like empty head */ - if (unlikely(tp_len <= dev->hard_header_len)) { - pr_err("packet size is too short (%d < %d)\n", - tp_len, dev->hard_header_len); + if (ll_header_truncated(dev, tp_len)) return -EINVAL; - } skb_push(skb, dev->hard_header_len); err = skb_store_bits(skb, 0, data, @@ -2400,6 +2408,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) unsigned short gso_type = 0; int hlen, tlen; int extra_len = 0; + ssize_t n; /* * Get and verify the address.
@@ -2438,9 +2447,9 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) len -= vnet_hdr_len; - err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov, - vnet_hdr_len); - if (err < 0) + err = -EFAULT; + n = copy_from_iter(&vnet_hdr, vnet_hdr_len, &msg->msg_iter); + if (n != vnet_hdr_len) goto out_unlock; if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && @@ -2503,12 +2512,17 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) skb_set_network_header(skb, reserve); err = -EINVAL; - if (sock->type == SOCK_DGRAM && - (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0) - goto out_free; + if (sock->type == SOCK_DGRAM) { + offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); + if (unlikely(offset < 0)) + goto out_free; + } else { + if (ll_header_truncated(dev, len)) + goto out_free; + } /* Returns -EFAULT on error */ - err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len); + err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len); if (err) goto out_free; @@ -2946,8 +2960,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID; } /* else everything is zero */ - err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr, - vnet_hdr_len); + err = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_len); if (err < 0) goto out_free; } @@ -2962,7 +2975,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, msg->msg_flags |= MSG_TRUNC; } - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto out_free; diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c index 5a940dbd74a3bbf6ec3cadb82cbe47a4dbee8f15..32ab87d3482864279ed768c30a965a31f9ffc561 100644 --- a/net/phonet/af_phonet.c +++ b/net/phonet/af_phonet.c @@ -426,16 +426,17 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev, out_dev = phonet_route_output(net, pn_sockaddr_get_addr(&sa)); if (!out_dev) { - LIMIT_NETDEBUG(KERN_WARNING"No Phonet route to %02X\n", - pn_sockaddr_get_addr(&sa)); + net_dbg_ratelimited("No Phonet route to %02X\n", + pn_sockaddr_get_addr(&sa)); goto out; } __skb_push(skb, sizeof(struct phonethdr)); skb->dev = out_dev; if (out_dev == dev) { - LIMIT_NETDEBUG(KERN_ERR"Phonet loop to %02X on %s\n", - pn_sockaddr_get_addr(&sa), dev->name); + net_dbg_ratelimited("Phonet loop to %02X on %s\n", + pn_sockaddr_get_addr(&sa), + dev->name); goto out_dev; } /* Some drivers (e.g.
TUN) do not allocate HW header space */ diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c index 290352c0e6b476c894bd33928c4bca6d9c862aae..26054b4b467c63d3bce92b8226ec767ad65ad960 100644 --- a/net/phonet/datagram.c +++ b/net/phonet/datagram.c @@ -109,7 +109,7 @@ static int pn_sendmsg(struct kiocb *iocb, struct sock *sk, return err; skb_reserve(skb, MAX_PHONET_HEADER); - err = memcpy_fromiovec((void *)skb_put(skb, len), msg->msg_iov, len); + err = memcpy_from_msg((void *)skb_put(skb, len), msg, len); if (err < 0) { kfree_skb(skb); return err; @@ -150,7 +150,7 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk, copylen = len; } - rval = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copylen); + rval = skb_copy_datagram_msg(skb, 0, msg, copylen); if (rval) { rval = -EFAULT; goto out; diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c index e9a83a637185aa227c6b0b1a2dc6ebc50aab7385..fa8237fdc57b26dfc0bbc2253f69e6f6df731afb 100644 --- a/net/phonet/pep-gprs.c +++ b/net/phonet/pep-gprs.c @@ -203,8 +203,7 @@ static netdev_tx_t gprs_xmit(struct sk_buff *skb, struct net_device *dev) len = skb->len; err = pep_write(sk, skb); if (err) { - LIMIT_NETDEBUG(KERN_WARNING"%s: TX error (%d)\n", - dev->name, err); + net_dbg_ratelimited("%s: TX error (%d)\n", dev->name, err); dev->stats.tx_aborted_errors++; dev->stats.tx_errors++; } else { diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 70a547ea51777784fdd545b4c86d7da69991a53a..5d3f2b7507d45a9b78ba0fd412cc3a8f57ad9178 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -272,8 +272,8 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) hdr = pnp_hdr(skb); if (hdr->data[0] != PN_PEP_TYPE_COMMON) { - LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n", - (unsigned int)hdr->data[0]); + net_dbg_ratelimited("Phonet unknown PEP type: %u\n", + (unsigned int)hdr->data[0]); return -EOPNOTSUPP; } @@ -304,8 +304,8 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) break; default: - LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP indication: %u\n", - (unsigned int)hdr->data[1]); + net_dbg_ratelimited("Phonet unknown PEP indication: %u\n", + (unsigned int)hdr->data[1]); return -EOPNOTSUPP; } if (wake) @@ -451,8 +451,8 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) break; default: - LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP message: %u\n", - hdr->message_id); + net_dbg_ratelimited("Phonet unknown PEP message: %u\n", + hdr->message_id); err = -EINVAL; } out: @@ -1141,7 +1141,7 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk, return err; skb_reserve(skb, MAX_PHONET_HEADER + 3 + pn->aligned); - err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); + err = memcpy_from_msg(skb_put(skb, len), msg, len); if (err < 0) goto outfree; @@ -1296,7 +1296,7 @@ copy: else len = skb->len; - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len); + err = skb_copy_datagram_msg(skb, 0, msg, len); if (!err) err = (flags & MSG_TRUNC) ? 
skb->len : len; diff --git a/net/rds/ib.h b/net/rds/ib.h index 7280ab8810c203ed574131c4c9b8a5ea10cf292b..c36d713229e0f5c5a1b43fe227a1e04480ba100d 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h @@ -316,8 +316,7 @@ int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic); void rds_ib_recv_free_caches(struct rds_ib_connection *ic); void rds_ib_recv_refill(struct rds_connection *conn, int prefill); void rds_ib_inc_free(struct rds_incoming *inc); -int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, - size_t size); +int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to); void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context); void rds_ib_recv_tasklet_fn(unsigned long data); void rds_ib_recv_init_ring(struct rds_ib_connection *ic); diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index d67de453c35aae7257bd29e9791adc2595f6a7d7..1b981a4e42c214d575a838b096da368a7f0316c6 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c @@ -472,15 +472,12 @@ static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache return head; } -int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov, - size_t size) +int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to) { struct rds_ib_incoming *ibinc; struct rds_page_frag *frag; - struct iovec *iov = first_iov; unsigned long to_copy; unsigned long frag_off = 0; - unsigned long iov_off = 0; int copied = 0; int ret; u32 len; @@ -489,37 +486,25 @@ int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov, frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item); len = be32_to_cpu(inc->i_hdr.h_len); - while (copied < size && copied < len) { + while (iov_iter_count(to) && copied < len) { if (frag_off == RDS_FRAG_SIZE) { frag = list_entry(frag->f_item.next, struct rds_page_frag, f_item); frag_off = 0; } - while (iov_off == iov->iov_len) { - iov_off = 0; - iov++; - } - - to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off); - to_copy = min_t(size_t, to_copy, size - copied); + to_copy = min_t(unsigned long, iov_iter_count(to), + RDS_FRAG_SIZE - frag_off); to_copy = min_t(unsigned long, to_copy, len - copied); - rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag " - "[%p, %u] + %lu\n", - to_copy, iov->iov_base, iov->iov_len, iov_off, - sg_page(&frag->f_sg), frag->f_sg.offset, frag_off); - /* XXX needs + offset for multiple recvs per page */ - ret = rds_page_copy_to_user(sg_page(&frag->f_sg), - frag->f_sg.offset + frag_off, - iov->iov_base + iov_off, - to_copy); - if (ret) { - copied = ret; - break; - } + rds_stats_add(s_copy_to_user, to_copy); + ret = copy_page_to_iter(sg_page(&frag->f_sg), + frag->f_sg.offset + frag_off, + to_copy, + to); + if (ret != to_copy) + return -EFAULT; - iov_off += to_copy; frag_off += to_copy; copied += to_copy; } diff --git a/net/rds/iw.h b/net/rds/iw.h index 04ce3b193f79022b7b585906a60432696fcb5dd5..cbe6674e31ee52f6c9fd4380e3f0942127ede7c9 100644 --- a/net/rds/iw.h +++ b/net/rds/iw.h @@ -325,8 +325,7 @@ int rds_iw_recv(struct rds_connection *conn); int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp, gfp_t page_gfp, int prefill); void rds_iw_inc_free(struct rds_incoming *inc); -int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, - size_t size); +int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to); void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context); void rds_iw_recv_tasklet_fn(unsigned long data); void 
rds_iw_recv_init_ring(struct rds_iw_connection *ic); diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c index aa8bf6786008afdb6de72221d4678628f93469a0..a66d1794b2d0472e511a179ae9872c2766fb8dd8 100644 --- a/net/rds/iw_recv.c +++ b/net/rds/iw_recv.c @@ -303,15 +303,12 @@ void rds_iw_inc_free(struct rds_incoming *inc) BUG_ON(atomic_read(&rds_iw_allocation) < 0); } -int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov, - size_t size) +int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to) { struct rds_iw_incoming *iwinc; struct rds_page_frag *frag; - struct iovec *iov = first_iov; unsigned long to_copy; unsigned long frag_off = 0; - unsigned long iov_off = 0; int copied = 0; int ret; u32 len; @@ -320,37 +317,25 @@ int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov, frag = list_entry(iwinc->ii_frags.next, struct rds_page_frag, f_item); len = be32_to_cpu(inc->i_hdr.h_len); - while (copied < size && copied < len) { + while (iov_iter_count(to) && copied < len) { if (frag_off == RDS_FRAG_SIZE) { frag = list_entry(frag->f_item.next, struct rds_page_frag, f_item); frag_off = 0; } - while (iov_off == iov->iov_len) { - iov_off = 0; - iov++; - } - - to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off); - to_copy = min_t(size_t, to_copy, size - copied); + to_copy = min_t(unsigned long, iov_iter_count(to), + RDS_FRAG_SIZE - frag_off); to_copy = min_t(unsigned long, to_copy, len - copied); - rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag " - "[%p, %lu] + %lu\n", - to_copy, iov->iov_base, iov->iov_len, iov_off, - frag->f_page, frag->f_offset, frag_off); - /* XXX needs + offset for multiple recvs per page */ - ret = rds_page_copy_to_user(frag->f_page, - frag->f_offset + frag_off, - iov->iov_base + iov_off, - to_copy); - if (ret) { - copied = ret; - break; - } + rds_stats_add(s_copy_to_user, to_copy); + ret = copy_page_to_iter(frag->f_page, + frag->f_offset + frag_off, + to_copy, + to); + if (ret != to_copy) + return -EFAULT; - iov_off += to_copy; frag_off += to_copy; copied += to_copy; } diff --git a/net/rds/message.c b/net/rds/message.c index aba232f9f3081968081edb0ed6ece382e954b653..ff2202218187530378b270b0df4838286cb9fc14 100644 --- a/net/rds/message.c +++ b/net/rds/message.c @@ -264,75 +264,54 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in return rm; } -int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov, - size_t total_len) +int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from) { unsigned long to_copy; - unsigned long iov_off; unsigned long sg_off; - struct iovec *iov; struct scatterlist *sg; int ret = 0; - rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); + rm->m_inc.i_hdr.h_len = cpu_to_be32(iov_iter_count(from)); /* * now allocate and copy in the data payload. */ sg = rm->data.op_sg; - iov = first_iov; - iov_off = 0; sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. 
*/ - while (total_len) { + while (iov_iter_count(from)) { if (!sg_page(sg)) { - ret = rds_page_remainder_alloc(sg, total_len, + ret = rds_page_remainder_alloc(sg, iov_iter_count(from), GFP_HIGHUSER); if (ret) - goto out; + return ret; rm->data.op_nents++; sg_off = 0; } - while (iov_off == iov->iov_len) { - iov_off = 0; - iov++; - } - - to_copy = min(iov->iov_len - iov_off, sg->length - sg_off); - to_copy = min_t(size_t, to_copy, total_len); - - rdsdebug("copying %lu bytes from user iov [%p, %zu] + %lu to " - "sg [%p, %u, %u] + %lu\n", - to_copy, iov->iov_base, iov->iov_len, iov_off, - (void *)sg_page(sg), sg->offset, sg->length, sg_off); + to_copy = min_t(unsigned long, iov_iter_count(from), + sg->length - sg_off); - ret = rds_page_copy_from_user(sg_page(sg), sg->offset + sg_off, - iov->iov_base + iov_off, - to_copy); - if (ret) - goto out; + rds_stats_add(s_copy_from_user, to_copy); + ret = copy_page_from_iter(sg_page(sg), sg->offset + sg_off, + to_copy, from); + if (ret != to_copy) + return -EFAULT; - iov_off += to_copy; - total_len -= to_copy; sg_off += to_copy; if (sg_off == sg->length) sg++; } -out: return ret; } -int rds_message_inc_copy_to_user(struct rds_incoming *inc, - struct iovec *first_iov, size_t size) +int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to) { struct rds_message *rm; - struct iovec *iov; struct scatterlist *sg; unsigned long to_copy; - unsigned long iov_off; unsigned long vec_off; int copied; int ret; @@ -341,36 +320,20 @@ int rds_message_inc_copy_to_user(struct rds_incoming *inc, rm = container_of(inc, struct rds_message, m_inc); len = be32_to_cpu(rm->m_inc.i_hdr.h_len); - iov = first_iov; - iov_off = 0; sg = rm->data.op_sg; vec_off = 0; copied = 0; - while (copied < size && copied < len) { - while (iov_off == iov->iov_len) { - iov_off = 0; - iov++; - } - - to_copy = min(iov->iov_len - iov_off, sg->length - vec_off); - to_copy = min_t(size_t, to_copy, size - copied); + while (iov_iter_count(to) && copied < len) { + to_copy = min(iov_iter_count(to), sg->length - vec_off); to_copy = min_t(unsigned long, to_copy, len - copied); - rdsdebug("copying %lu bytes to user iov [%p, %zu] + %lu to " - "sg [%p, %u, %u] + %lu\n", - to_copy, iov->iov_base, iov->iov_len, iov_off, - sg_page(sg), sg->offset, sg->length, vec_off); - - ret = rds_page_copy_to_user(sg_page(sg), sg->offset + vec_off, - iov->iov_base + iov_off, - to_copy); - if (ret) { - copied = ret; - break; - } + rds_stats_add(s_copy_to_user, to_copy); + ret = copy_page_to_iter(sg_page(sg), sg->offset + vec_off, + to_copy, to); + if (ret != to_copy) + return -EFAULT; - iov_off += to_copy; vec_off += to_copy; copied += to_copy; diff --git a/net/rds/rds.h b/net/rds/rds.h index 48f8ffc60f8f1cee8e63448fe3aa7aaac0d5d4c1..c2a5eef41343c816f70e6dc16fc7a2fa9ea2d684 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -431,8 +431,7 @@ struct rds_transport { int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op); int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op); int (*recv)(struct rds_connection *conn); - int (*inc_copy_to_user)(struct rds_incoming *inc, struct iovec *iov, - size_t size); + int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to); void (*inc_free)(struct rds_incoming *inc); int (*cm_handle_connect)(struct rdma_cm_id *cm_id, @@ -657,8 +656,7 @@ rds_conn_connecting(struct rds_connection *conn) /* message.c */ struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp); struct scatterlist *rds_message_alloc_sgs(struct rds_message 
*rm, int nents); -int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov, - size_t total_len); +int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from); struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len); void rds_message_populate_header(struct rds_header *hdr, __be16 sport, __be16 dport, u64 seq); @@ -667,8 +665,7 @@ int rds_message_add_extension(struct rds_header *hdr, int rds_message_next_extension(struct rds_header *hdr, unsigned int *pos, void *buf, unsigned int *buflen); int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset); -int rds_message_inc_copy_to_user(struct rds_incoming *inc, - struct iovec *first_iov, size_t size); +int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to); void rds_message_inc_free(struct rds_incoming *inc); void rds_message_addref(struct rds_message *rm); void rds_message_put(struct rds_message *rm); diff --git a/net/rds/recv.c b/net/rds/recv.c index bd82522534fc52f1705c7833d1b443c4ca695398..f9ec1acd801cb182372e38a6f3c26d1dbe535137 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -414,6 +414,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, goto out; while (1) { + struct iov_iter save; /* If there are pending notifications, do those - and nothing else */ if (!list_empty(&rs->rs_notify_queue)) { ret = rds_notify_queue_get(rs, msg); @@ -449,8 +450,8 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, rdsdebug("copying inc %p from %pI4:%u to user\n", inc, &inc->i_conn->c_faddr, ntohs(inc->i_hdr.h_sport)); - ret = inc->i_conn->c_trans->inc_copy_to_user(inc, msg->msg_iov, - size); + save = msg->msg_iter; + ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter); if (ret < 0) break; @@ -463,6 +464,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, rds_inc_put(inc); inc = NULL; rds_stats_inc(s_recv_deliver_raced); + msg->msg_iter = save; continue; } diff --git a/net/rds/send.c b/net/rds/send.c index 0a64541020b0b52edb0cf197a55ff12cc36ccb8e..42f65d4305c88dc5db279fec71d5fb844e708cc1 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -826,7 +826,7 @@ static int rds_rm_size(struct msghdr *msg, int data_len) int cmsg_groups = 0; int retval; - for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { + for_each_cmsghdr(cmsg, msg) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; @@ -878,7 +878,7 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm, struct cmsghdr *cmsg; int ret = 0; - for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { + for_each_cmsghdr(cmsg, msg) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; @@ -982,7 +982,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, ret = -ENOMEM; goto out; } - ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len); + ret = rds_message_copy_from_user(rm, &msg->msg_iter); if (ret) goto out; } diff --git a/net/rds/tcp.h b/net/rds/tcp.h index 65637491f72857954207dc7fa974335110d44ef4..0dbdd37162da34ea33eddbd610d90aa78b32b93c 100644 --- a/net/rds/tcp.h +++ b/net/rds/tcp.h @@ -69,8 +69,7 @@ void rds_tcp_recv_exit(void); void rds_tcp_data_ready(struct sock *sk); int rds_tcp_recv(struct rds_connection *conn); void rds_tcp_inc_free(struct rds_incoming *inc); -int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, - size_t size); +int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct 
iov_iter *to); /* tcp_send.c */ void rds_tcp_xmit_prepare(struct rds_connection *conn); diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c index 9ae6e0a264ec0c52d2d495da66d1acdfb36f0917..fbc5ef88bc0e692ea4cf9cb59f6163905510928e 100644 --- a/net/rds/tcp_recv.c +++ b/net/rds/tcp_recv.c @@ -59,50 +59,30 @@ void rds_tcp_inc_free(struct rds_incoming *inc) /* * this is pretty lame, but, whatever. */ -int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov, - size_t size) +int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to) { struct rds_tcp_incoming *tinc; - struct iovec *iov, tmp; struct sk_buff *skb; - unsigned long to_copy, skb_off; int ret = 0; - if (size == 0) + if (!iov_iter_count(to)) goto out; tinc = container_of(inc, struct rds_tcp_incoming, ti_inc); - iov = first_iov; - tmp = *iov; skb_queue_walk(&tinc->ti_skb_list, skb) { - skb_off = 0; - while (skb_off < skb->len) { - while (tmp.iov_len == 0) { - iov++; - tmp = *iov; - } - - to_copy = min(tmp.iov_len, size); + unsigned long to_copy, skb_off; + for (skb_off = 0; skb_off < skb->len; skb_off += to_copy) { + to_copy = iov_iter_count(to); to_copy = min(to_copy, skb->len - skb_off); - rdsdebug("ret %d size %zu skb %p skb_off %lu " - "skblen %d iov_base %p iov_len %zu cpy %lu\n", - ret, size, skb, skb_off, skb->len, - tmp.iov_base, tmp.iov_len, to_copy); - - /* modifies tmp as it copies */ - if (skb_copy_datagram_iovec(skb, skb_off, &tmp, - to_copy)) { - ret = -EFAULT; - goto out; - } + if (skb_copy_datagram_iter(skb, skb_off, to, to_copy)) + return -EFAULT; rds_stats_add(s_copy_to_user, to_copy); - size -= to_copy; ret += to_copy; - skb_off += to_copy; - if (size == 0) + + if (!iov_iter_count(to)) goto out; } } diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index a85c1a086ae44f71b48be0112e08ae5164ac2c9e..43bac7c4dd9e62b136b97ad2fa99d81dbd39bf11 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -1121,7 +1121,7 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock, skb_reset_transport_header(skb); skb_put(skb, len); - err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); + err = memcpy_from_msg(skb_transport_header(skb), msg, len); if (err) { kfree_skb(skb); return err; @@ -1249,7 +1249,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, msg->msg_flags |= MSG_TRUNC; } - skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + skb_copy_datagram_msg(skb, 0, msg, copied); if (msg->msg_name) { struct sockaddr_rose *srose; diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c index 0b4b9a79f5abd0fb24e413551550aba25e7ce8e4..e1a9373e59799fd2a9cd998fbdc4399d2d021f6a 100644 --- a/net/rxrpc/ar-output.c +++ b/net/rxrpc/ar-output.c @@ -45,7 +45,7 @@ static int rxrpc_sendmsg_cmsg(struct rxrpc_sock *rx, struct msghdr *msg, if (msg->msg_controllen == 0) return -EINVAL; - for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { + for_each_cmsghdr(cmsg, msg) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; @@ -531,14 +531,12 @@ static int rxrpc_send_data(struct kiocb *iocb, struct rxrpc_skb_priv *sp; unsigned char __user *from; struct sk_buff *skb; - struct iovec *iov; + const struct iovec *iov; struct sock *sk = &rx->sk; long timeo; bool more; int ret, ioc, segment, copied; - _enter(",,,{%zu},%zu", msg->msg_iovlen, len); - timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); /* this should be in poll */ @@ -547,8 +545,8 @@ static int rxrpc_send_data(struct kiocb *iocb, if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) return 
-EPIPE; - iov = msg->msg_iov; - ioc = msg->msg_iovlen - 1; + iov = msg->msg_iter.iov; + ioc = msg->msg_iter.nr_segs - 1; from = iov->iov_base; segment = iov->iov_len; iov++; diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c index e9aaa65c07784cf9991f5bef279e4dab49a6912f..4575485ad1b4d02796717879e09ad9078e232617 100644 --- a/net/rxrpc/ar-recvmsg.c +++ b/net/rxrpc/ar-recvmsg.c @@ -180,7 +180,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock, if (copy > len - copied) copy = len - copied; - ret = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copy); + ret = skb_copy_datagram_msg(skb, offset, msg, copy); if (ret < 0) goto copy_error; diff --git a/net/sched/Kconfig b/net/sched/Kconfig index a1a8e29e5fc995abdda1fe817ca40e12c85a8ecd..c54c9d9d1ffb814e7f4ead7e1db3dbf6c4a9adb7 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -22,8 +22,9 @@ menuconfig NET_SCHED This code is considered to be experimental. To administer these schedulers, you'll need the user-level utilities - from the package iproute2+tc at . - That package also contains some documentation; for more, check out + from the package iproute2+tc at + . That package + also contains some documentation; for more, check out . This Quality of Service (QoS) support will enable you to use @@ -336,7 +337,7 @@ config NET_SCH_PLUG of virtual machines by allowing the generated network output to be rolled back if needed. - For more information, please refer to http://wiki.xensource.com/xenwiki/Remus + For more information, please refer to Say Y here if you are using this kernel for Xen dom0 and want to protect Xen guests with Remus. @@ -686,6 +687,17 @@ config NET_ACT_CSUM To compile this code as a module, choose M here: the module will be called act_csum. +config NET_ACT_VLAN + tristate "Vlan manipulation" + depends on NET_CLS_ACT + ---help--- + Say Y here to push or pop vlan headers. + + If unsure, say N. + + To compile this code as a module, choose M here: the + module will be called act_vlan. + config NET_CLS_IND bool "Incoming device classification" depends on NET_CLS_U32 || NET_CLS_FW diff --git a/net/sched/Makefile b/net/sched/Makefile index 0a869a11f3e66cdd9c200541925e95cb311121ab..679f24ae7f933d59298be54ca96b4728b9f7280b 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o obj-$(CONFIG_NET_ACT_SKBEDIT) += act_skbedit.o obj-$(CONFIG_NET_ACT_CSUM) += act_csum.o +obj-$(CONFIG_NET_ACT_VLAN) += act_vlan.o obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index d6bcbd9f7791eb25cd250358b21b73b7bb80c3a0..7fffc2272701adb42d109318a80e8fef6fc3289f 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -1,5 +1,5 @@ /* - * net/sched/gact.c Generic actions + * net/sched/act_gact.c Generic actions * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 8a64a0734aeebd29894111b4e93d288076b1df35..cbc8dd7dd48a50e77fdafa7b8cf4041659995cbb 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -1,5 +1,5 @@ /* - * net/sched/ipt.c iptables target interface + * net/sched/act_ipt.c iptables target interface * *TODO: Add other tables. 
For now we only support the ipv4 table targets * diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index eb48306033d91ef131fd67fcd432f73ed55a43cb..5953517ec059aabf3613979d06b52ba81c38c483 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -1,5 +1,5 @@ /* - * net/sched/mirred.c packet mirroring and redirect actions + * net/sched/act_mirred.c packet mirroring and redirect actions * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 5f9bcb2e080bcd666c7c374a837916a227a0e41c..59649d588d79d6d05c47e3a111fa1dd0397c7342 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -1,5 +1,5 @@ /* - * net/sched/pedit.c Generic packet editor + * net/sched/act_pedit.c Generic packet editor * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 69791ca77a05f6231b1349bce5aeb38aa018bd8c..9a1c42a43f92e7d1015fcf153cb2a761abfe2263 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -1,5 +1,5 @@ /* - * net/sched/police.c Input police filter. + * net/sched/act_police.c Input police filter * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 992c2317ce8871c84c45f2d387c5a3b5408d11fb..6a8d9488613a76d9cb2bd03e932ef95487ca0745 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -1,5 +1,5 @@ /* - * net/sched/simp.c Simple example of an action + * net/sched/act_simple.c Simple example of an action * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c new file mode 100644 index 0000000000000000000000000000000000000000..d735ecf0b1a78d3fac6ac80b95931cc6cf6caba0 --- /dev/null +++ b/net/sched/act_vlan.c @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2014 Jiri Pirko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+
+#include <linux/tc_act/tc_vlan.h>
+#include <net/tc_act/tc_vlan.h>
+
+#define VLAN_TAB_MASK 15
+
+static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
+		    struct tcf_result *res)
+{
+	struct tcf_vlan *v = a->priv;
+	int action;
+	int err;
+
+	spin_lock(&v->tcf_lock);
+	v->tcf_tm.lastuse = jiffies;
+	bstats_update(&v->tcf_bstats, skb);
+	action = v->tcf_action;
+
+	switch (v->tcfv_action) {
+	case TCA_VLAN_ACT_POP:
+		err = skb_vlan_pop(skb);
+		if (err)
+			goto drop;
+		break;
+	case TCA_VLAN_ACT_PUSH:
+		err = skb_vlan_push(skb, v->tcfv_push_proto, v->tcfv_push_vid);
+		if (err)
+			goto drop;
+		break;
+	default:
+		BUG();
+	}
+
+	goto unlock;
+
+drop:
+	action = TC_ACT_SHOT;
+	v->tcf_qstats.drops++;
+unlock:
+	spin_unlock(&v->tcf_lock);
+	return action;
+}
+
+static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
+	[TCA_VLAN_PARMS] = { .len = sizeof(struct tc_vlan) },
+	[TCA_VLAN_PUSH_VLAN_ID] = { .type = NLA_U16 },
+	[TCA_VLAN_PUSH_VLAN_PROTOCOL] = { .type = NLA_U16 },
+};
+
+static int tcf_vlan_init(struct net *net, struct nlattr *nla,
+			 struct nlattr *est, struct tc_action *a,
+			 int ovr, int bind)
+{
+	struct nlattr *tb[TCA_VLAN_MAX + 1];
+	struct tc_vlan *parm;
+	struct tcf_vlan *v;
+	int action;
+	__be16 push_vid = 0;
+	__be16 push_proto = 0;
+	int ret = 0;
+	int err;
+
+	if (!nla)
+		return -EINVAL;
+
+	err = nla_parse_nested(tb, TCA_VLAN_MAX, nla, vlan_policy);
+	if (err < 0)
+		return err;
+
+	if (!tb[TCA_VLAN_PARMS])
+		return -EINVAL;
+	parm = nla_data(tb[TCA_VLAN_PARMS]);
+	switch (parm->v_action) {
+	case TCA_VLAN_ACT_POP:
+		break;
+	case TCA_VLAN_ACT_PUSH:
+		if (!tb[TCA_VLAN_PUSH_VLAN_ID])
+			return -EINVAL;
+		push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
+		if (push_vid >= VLAN_VID_MASK)
+			return -ERANGE;
+
+		if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) {
+			push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]);
+			switch (push_proto) {
+			case htons(ETH_P_8021Q):
+			case htons(ETH_P_8021AD):
+				break;
+			default:
+				return -EPROTONOSUPPORT;
+			}
+		} else {
+			push_proto = htons(ETH_P_8021Q);
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	action = parm->v_action;
+
+	if (!tcf_hash_check(parm->index, a, bind)) {
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*v), bind);
+		if (ret)
+			return ret;
+
+		ret = ACT_P_CREATED;
+	} else {
+		if (bind)
+			return 0;
+		tcf_hash_release(a, bind);
+		if (!ovr)
+			return -EEXIST;
+	}
+
+	v = to_vlan(a);
+
+	spin_lock_bh(&v->tcf_lock);
+
+	v->tcfv_action = action;
+	v->tcfv_push_vid = push_vid;
+	v->tcfv_push_proto = push_proto;
+
+	v->tcf_action = parm->action;
+
+	spin_unlock_bh(&v->tcf_lock);
+
+	if (ret == ACT_P_CREATED)
+		tcf_hash_insert(a);
+	return ret;
+}
+
+static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
+			 int bind, int ref)
+{
+	unsigned char *b = skb_tail_pointer(skb);
+	struct tcf_vlan *v = a->priv;
+	struct tc_vlan opt = {
+		.index = v->tcf_index,
+		.refcnt = v->tcf_refcnt - ref,
+		.bindcnt = v->tcf_bindcnt - bind,
+		.action = v->tcf_action,
+		.v_action = v->tcfv_action,
+	};
+	struct tcf_t t;
+
+	if (nla_put(skb, TCA_VLAN_PARMS, sizeof(opt), &opt))
+		goto nla_put_failure;
+
+	if (v->tcfv_action == TCA_VLAN_ACT_PUSH &&
+	    (nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, v->tcfv_push_vid) ||
+	     nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL, v->tcfv_push_proto)))
+		goto nla_put_failure;
+
+	t.install = jiffies_to_clock_t(jiffies - v->tcf_tm.install);
+	t.lastuse = jiffies_to_clock_t(jiffies - v->tcf_tm.lastuse);
+	t.expires = jiffies_to_clock_t(v->tcf_tm.expires);
+	if
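/*
 * (Illustrative aside, not part of the patch: roughly how a netlink
 * client could fill the options nest that tcf_vlan_init() above parses,
 * for a "push VLAN 5" action. The attribute names are the real ones;
 * the surrounding message setup is assumed.)
 *
 *	struct tc_vlan parm = { .v_action = TCA_VLAN_ACT_PUSH };
 *
 *	nla_put(skb, TCA_VLAN_PARMS, sizeof(parm), &parm);
 *	nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, 5);
 *	nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL, htons(ETH_P_8021Q));
 */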
(nla_put(skb, TCA_VLAN_TM, sizeof(t), &t)) + goto nla_put_failure; + return skb->len; + +nla_put_failure: + nlmsg_trim(skb, b); + return -1; +} + +static struct tc_action_ops act_vlan_ops = { + .kind = "vlan", + .type = TCA_ACT_VLAN, + .owner = THIS_MODULE, + .act = tcf_vlan, + .dump = tcf_vlan_dump, + .init = tcf_vlan_init, +}; + +static int __init vlan_init_module(void) +{ + return tcf_register_action(&act_vlan_ops, VLAN_TAB_MASK); +} + +static void __exit vlan_cleanup_module(void) +{ + tcf_unregister_action(&act_vlan_ops); +} + +module_init(vlan_init_module); +module_exit(vlan_cleanup_module); + +MODULE_AUTHOR("Jiri Pirko "); +MODULE_DESCRIPTION("vlan manipulation actions"); +MODULE_LICENSE("GPL v2"); diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index cd61280941e5c72d8c22e4dfe18ec517e90cba53..5aed341406c2366ed5c7b50c54f113e21bd27fde 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -72,10 +72,6 @@ static unsigned long basic_get(struct tcf_proto *tp, u32 handle) return l; } -static void basic_put(struct tcf_proto *tp, unsigned long f) -{ -} - static int basic_init(struct tcf_proto *tp) { struct basic_head *head; @@ -113,18 +109,12 @@ static void basic_destroy(struct tcf_proto *tp) static int basic_delete(struct tcf_proto *tp, unsigned long arg) { - struct basic_head *head = rtnl_dereference(tp->root); - struct basic_filter *t, *f = (struct basic_filter *) arg; - - list_for_each_entry(t, &head->flist, link) - if (t == f) { - list_del_rcu(&t->link); - tcf_unbind_filter(tp, &t->res); - call_rcu(&t->rcu, basic_delete_filter); - return 0; - } + struct basic_filter *f = (struct basic_filter *) arg; - return -ENOENT; + list_del_rcu(&f->link); + tcf_unbind_filter(tp, &f->res); + call_rcu(&f->rcu, basic_delete_filter); + return 0; } static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = { @@ -188,10 +178,9 @@ static int basic_change(struct net *net, struct sk_buff *in_skb, return -EINVAL; } - err = -ENOBUFS; fnew = kzalloc(sizeof(*fnew), GFP_KERNEL); - if (fnew == NULL) - goto errout; + if (!fnew) + return -ENOBUFS; tcf_exts_init(&fnew->exts, TCA_BASIC_ACT, TCA_BASIC_POLICE); err = -EINVAL; @@ -293,7 +282,6 @@ static struct tcf_proto_ops cls_basic_ops __read_mostly = { .init = basic_init, .destroy = basic_destroy, .get = basic_get, - .put = basic_put, .change = basic_change, .delete = basic_delete, .walk = basic_walk, diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index eed49d1d0878d6e55d23e622e58f3bf908c9cab9..84c8219c3e1ce18867466786ae061d13178d7519 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -109,19 +109,12 @@ static void __cls_bpf_delete_prog(struct rcu_head *rcu) static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg) { - struct cls_bpf_head *head = rtnl_dereference(tp->root); - struct cls_bpf_prog *prog, *todel = (struct cls_bpf_prog *) arg; + struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg; - list_for_each_entry(prog, &head->plist, link) { - if (prog == todel) { - list_del_rcu(&prog->link); - tcf_unbind_filter(tp, &prog->res); - call_rcu(&prog->rcu, __cls_bpf_delete_prog); - return 0; - } - } - - return -ENOENT; + list_del_rcu(&prog->link); + tcf_unbind_filter(tp, &prog->res); + call_rcu(&prog->rcu, __cls_bpf_delete_prog); + return 0; } static void cls_bpf_destroy(struct tcf_proto *tp) @@ -148,7 +141,7 @@ static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle) if (head == NULL) return 0UL; - list_for_each_entry_rcu(prog, &head->plist, link) { + list_for_each_entry(prog, &head->plist, link) { if 
(prog->handle == handle) { ret = (unsigned long) prog; break; @@ -158,10 +151,6 @@ static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle) return ret; } -static void cls_bpf_put(struct tcf_proto *tp, unsigned long f) -{ -} - static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, struct cls_bpf_prog *prog, unsigned long base, struct nlattr **tb, @@ -344,7 +333,7 @@ static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg) struct cls_bpf_head *head = rtnl_dereference(tp->root); struct cls_bpf_prog *prog; - list_for_each_entry_rcu(prog, &head->plist, link) { + list_for_each_entry(prog, &head->plist, link) { if (arg->count < arg->skip) goto skip; if (arg->fn(tp, (unsigned long) prog, arg) < 0) { @@ -363,7 +352,6 @@ static struct tcf_proto_ops cls_bpf_ops __read_mostly = { .init = cls_bpf_init, .destroy = cls_bpf_destroy, .get = cls_bpf_get, - .put = cls_bpf_put, .change = cls_bpf_change, .delete = cls_bpf_delete, .walk = cls_bpf_walk, diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index d61a801222c10205e339a5d9d622b4ecce804ed4..221697ab0247c5e786c70e181a43c1145a30748b 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c @@ -67,10 +67,6 @@ static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle) return 0UL; } -static void cls_cgroup_put(struct tcf_proto *tp, unsigned long f) -{ -} - static int cls_cgroup_init(struct tcf_proto *tp) { return 0; @@ -117,11 +113,7 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb, return -ENOBUFS; tcf_exts_init(&new->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE); - if (head) - new->handle = head->handle; - else - new->handle = handle; - + new->handle = handle; new->tp = tp; err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS], cgroup_policy); @@ -185,7 +177,6 @@ static int cls_cgroup_dump(struct net *net, struct tcf_proto *tp, unsigned long struct sk_buff *skb, struct tcmsg *t) { struct cls_cgroup_head *head = rtnl_dereference(tp->root); - unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; t->tcm_handle = head->handle; @@ -206,7 +197,7 @@ static int cls_cgroup_dump(struct net *net, struct tcf_proto *tp, unsigned long return skb->len; nla_put_failure: - nlmsg_trim(skb, b); + nla_nest_cancel(skb, nest); return -1; } @@ -217,7 +208,6 @@ static struct tcf_proto_ops cls_cgroup_ops __read_mostly = { .classify = cls_cgroup_classify, .destroy = cls_cgroup_destroy, .get = cls_cgroup_get, - .put = cls_cgroup_put, .delete = cls_cgroup_delete, .walk = cls_cgroup_walk, .dump = cls_cgroup_dump, diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 4ac515f2a6cecbcb56159c9c11892771e3816ddd..15d68f24a521a8ec66fc42691f031d864f589416 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -426,10 +426,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, goto err2; /* Copy fold into fnew */ - fnew->handle = fold->handle; - fnew->keymask = fold->keymask; fnew->tp = fold->tp; - fnew->handle = fold->handle; fnew->nkeys = fold->nkeys; fnew->keymask = fold->keymask; @@ -578,16 +575,12 @@ static unsigned long flow_get(struct tcf_proto *tp, u32 handle) struct flow_head *head = rtnl_dereference(tp->root); struct flow_filter *f; - list_for_each_entry_rcu(f, &head->filters, list) + list_for_each_entry(f, &head->filters, list) if (f->handle == handle) return (unsigned long)f; return 0; } -static void flow_put(struct tcf_proto *tp, unsigned long f) -{ -} - static int flow_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, struct sk_buff 
*skb, struct tcmsg *t) { @@ -645,7 +638,7 @@ static int flow_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, return skb->len; nla_put_failure: - nlmsg_trim(skb, nest); + nla_nest_cancel(skb, nest); return -1; } @@ -654,7 +647,7 @@ static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg) struct flow_head *head = rtnl_dereference(tp->root); struct flow_filter *f; - list_for_each_entry_rcu(f, &head->filters, list) { + list_for_each_entry(f, &head->filters, list) { if (arg->count < arg->skip) goto skip; if (arg->fn(tp, (unsigned long)f, arg) < 0) { @@ -674,7 +667,6 @@ static struct tcf_proto_ops cls_flow_ops __read_mostly = { .change = flow_change, .delete = flow_delete, .get = flow_get, - .put = flow_put, .dump = flow_dump, .walk = flow_walk, .owner = THIS_MODULE, diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index dbfdfd1f1a9f8cc6d2364bed4c700eef4a029ed3..a5269f76004c2974a2e1e650433a7ba394710965 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -111,10 +111,6 @@ static unsigned long fw_get(struct tcf_proto *tp, u32 handle) return 0; } -static void fw_put(struct tcf_proto *tp, unsigned long f) -{ -} - static int fw_init(struct tcf_proto *tp) { return 0; @@ -360,7 +356,6 @@ static int fw_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, { struct fw_head *head = rtnl_dereference(tp->root); struct fw_filter *f = (struct fw_filter *)fh; - unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; if (f == NULL) @@ -401,7 +396,7 @@ static int fw_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, return skb->len; nla_put_failure: - nlmsg_trim(skb, b); + nla_nest_cancel(skb, nest); return -1; } @@ -411,7 +406,6 @@ static struct tcf_proto_ops cls_fw_ops __read_mostly = { .init = fw_init, .destroy = fw_destroy, .get = fw_get, - .put = fw_put, .change = fw_change, .delete = fw_delete, .walk = fw_walk, diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index 109a329b719858bea8f8a0afc29a27369cb3b9d0..2ecd24688554e76e38d98b7338072a37d4c0cea1 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -256,10 +256,6 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle) return 0; } -static void route4_put(struct tcf_proto *tp, unsigned long f) -{ -} - static int route4_init(struct tcf_proto *tp) { return 0; @@ -597,7 +593,6 @@ static int route4_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { struct route4_filter *f = (struct route4_filter *)fh; - unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; u32 id; @@ -639,7 +634,7 @@ static int route4_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, return skb->len; nla_put_failure: - nlmsg_trim(skb, b); + nla_nest_cancel(skb, nest); return -1; } @@ -649,7 +644,6 @@ static struct tcf_proto_ops cls_route4_ops __read_mostly = { .init = route4_init, .destroy = route4_destroy, .get = route4_get, - .put = route4_put, .change = route4_change, .delete = route4_delete, .walk = route4_walk, diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index 6bb55f277a5a095822eb7db0bfe523e58405ecdf..edd8ade3fbc1f4358b4275940e6f62b3d814b3dd 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h @@ -271,10 +271,6 @@ static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle) return 0; } -static void rsvp_put(struct tcf_proto *tp, unsigned long f) -{ -} - static int rsvp_init(struct tcf_proto *tp) { struct rsvp_head *data; @@ -657,7 +653,6 @@ static int rsvp_dump(struct net *net, struct tcf_proto *tp, 
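/*
 * (Aside: the error-unwind idiom these classifier dumps now share, in
 * condensed form; SOME_ATTR and val stand in for any real attribute.
 * nla_nest_cancel() trims the skb back to the start of the nest, which
 * is why the dumps no longer need to capture skb_tail_pointer() up front.)
 *
 *	nest = nla_nest_start(skb, TCA_OPTIONS);
 *	if (nest == NULL)
 *		goto nla_put_failure;
 *	if (nla_put_u32(skb, SOME_ATTR, val))
 *		goto nla_put_failure;
 *	nla_nest_end(skb, nest);
 *	return skb->len;
 *
 * nla_put_failure:
 *	nla_nest_cancel(skb, nest);
 *	return -1;
 */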
unsigned long fh, { struct rsvp_filter *f = (struct rsvp_filter *)fh; struct rsvp_session *s; - unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; struct tc_rsvp_pinfo pinfo; @@ -698,7 +693,7 @@ static int rsvp_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, return skb->len; nla_put_failure: - nlmsg_trim(skb, b); + nla_nest_cancel(skb, nest); return -1; } @@ -708,7 +703,6 @@ static struct tcf_proto_ops RSVP_OPS __read_mostly = { .init = rsvp_init, .destroy = rsvp_destroy, .get = rsvp_get, - .put = rsvp_put, .change = rsvp_change, .delete = rsvp_delete, .walk = rsvp_walk, diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 30f10fb07f4a7d50e390c7df730f94366e00fb73..bd49bf547a479f139b25e0507b090d51c137c519 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -116,13 +116,6 @@ static unsigned long tcindex_get(struct tcf_proto *tp, u32 handle) return r && tcindex_filter_is_set(r) ? (unsigned long) r : 0UL; } - -static void tcindex_put(struct tcf_proto *tp, unsigned long f) -{ - pr_debug("tcindex_put(tp %p,f 0x%lx)\n", tp, f); -} - - static int tcindex_init(struct tcf_proto *tp) { struct tcindex_data *p; @@ -496,11 +489,10 @@ static int tcindex_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, { struct tcindex_data *p = rtnl_dereference(tp->root); struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh; - unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; - pr_debug("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p,b %p\n", - tp, fh, skb, t, p, r, b); + pr_debug("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p\n", + tp, fh, skb, t, p, r); pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h); nest = nla_nest_start(skb, TCA_OPTIONS); @@ -550,7 +542,7 @@ static int tcindex_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, return skb->len; nla_put_failure: - nlmsg_trim(skb, b); + nla_nest_cancel(skb, nest); return -1; } @@ -560,7 +552,6 @@ static struct tcf_proto_ops cls_tcindex_ops __read_mostly = { .init = tcindex_init, .destroy = tcindex_destroy, .get = tcindex_get, - .put = tcindex_put, .change = tcindex_change, .delete = tcindex_delete, .walk = tcindex_walk, diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 0472909bb014c258e92f796736be7a527af843d1..09487afbfd5187a312ab155df5da43548d0c326b 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -299,10 +299,6 @@ static unsigned long u32_get(struct tcf_proto *tp, u32 handle) return (unsigned long)u32_lookup_key(ht, handle); } -static void u32_put(struct tcf_proto *tp, unsigned long f) -{ -} - static u32 gen_new_htid(struct tc_u_common *tp_c) { int i = 0x800; @@ -1021,7 +1017,6 @@ static struct tcf_proto_ops cls_u32_ops __read_mostly = { .init = u32_init, .destroy = u32_destroy, .get = u32_get, - .put = u32_put, .change = u32_change, .delete = u32_delete, .walk = u32_walk, diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index cbd7e1fd23b41bb7ba0bb348c3a1a287652cca93..9b05924cc386ecc2cdb9816be27e439637fb37b3 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -481,12 +481,11 @@ begin: if (likely(rate)) do_div(len, rate); /* Since socket rate can change later, - * clamp the delay to 125 ms. - * TODO: maybe segment the too big skb, as in commit - * e43ac79a4bc ("sch_tbf: segment too big GSO packets") + * clamp the delay to 1 second. + * Really, providers of too big packets should be fixed ! 
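 * (Illustrative arithmetic: the delay below is computed as
 * len = qdisc_pkt_len * NSEC_PER_SEC / rate, so a 64 KB GSO packet
 * paced at 512 kbit/s works out to 65536 * 8 / 512000 ~= 1.02 s,
 * which this clamp caps at exactly one second.)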
*/ - if (unlikely(len > 125 * NSEC_PER_MSEC)) { - len = 125 * NSEC_PER_MSEC; + if (unlikely(len > NSEC_PER_SEC)) { + len = NSEC_PER_SEC; q->stat_pkts_too_long++; } diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index b9ca32ebc1de0922499379dda1735d75abdd26bc..1e52decb7b59cf0b4173d0f17efdab8fefee5f26 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -94,7 +94,7 @@ static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch, TC_H_MIN(skb->priority) <= q->flows_cnt) return TC_H_MIN(skb->priority); - filter = rcu_dereference(q->filter_list); + filter = rcu_dereference_bh(q->filter_list); if (!filter) return fq_codel_hash(q, skb) + 1; diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index b34331967e020b6f1151b25be8f744e494a80ad6..179f1c8c0d8bba4aa00705e6c9ca41ef909f7d50 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -139,33 +139,20 @@ struct netem_sched_data { /* Time stamp put into socket buffer control block * Only valid when skbs are in our internal t(ime)fifo queue. + * + * As skb->rbnode uses same storage than skb->next, skb->prev and skb->tstamp, + * and skb->next & skb->prev are scratch space for a qdisc, + * we save skb->tstamp value in skb->cb[] before destroying it. */ struct netem_skb_cb { psched_time_t time_to_send; ktime_t tstamp_save; }; -/* Because space in skb->cb[] is tight, netem overloads skb->next/prev/tstamp - * to hold a rb_node structure. - * - * If struct sk_buff layout is changed, the following checks will complain. - */ -static struct rb_node *netem_rb_node(struct sk_buff *skb) -{ - BUILD_BUG_ON(offsetof(struct sk_buff, next) != 0); - BUILD_BUG_ON(offsetof(struct sk_buff, prev) != - offsetof(struct sk_buff, next) + sizeof(skb->next)); - BUILD_BUG_ON(offsetof(struct sk_buff, tstamp) != - offsetof(struct sk_buff, prev) + sizeof(skb->prev)); - BUILD_BUG_ON(sizeof(struct rb_node) > sizeof(skb->next) + - sizeof(skb->prev) + - sizeof(skb->tstamp)); - return (struct rb_node *)&skb->next; -} static struct sk_buff *netem_rb_to_skb(struct rb_node *rb) { - return (struct sk_buff *)rb; + return container_of(rb, struct sk_buff, rbnode); } static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb) @@ -403,8 +390,8 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) else p = &parent->rb_left; } - rb_link_node(netem_rb_node(nskb), parent, p); - rb_insert_color(netem_rb_node(nskb), &q->t_root); + rb_link_node(&nskb->rbnode, parent, p); + rb_insert_color(&nskb->rbnode, &q->t_root); sch->q.qlen++; } diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 158701da2d31c8c862fcffd59404b7c08d3b610a..a3380917f1973dba6c43507bc16b4f3fbc090c8a 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -164,7 +164,7 @@ static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chu */ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, struct sctp_sndrcvinfo *sinfo, - struct msghdr *msgh, int msg_len) + struct iov_iter *from) { int max, whole, i, offset, over, err; int len, first_len; @@ -172,6 +172,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, struct sctp_chunk *chunk; struct sctp_datamsg *msg; struct list_head *pos, *temp; + size_t msg_len = iov_iter_count(from); __u8 frag; msg = sctp_datamsg_new(GFP_KERNEL); @@ -279,12 +280,10 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, goto errout; } - err = sctp_user_addto_chunk(chunk, offset, len, msgh->msg_iov); + err = 
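/*
 * (Aside: copy_from_iter() advances the iterator as a side effect, which
 * is why the explicit "offset += len" bookkeeping disappears below; each
 * chunk's copy resumes exactly where the previous one stopped. A minimal
 * sketch of the primitive, under that assumption:)
 *
 *	size_t n = copy_from_iter(target, len, from);	// moves 'from' forward
 *	if (n != len)
 *		return -EFAULT;
 */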
sctp_user_addto_chunk(chunk, len, from); if (err < 0) goto errout_chunk_free; - offset += len; - /* Put the chunk->skb back into the form expected by send. */ __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr - (__u8 *)chunk->skb->data); @@ -317,7 +316,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, goto errout; } - err = sctp_user_addto_chunk(chunk, offset, over, msgh->msg_iov); + err = sctp_user_addto_chunk(chunk, over, from); /* Put the chunk->skb back into the form expected by send. */ __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr diff --git a/net/sctp/output.c b/net/sctp/output.c index 42dffd4283898f718630c76cf6dece316492966f..fc5e45b8a832d94367178536a341001021a19c44 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -401,12 +401,12 @@ int sctp_packet_transmit(struct sctp_packet *packet) sk = chunk->skb->sk; /* Allocate the new skb. */ - nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC); + nskb = alloc_skb(packet->size + MAX_HEADER, GFP_ATOMIC); if (!nskb) goto nomem; /* Make sure the outbound skb has enough header room reserved. */ - skb_reserve(nskb, packet->overhead + LL_MAX_HEADER); + skb_reserve(nskb, packet->overhead + MAX_HEADER); /* Set the owning socket so that we know where to get the * destination IP address. diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 34229ee7f379902b16a7659f82545f10e5908b17..0697eda5aed8f8c897973bd173ad40d061775030 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -417,7 +417,7 @@ static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos) if (*pos == 0) seq_printf(seq, "ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX " - "REM_ADDR_RTX START\n"); + "REM_ADDR_RTX START STATE\n"); return (void *)pos; } @@ -490,14 +490,20 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) * Note: We don't have a way to tally this at the moment * so lets just leave it as zero for the moment */ - seq_printf(seq, "0 "); + seq_puts(seq, "0 "); /* * remote address start time (START). This is also not * currently implemented, but we can record it with a * jiffies marker in a subsequent patch */ - seq_printf(seq, "0"); + seq_puts(seq, "0 "); + + /* + * The current state of this destination. I.e. + * SCTP_ACTIVE, SCTP_INACTIVE, ... + */ + seq_printf(seq, "%d", tsp->state); seq_printf(seq, "\n"); } diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 9f32741abb1c7b142265297dc2fac78b74b3d195..e49e231cef529ecaf4bf2bc3e6a168b8f8b6fc06 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1001,7 +1001,7 @@ no_mem: /* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc, - const struct msghdr *msg, + struct msghdr *msg, size_t paylen) { struct sctp_chunk *retval; @@ -1018,7 +1018,7 @@ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc, if (!payload) goto err_payload; - err = memcpy_fromiovec(payload, msg->msg_iov, paylen); + err = memcpy_from_msg(payload, msg, paylen); if (err < 0) goto err_copy; } @@ -1491,26 +1491,26 @@ static void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk, * chunk is not big enough. * Returns a kernel err value. */ -int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len, - struct iovec *data) +int sctp_user_addto_chunk(struct sctp_chunk *chunk, int len, + struct iov_iter *from) { - __u8 *target; - int err = 0; + void *target; + ssize_t copied; /* Make room in chunk for data. 
*/ target = skb_put(chunk->skb, len); /* Copy data (whole iovec) into chunk */ - if ((err = memcpy_fromiovecend(target, data, off, len))) - goto out; + copied = copy_from_iter(target, len, from); + if (copied != len) + return -EFAULT; /* Adjust the chunk length field. */ chunk->chunk_hdr->length = htons(ntohs(chunk->chunk_hdr->length) + len); chunk->chunk_end = skb_tail_pointer(chunk->skb); -out: - return err; + return 0; } /* Helper function to assign a TSN if needed. This assumes that both diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 634a2abb5f3a25f15ca99c4737190c1e5e17b27a..2625eccb77d5d7738f9e50930ab37a6db6a80760 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -162,7 +162,7 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk) chunk->skb->destructor = sctp_wfree; /* Save the chunk pointer in skb for sctp_wfree to use later. */ - *((struct sctp_chunk **)(chunk->skb->cb)) = chunk; + skb_shinfo(chunk->skb)->destructor_arg = chunk; asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) + sizeof(struct sk_buff) + @@ -1947,7 +1947,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, } /* Break the message into multiple chunks of maximum size. */ - datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len); + datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter); if (IS_ERR(datamsg)) { err = PTR_ERR(datamsg); goto out_free; @@ -2095,7 +2095,7 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, if (copied > len) copied = len; - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); event = sctp_skb2event(skb); @@ -6592,8 +6592,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) struct cmsghdr *cmsg; struct msghdr *my_msg = (struct msghdr *)msg; - for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; - cmsg = CMSG_NXTHDR(my_msg, cmsg)) { + for_each_cmsghdr(cmsg, my_msg) { if (!CMSG_OK(my_msg, cmsg)) return -EINVAL; @@ -6870,14 +6869,10 @@ static void sctp_wake_up_waiters(struct sock *sk, */ static void sctp_wfree(struct sk_buff *skb) { - struct sctp_association *asoc; - struct sctp_chunk *chunk; - struct sock *sk; + struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg; + struct sctp_association *asoc = chunk->asoc; + struct sock *sk = asoc->base.sk; - /* Get the saved chunk pointer. */ - chunk = *((struct sctp_chunk **)(skb->cb)); - asoc = chunk->asoc; - sk = asoc->base.sk; asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) + sizeof(struct sk_buff) + sizeof(struct sctp_chunk); diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index d49dc2ed30adb97a809eb37902b9956c366a2862..ce469d648ffbe166f9ae1c5650f481256f31a7f8 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -205,9 +205,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN)) goto out_free; - if (!sctp_ulpevent_is_notification(event)) + if (!sctp_ulpevent_is_notification(event)) { sk_mark_napi_id(sk, skb); - + sk_incoming_cpu_update(sk); + } /* Check if the user wishes to receive this event. 
*/ if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe)) goto out_free; diff --git a/net/socket.c b/net/socket.c index fe20c319a0bb37dd475b7a8a32dccd57ac4e15d9..8809afccf7fadc1b43db802aa3160c03e3c473fa 100644 --- a/net/socket.c +++ b/net/socket.c @@ -651,7 +651,8 @@ static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock, return err ?: __sock_sendmsg_nosec(iocb, sock, msg, size); } -int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) +static int do_sock_sendmsg(struct socket *sock, struct msghdr *msg, + size_t size, bool nosec) { struct kiocb iocb; struct sock_iocb siocb; @@ -659,25 +660,22 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; - ret = __sock_sendmsg(&iocb, sock, msg, size); + ret = nosec ? __sock_sendmsg_nosec(&iocb, sock, msg, size) : + __sock_sendmsg(&iocb, sock, msg, size); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; } + +int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) +{ + return do_sock_sendmsg(sock, msg, size, false); +} EXPORT_SYMBOL(sock_sendmsg); static int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size) { - struct kiocb iocb; - struct sock_iocb siocb; - int ret; - - init_sync_kiocb(&iocb, NULL); - iocb.private = &siocb; - ret = __sock_sendmsg_nosec(&iocb, sock, msg, size); - if (-EIOCBQUEUED == ret) - ret = wait_on_sync_kiocb(&iocb); - return ret; + return do_sock_sendmsg(sock, msg, size, true); } int kernel_sendmsg(struct socket *sock, struct msghdr *msg, @@ -691,8 +689,7 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg, * the following is safe, since for compiler definitions of kvec and * iovec are identical, yielding the same in-core layout and alignment */ - msg->msg_iov = (struct iovec *)vec; - msg->msg_iovlen = num; + iov_iter_init(&msg->msg_iter, WRITE, (struct iovec *)vec, num, size); result = sock_sendmsg(sock, msg, size); set_fs(oldfs); return result; @@ -855,7 +852,7 @@ int kernel_recvmsg(struct socket *sock, struct msghdr *msg, * the following is safe, since for compiler definitions of kvec and * iovec are identical, yielding the same in-core layout and alignment */ - msg->msg_iov = (struct iovec *)vec, msg->msg_iovlen = num; + iov_iter_init(&msg->msg_iter, READ, (struct iovec *)vec, num, size); result = sock_recvmsg(sock, msg, size, flags); set_fs(oldfs); return result; @@ -915,8 +912,7 @@ static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb, msg->msg_namelen = 0; msg->msg_control = NULL; msg->msg_controllen = 0; - msg->msg_iov = (struct iovec *)iov; - msg->msg_iovlen = nr_segs; + iov_iter_init(&msg->msg_iter, READ, iov, nr_segs, size); msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0; return __sock_recvmsg(iocb, sock, msg, size, msg->msg_flags); @@ -955,8 +951,7 @@ static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb, msg->msg_namelen = 0; msg->msg_control = NULL; msg->msg_controllen = 0; - msg->msg_iov = (struct iovec *)iov; - msg->msg_iovlen = nr_segs; + iov_iter_init(&msg->msg_iter, WRITE, iov, nr_segs, size); msg->msg_flags = (file->f_flags & O_NONBLOCK) ? 
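/*
 * (Aside, a minimal assumed caller of kernel_sendmsg() above after the
 * iov_iter conversion: the kvec is described once, and progress is
 * tracked by the iterator embedded in msg.msg_iter; "payload" and "len"
 * are placeholders.)
 *
 *	struct kvec vec = { .iov_base = payload, .iov_len = len };
 *	struct msghdr msg = { };
 *
 *	int sent = kernel_sendmsg(sock, &msg, &vec, 1, len);
 */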
MSG_DONTWAIT : 0; if (sock->type == SOCK_SEQPACKET) msg->msg_flags |= MSG_EOR; @@ -1800,8 +1795,7 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, iov.iov_base = buff; iov.iov_len = len; msg.msg_name = NULL; - msg.msg_iov = &iov; - msg.msg_iovlen = 1; + iov_iter_init(&msg.msg_iter, WRITE, &iov, 1, len); msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_namelen = 0; @@ -1858,10 +1852,9 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, msg.msg_control = NULL; msg.msg_controllen = 0; - msg.msg_iovlen = 1; - msg.msg_iov = &iov; iov.iov_len = size; iov.iov_base = ubuf; + iov_iter_init(&msg.msg_iter, READ, &iov, 1, size); /* Save some cycles and don't copy the address if not needed */ msg.msg_name = addr ? (struct sockaddr *)&address : NULL; /* We assume all kernel code knows the size of sockaddr_storage */ @@ -1988,13 +1981,27 @@ struct used_address { unsigned int name_len; }; -static int copy_msghdr_from_user(struct msghdr *kmsg, - struct msghdr __user *umsg) +static ssize_t copy_msghdr_from_user(struct msghdr *kmsg, + struct user_msghdr __user *umsg, + struct sockaddr __user **save_addr, + struct iovec **iov) { - if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) + struct sockaddr __user *uaddr; + struct iovec __user *uiov; + size_t nr_segs; + ssize_t err; + + if (!access_ok(VERIFY_READ, umsg, sizeof(*umsg)) || + __get_user(uaddr, &umsg->msg_name) || + __get_user(kmsg->msg_namelen, &umsg->msg_namelen) || + __get_user(uiov, &umsg->msg_iov) || + __get_user(nr_segs, &umsg->msg_iovlen) || + __get_user(kmsg->msg_control, &umsg->msg_control) || + __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || + __get_user(kmsg->msg_flags, &umsg->msg_flags)) return -EFAULT; - if (kmsg->msg_name == NULL) + if (!uaddr) kmsg->msg_namelen = 0; if (kmsg->msg_namelen < 0) @@ -2002,10 +2009,35 @@ static int copy_msghdr_from_user(struct msghdr *kmsg, if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) kmsg->msg_namelen = sizeof(struct sockaddr_storage); - return 0; + + if (save_addr) + *save_addr = uaddr; + + if (uaddr && kmsg->msg_namelen) { + if (!save_addr) { + err = move_addr_to_kernel(uaddr, kmsg->msg_namelen, + kmsg->msg_name); + if (err < 0) + return err; + } + } else { + kmsg->msg_name = NULL; + kmsg->msg_namelen = 0; + } + + if (nr_segs > UIO_MAXIOV) + return -EMSGSIZE; + + err = rw_copy_check_uvector(save_addr ? READ : WRITE, + uiov, nr_segs, + UIO_FASTIOV, *iov, iov); + if (err >= 0) + iov_iter_init(&kmsg->msg_iter, save_addr ? 
READ : WRITE, + *iov, nr_segs, err); + return err; } -static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg, +static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg, struct msghdr *msg_sys, unsigned int flags, struct used_address *used_address) { @@ -2017,34 +2049,15 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg, __attribute__ ((aligned(sizeof(__kernel_size_t)))); /* 20 is size of ipv6_pktinfo */ unsigned char *ctl_buf = ctl; - int err, ctl_len, total_len; - - err = -EFAULT; - if (MSG_CMSG_COMPAT & flags) { - if (get_compat_msghdr(msg_sys, msg_compat)) - return -EFAULT; - } else { - err = copy_msghdr_from_user(msg_sys, msg); - if (err) - return err; - } + int ctl_len, total_len; + ssize_t err; - if (msg_sys->msg_iovlen > UIO_FASTIOV) { - err = -EMSGSIZE; - if (msg_sys->msg_iovlen > UIO_MAXIOV) - goto out; - err = -ENOMEM; - iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec), - GFP_KERNEL); - if (!iov) - goto out; - } + msg_sys->msg_name = &address; - /* This will also move the address data into kernel space */ - if (MSG_CMSG_COMPAT & flags) { - err = verify_compat_iovec(msg_sys, iov, &address, VERIFY_READ); - } else - err = verify_iovec(msg_sys, iov, &address, VERIFY_READ); + if (MSG_CMSG_COMPAT & flags) + err = get_compat_msghdr(msg_sys, msg_compat, NULL, &iov); + else + err = copy_msghdr_from_user(msg_sys, msg, NULL, &iov); if (err < 0) goto out_freeiov; total_len = err; @@ -2115,7 +2128,6 @@ out_freectl: out_freeiov: if (iov != iovstack) kfree(iov); -out: return err; } @@ -2123,7 +2135,7 @@ out: * BSD sendmsg interface */ -long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags) +long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags) { int fput_needed, err; struct msghdr msg_sys; @@ -2140,7 +2152,7 @@ out: return err; } -SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags) +SYSCALL_DEFINE3(sendmsg, int, fd, struct user_msghdr __user *, msg, unsigned int, flags) { if (flags & MSG_CMSG_COMPAT) return -EINVAL; @@ -2177,7 +2189,7 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, while (datagrams < vlen) { if (MSG_CMSG_COMPAT & flags) { - err = ___sys_sendmsg(sock, (struct msghdr __user *)compat_entry, + err = ___sys_sendmsg(sock, (struct user_msghdr __user *)compat_entry, &msg_sys, flags, &used_address); if (err < 0) break; @@ -2185,7 +2197,7 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, ++compat_entry; } else { err = ___sys_sendmsg(sock, - (struct msghdr __user *)entry, + (struct user_msghdr __user *)entry, &msg_sys, flags, &used_address); if (err < 0) break; @@ -2215,7 +2227,7 @@ SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg, return __sys_sendmmsg(fd, mmsg, vlen, flags); } -static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg, +static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg, struct msghdr *msg_sys, unsigned int flags, int nosec) { struct compat_msghdr __user *msg_compat = @@ -2223,44 +2235,22 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg, struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; unsigned long cmsg_ptr; - int err, total_len, len; + int total_len, len; + ssize_t err; /* kernel mode address */ struct sockaddr_storage addr; /* user mode address pointers */ struct sockaddr __user *uaddr; - int __user *uaddr_len; - - if (MSG_CMSG_COMPAT & flags) { - if 
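/*
 * (Aside: both ___sys_sendmsg() and ___sys_recvmsg() keep a small
 * on-stack iovec array and let the msghdr-copy helpers decide whether a
 * heap allocation is needed, so the unwind frees iov only when it no
 * longer points at the stack array. The shape, condensed:)
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	...
 *	if (iov != iovstack)
 *		kfree(iov);
 */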
(get_compat_msghdr(msg_sys, msg_compat)) - return -EFAULT; - } else { - err = copy_msghdr_from_user(msg_sys, msg); - if (err) - return err; - } + int __user *uaddr_len = COMPAT_NAMELEN(msg); - if (msg_sys->msg_iovlen > UIO_FASTIOV) { - err = -EMSGSIZE; - if (msg_sys->msg_iovlen > UIO_MAXIOV) - goto out; - err = -ENOMEM; - iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec), - GFP_KERNEL); - if (!iov) - goto out; - } + msg_sys->msg_name = &addr; - /* Save the user-mode address (verify_iovec will change the - * kernel msghdr to use the kernel address space) - */ - uaddr = (__force void __user *)msg_sys->msg_name; - uaddr_len = COMPAT_NAMELEN(msg); if (MSG_CMSG_COMPAT & flags) - err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE); + err = get_compat_msghdr(msg_sys, msg_compat, &uaddr, &iov); else - err = verify_iovec(msg_sys, iov, &addr, VERIFY_WRITE); + err = copy_msghdr_from_user(msg_sys, msg, &uaddr, &iov); if (err < 0) goto out_freeiov; total_len = err; @@ -2303,7 +2293,6 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg, out_freeiov: if (iov != iovstack) kfree(iov); -out: return err; } @@ -2311,7 +2300,7 @@ out: * BSD recvmsg interface */ -long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags) +long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags) { int fput_needed, err; struct msghdr msg_sys; @@ -2328,7 +2317,7 @@ out: return err; } -SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, +SYSCALL_DEFINE3(recvmsg, int, fd, struct user_msghdr __user *, msg, unsigned int, flags) { if (flags & MSG_CMSG_COMPAT) @@ -2373,7 +2362,7 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, * No need to ask LSM for more than the first datagram. */ if (MSG_CMSG_COMPAT & flags) { - err = ___sys_recvmsg(sock, (struct msghdr __user *)compat_entry, + err = ___sys_recvmsg(sock, (struct user_msghdr __user *)compat_entry, &msg_sys, flags & ~MSG_WAITFORONE, datagrams); if (err < 0) @@ -2382,7 +2371,7 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, ++compat_entry; } else { err = ___sys_recvmsg(sock, - (struct msghdr __user *)entry, + (struct user_msghdr __user *)entry, &msg_sys, flags & ~MSG_WAITFORONE, datagrams); if (err < 0) @@ -2571,13 +2560,13 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) (int __user *)a[4]); break; case SYS_SENDMSG: - err = sys_sendmsg(a0, (struct msghdr __user *)a1, a[2]); + err = sys_sendmsg(a0, (struct user_msghdr __user *)a1, a[2]); break; case SYS_SENDMMSG: err = sys_sendmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3]); break; case SYS_RECVMSG: - err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]); + err = sys_recvmsg(a0, (struct user_msghdr __user *)a1, a[2]); break; case SYS_RECVMMSG: err = sys_recvmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3], diff --git a/net/switchdev/Kconfig b/net/switchdev/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..155754588fd65ab656736ab342a4851b4de0bb8b --- /dev/null +++ b/net/switchdev/Kconfig @@ -0,0 +1,13 @@ +# +# Configuration for Switch device support +# + +config NET_SWITCHDEV + boolean "Switch (and switch-ish) device support (EXPERIMENTAL)" + depends on INET + ---help--- + This module provides glue between core networking code and device + drivers in order to support hardware switch chips in very generic + meaning of the word "switch". 
This includes devices supporting L2/L3 but
+	  also various flow offloading chips, including switches embedded into
+	  SR-IOV NICs.
diff --git a/net/switchdev/Makefile b/net/switchdev/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..5ed63ed324d009300b96ff435e40951aa06d2a31
--- /dev/null
+++ b/net/switchdev/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Switch device API
+#
+
+obj-$(CONFIG_NET_SWITCHDEV) += switchdev.o
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
new file mode 100644
index 0000000000000000000000000000000000000000..d162b21b14bd23b2f65a7dac64ed2cc1a7e14c3e
--- /dev/null
+++ b/net/switchdev/switchdev.c
@@ -0,0 +1,52 @@
+/*
+ * net/switchdev/switchdev.c - Switch device API
+ * Copyright (c) 2014 Jiri Pirko
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <net/switchdev.h>
+
+/**
+ * netdev_switch_parent_id_get - Get ID of a switch
+ * @dev: port device
+ * @psid: switch ID
+ *
+ * Get ID of a switch this port is part of.
+ */
+int netdev_switch_parent_id_get(struct net_device *dev,
+				struct netdev_phys_item_id *psid)
+{
+	const struct net_device_ops *ops = dev->netdev_ops;
+
+	if (!ops->ndo_switch_parent_id_get)
+		return -EOPNOTSUPP;
+	return ops->ndo_switch_parent_id_get(dev, psid);
+}
+EXPORT_SYMBOL(netdev_switch_parent_id_get);
+
+/**
+ * netdev_switch_port_stp_update - Notify switch device port of STP
+ * state change
+ * @dev: port device
+ * @state: port STP state
+ *
+ * Notify switch device port of bridge port STP state change.
+ */
+int netdev_switch_port_stp_update(struct net_device *dev, u8 state)
+{
+	const struct net_device_ops *ops = dev->netdev_ops;
+
+	if (!ops->ndo_switch_port_stp_update)
+		return -EOPNOTSUPP;
+	WARN_ON(!ops->ndo_switch_parent_id_get);
+	return ops->ndo_switch_port_stp_update(dev, state);
+}
+EXPORT_SYMBOL(netdev_switch_port_stp_update);
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index b8a13caad59a518dddd2a8a50ad3a9a04f363cae..333e4592772ced97b3a636c0b7a854f795a140cd 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -7,8 +7,8 @@ obj-$(CONFIG_TIPC) := tipc.o
 tipc-y += addr.o bcast.o bearer.o config.o \
 	   core.o link.o discover.o msg.o \
 	   name_distr.o subscr.o name_table.o net.o \
-	   netlink.o node.o node_subscr.o \
-	   socket.o log.o eth_media.o server.o
+	   netlink.o node.o socket.o log.o eth_media.o \
+	   server.o
 
 tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o
 tipc-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index b8670bf262e249dea10d463fb3b7d2e926ff747e..96ceefeb9daf4eb780cd168b4d008418c0055dd9 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -217,12 +217,13 @@ struct tipc_node *tipc_bclink_retransmit_to(void)
  */
 static void bclink_retransmit_pkt(u32 after, u32 to)
 {
-	struct sk_buff *buf;
+	struct sk_buff *skb;
 
-	buf = bcl->first_out;
-	while (buf && less_eq(buf_seqno(buf), after))
-		buf = buf->next;
-	tipc_link_retransmit(bcl, buf, mod(to - after));
+	skb_queue_walk(&bcl->outqueue, skb) {
+		if (more(buf_seqno(skb), after))
+			break;
+	}
+	tipc_link_retransmit(bcl, skb, mod(to - after));
 }
 
 /**
@@ -232,8 +233,11 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
  */
 void tipc_bclink_wakeup_users(void)
 {
-	while (skb_queue_len(&bclink->link.waiting_sks))
-
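/*
 * (Aside on the switchdev API introduced in net/switchdev/switchdev.c
 * above: a hypothetical sketch of the driver side; every "foo" name is
 * invented. Ports of the same switch would presumably report one ID.)
 *
 *	static int foo_ndo_switch_parent_id_get(struct net_device *dev,
 *						struct netdev_phys_item_id *psid)
 *	{
 *		struct foo_port *port = netdev_priv(dev);	// hypothetical
 *
 *		psid->id_len = ETH_ALEN;
 *		memcpy(psid->id, port->parent_switch_mac, psid->id_len);
 *		return 0;
 *	}
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_switch_parent_id_get = foo_ndo_switch_parent_id_get,
 *	};
 */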
tipc_sk_rcv(skb_dequeue(&bclink->link.waiting_sks)); + struct sk_buff *skb; + + while ((skb = skb_dequeue(&bclink->link.waiting_sks))) + tipc_sk_rcv(skb); + } /** @@ -245,14 +249,14 @@ void tipc_bclink_wakeup_users(void) */ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) { - struct sk_buff *crs; + struct sk_buff *skb, *tmp; struct sk_buff *next; unsigned int released = 0; tipc_bclink_lock(); /* Bail out if tx queue is empty (no clean up is required) */ - crs = bcl->first_out; - if (!crs) + skb = skb_peek(&bcl->outqueue); + if (!skb) goto exit; /* Determine which messages need to be acknowledged */ @@ -271,43 +275,43 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) * Bail out if specified sequence number does not correspond * to a message that has been sent and not yet acknowledged */ - if (less(acked, buf_seqno(crs)) || + if (less(acked, buf_seqno(skb)) || less(bcl->fsm_msg_cnt, acked) || less_eq(acked, n_ptr->bclink.acked)) goto exit; } /* Skip over packets that node has previously acknowledged */ - while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked)) - crs = crs->next; + skb_queue_walk(&bcl->outqueue, skb) { + if (more(buf_seqno(skb), n_ptr->bclink.acked)) + break; + } /* Update packets that node is now acknowledging */ + skb_queue_walk_from_safe(&bcl->outqueue, skb, tmp) { + if (more(buf_seqno(skb), acked)) + break; - while (crs && less_eq(buf_seqno(crs), acked)) { - next = crs->next; - - if (crs != bcl->next_out) - bcbuf_decr_acks(crs); - else { - bcbuf_set_acks(crs, 0); + next = tipc_skb_queue_next(&bcl->outqueue, skb); + if (skb != bcl->next_out) { + bcbuf_decr_acks(skb); + } else { + bcbuf_set_acks(skb, 0); bcl->next_out = next; bclink_set_last_sent(); } - if (bcbuf_acks(crs) == 0) { - bcl->first_out = next; - bcl->out_queue_size--; - kfree_skb(crs); + if (bcbuf_acks(skb) == 0) { + __skb_unlink(skb, &bcl->outqueue); + kfree_skb(skb); released = 1; } - crs = next; } n_ptr->bclink.acked = acked; /* Try resolving broadcast link congestion, if necessary */ - if (unlikely(bcl->next_out)) { - tipc_link_push_queue(bcl); + tipc_link_push_packets(bcl); bclink_set_last_sent(); } if (unlikely(released && !skb_queue_empty(&bcl->waiting_sks))) @@ -327,19 +331,16 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent) struct sk_buff *buf; /* Ignore "stale" link state info */ - if (less_eq(last_sent, n_ptr->bclink.last_in)) return; /* Update link synchronization state; quit if in sync */ - bclink_update_last_sent(n_ptr, last_sent); if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in) return; /* Update out-of-sync state; quit if loss is still unconfirmed */ - if ((++n_ptr->bclink.oos_state) == 1) { if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2)) return; @@ -347,15 +348,15 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent) } /* Don't NACK if one has been recently sent (or seen) */ - if (n_ptr->bclink.oos_state & 0x1) return; /* Send NACK */ - buf = tipc_buf_acquire(INT_H_SIZE); if (buf) { struct tipc_msg *msg = buf_msg(buf); + struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue); + u32 to = skb ? 
buf_seqno(skb) - 1 : n_ptr->bclink.last_sent; tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, n_ptr->addr); @@ -363,9 +364,7 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent) msg_set_mc_netid(msg, tipc_net_id); msg_set_bcast_ack(msg, n_ptr->bclink.last_in); msg_set_bcgap_after(msg, n_ptr->bclink.last_in); - msg_set_bcgap_to(msg, n_ptr->bclink.deferred_head - ? buf_seqno(n_ptr->bclink.deferred_head) - 1 - : n_ptr->bclink.last_sent); + msg_set_bcgap_to(msg, to); tipc_bclink_lock(); tipc_bearer_send(MAX_BEARERS, buf, NULL); @@ -402,20 +401,20 @@ static void bclink_peek_nack(struct tipc_msg *msg) /* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster * and to identified node local sockets - * @buf: chain of buffers containing message + * @list: chain of buffers containing message * Consumes the buffer chain, except when returning -ELINKCONG * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE */ -int tipc_bclink_xmit(struct sk_buff *buf) +int tipc_bclink_xmit(struct sk_buff_head *list) { int rc = 0; int bc = 0; - struct sk_buff *clbuf; + struct sk_buff *skb; /* Prepare clone of message for local node */ - clbuf = tipc_msg_reassemble(buf); - if (unlikely(!clbuf)) { - kfree_skb_list(buf); + skb = tipc_msg_reassemble(list); + if (unlikely(!skb)) { + __skb_queue_purge(list); return -EHOSTUNREACH; } @@ -423,11 +422,13 @@ int tipc_bclink_xmit(struct sk_buff *buf) if (likely(bclink)) { tipc_bclink_lock(); if (likely(bclink->bcast_nodes.count)) { - rc = __tipc_link_xmit(bcl, buf); + rc = __tipc_link_xmit(bcl, list); if (likely(!rc)) { + u32 len = skb_queue_len(&bcl->outqueue); + bclink_set_last_sent(); bcl->stats.queue_sz_counts++; - bcl->stats.accu_queue_sz += bcl->out_queue_size; + bcl->stats.accu_queue_sz += len; } bc = 1; } @@ -435,13 +436,13 @@ int tipc_bclink_xmit(struct sk_buff *buf) } if (unlikely(!bc)) - kfree_skb_list(buf); + __skb_queue_purge(list); /* Deliver message clone */ if (likely(!rc)) - tipc_sk_mcast_rcv(clbuf); + tipc_sk_mcast_rcv(skb); else - kfree_skb(clbuf); + kfree_skb(skb); return rc; } @@ -462,7 +463,6 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno) * Unicast an ACK periodically, ensuring that * all nodes in the cluster don't ACK at the same time */ - if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) { tipc_link_proto_xmit(node->active_links[node->addr & 1], STATE_MSG, 0, 0, 0, 0, 0); @@ -484,7 +484,6 @@ void tipc_bclink_rcv(struct sk_buff *buf) int deferred = 0; /* Screen out unwanted broadcast messages */ - if (msg_mc_netid(msg) != tipc_net_id) goto exit; @@ -497,7 +496,6 @@ void tipc_bclink_rcv(struct sk_buff *buf) goto unlock; /* Handle broadcast protocol message */ - if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) { if (msg_type(msg) != STATE_MSG) goto unlock; @@ -518,14 +516,12 @@ void tipc_bclink_rcv(struct sk_buff *buf) } /* Handle in-sequence broadcast message */ - seqno = msg_seqno(msg); next_in = mod(node->bclink.last_in + 1); if (likely(seqno == next_in)) { receive: /* Deliver message to destination */ - if (likely(msg_isdata(msg))) { tipc_bclink_lock(); bclink_accept_pkt(node, seqno); @@ -574,7 +570,6 @@ receive: buf = NULL; /* Determine new synchronization state */ - tipc_node_lock(node); if (unlikely(!tipc_node_is_up(node))) goto unlock; @@ -582,33 +577,26 @@ receive: if (node->bclink.last_in == node->bclink.last_sent) goto unlock; - if (!node->bclink.deferred_head) { + if (skb_queue_empty(&node->bclink.deferred_queue)) { node->bclink.oos_state = 1; goto unlock; 
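/*
 * (Aside: the conversion in this file replaces hand-rolled buf->next
 * chains with sk_buff_head queues. The helpers mirror list.h, so deleting
 * while walking needs the _safe variant, the same shape as the
 * skb_queue_walk_from_safe() loop in tipc_bclink_acknowledge() above;
 * acked() here is a hypothetical predicate.)
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(&bcl->outqueue, skb, tmp) {
 *		if (acked(skb))
 *			__skb_unlink(skb, &bcl->outqueue);
 *	}
 */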
} - msg = buf_msg(node->bclink.deferred_head); + msg = buf_msg(skb_peek(&node->bclink.deferred_queue)); seqno = msg_seqno(msg); next_in = mod(next_in + 1); if (seqno != next_in) goto unlock; /* Take in-sequence message from deferred queue & deliver it */ - - buf = node->bclink.deferred_head; - node->bclink.deferred_head = buf->next; - buf->next = NULL; - node->bclink.deferred_size--; + buf = __skb_dequeue(&node->bclink.deferred_queue); goto receive; } /* Handle out-of-sequence broadcast message */ - if (less(next_in, seqno)) { - deferred = tipc_link_defer_pkt(&node->bclink.deferred_head, - &node->bclink.deferred_tail, + deferred = tipc_link_defer_pkt(&node->bclink.deferred_queue, buf); - node->bclink.deferred_size += deferred; bclink_update_last_sent(node, seqno); buf = NULL; } @@ -767,6 +755,118 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action) tipc_bclink_unlock(); } +static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb, + struct tipc_stats *stats) +{ + int i; + struct nlattr *nest; + + struct nla_map { + __u32 key; + __u32 val; + }; + + struct nla_map map[] = { + {TIPC_NLA_STATS_RX_INFO, stats->recv_info}, + {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments}, + {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented}, + {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles}, + {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled}, + {TIPC_NLA_STATS_TX_INFO, stats->sent_info}, + {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments}, + {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented}, + {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles}, + {TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled}, + {TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks}, + {TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv}, + {TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks}, + {TIPC_NLA_STATS_TX_ACKS, stats->sent_acks}, + {TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted}, + {TIPC_NLA_STATS_DUPLICATES, stats->duplicates}, + {TIPC_NLA_STATS_LINK_CONGS, stats->link_congs}, + {TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz}, + {TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ? 
+ (stats->accu_queue_sz / stats->queue_sz_counts) : 0} + }; + + nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS); + if (!nest) + return -EMSGSIZE; + + for (i = 0; i < ARRAY_SIZE(map); i++) + if (nla_put_u32(skb, map[i].key, map[i].val)) + goto msg_full; + + nla_nest_end(skb, nest); + + return 0; +msg_full: + nla_nest_cancel(skb, nest); + + return -EMSGSIZE; +} + +int tipc_nl_add_bc_link(struct tipc_nl_msg *msg) +{ + int err; + void *hdr; + struct nlattr *attrs; + struct nlattr *prop; + + if (!bcl) + return 0; + + tipc_bclink_lock(); + + hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, + NLM_F_MULTI, TIPC_NL_LINK_GET); + if (!hdr) + return -EMSGSIZE; + + attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK); + if (!attrs) + goto msg_full; + + /* The broadcast link is always up */ + if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP)) + goto attr_msg_full; + + if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST)) + goto attr_msg_full; + if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name)) + goto attr_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->next_in_no)) + goto attr_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->next_out_no)) + goto attr_msg_full; + + prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP); + if (!prop) + goto attr_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->queue_limit[0])) + goto prop_msg_full; + nla_nest_end(msg->skb, prop); + + err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats); + if (err) + goto attr_msg_full; + + tipc_bclink_unlock(); + nla_nest_end(msg->skb, attrs); + genlmsg_end(msg->skb, hdr); + + return 0; + +prop_msg_full: + nla_nest_cancel(msg->skb, prop); +attr_msg_full: + nla_nest_cancel(msg->skb, attrs); +msg_full: + tipc_bclink_unlock(); + genlmsg_cancel(msg->skb, hdr); + + return -EMSGSIZE; +} int tipc_bclink_stats(char *buf, const u32 buf_size) { @@ -851,7 +951,9 @@ int tipc_bclink_init(void) sprintf(bcbearer->media.name, "tipc-broadcast"); spin_lock_init(&bclink->lock); - __skb_queue_head_init(&bcl->waiting_sks); + __skb_queue_head_init(&bcl->outqueue); + __skb_queue_head_init(&bcl->deferred_queue); + skb_queue_head_init(&bcl->waiting_sks); bcl->next_out_no = 1; spin_lock_init(&bclink->node.lock); __skb_queue_head_init(&bclink->node.waiting_sks); diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h index e7b0f85a82bc82a19573b0f3f4158814ba42b7bd..644d79129fbaeb1ac3022fd02f9a6c4dafca9b7d 100644 --- a/net/tipc/bcast.h +++ b/net/tipc/bcast.h @@ -37,6 +37,8 @@ #ifndef _TIPC_BCAST_H #define _TIPC_BCAST_H +#include "netlink.h" + #define MAX_NODES 4096 #define WSIZE 32 #define TIPC_BCLINK_RESET 1 @@ -98,6 +100,8 @@ int tipc_bclink_reset_stats(void); int tipc_bclink_set_queue_limits(u32 limit); void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action); uint tipc_bclink_get_mtu(void); -int tipc_bclink_xmit(struct sk_buff *buf); +int tipc_bclink_xmit(struct sk_buff_head *list); void tipc_bclink_wakeup_users(void); +int tipc_nl_add_bc_link(struct tipc_nl_msg *msg); + #endif diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 264474394f9f75994205b751e734b632d21fbc02..463db5b15b8b6ca662447a63b99dcdbe75de0531 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -1,7 +1,7 @@ /* * net/tipc/bearer.c: TIPC bearer code * - * Copyright (c) 1996-2006, 2013, Ericsson AB + * Copyright (c) 1996-2006, 2013-2014, Ericsson AB * Copyright (c) 2004-2006, 2010-2013, Wind River Systems * All rights reserved. 
* @@ -37,6 +37,7 @@ #include "core.h" #include "config.h" #include "bearer.h" +#include "link.h" #include "discover.h" #define MAX_ADDR_STR 60 @@ -49,6 +50,23 @@ static struct tipc_media * const media_info_array[] = { NULL }; +static const struct nla_policy +tipc_nl_bearer_policy[TIPC_NLA_BEARER_MAX + 1] = { + [TIPC_NLA_BEARER_UNSPEC] = { .type = NLA_UNSPEC }, + [TIPC_NLA_BEARER_NAME] = { + .type = NLA_STRING, + .len = TIPC_MAX_BEARER_NAME + }, + [TIPC_NLA_BEARER_PROP] = { .type = NLA_NESTED }, + [TIPC_NLA_BEARER_DOMAIN] = { .type = NLA_U32 } +}; + +static const struct nla_policy tipc_nl_media_policy[TIPC_NLA_MEDIA_MAX + 1] = { + [TIPC_NLA_MEDIA_UNSPEC] = { .type = NLA_UNSPEC }, + [TIPC_NLA_MEDIA_NAME] = { .type = NLA_STRING }, + [TIPC_NLA_MEDIA_PROP] = { .type = NLA_NESTED } +}; + struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1]; static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down); @@ -627,3 +645,430 @@ void tipc_bearer_stop(void) } } } + +/* Caller should hold rtnl_lock to protect the bearer */ +static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg, + struct tipc_bearer *bearer) +{ + void *hdr; + struct nlattr *attrs; + struct nlattr *prop; + + hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, + NLM_F_MULTI, TIPC_NL_BEARER_GET); + if (!hdr) + return -EMSGSIZE; + + attrs = nla_nest_start(msg->skb, TIPC_NLA_BEARER); + if (!attrs) + goto msg_full; + + if (nla_put_string(msg->skb, TIPC_NLA_BEARER_NAME, bearer->name)) + goto attr_msg_full; + + prop = nla_nest_start(msg->skb, TIPC_NLA_BEARER_PROP); + if (!prop) + goto prop_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, bearer->priority)) + goto prop_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, bearer->tolerance)) + goto prop_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bearer->window)) + goto prop_msg_full; + + nla_nest_end(msg->skb, prop); + nla_nest_end(msg->skb, attrs); + genlmsg_end(msg->skb, hdr); + + return 0; + +prop_msg_full: + nla_nest_cancel(msg->skb, prop); +attr_msg_full: + nla_nest_cancel(msg->skb, attrs); +msg_full: + genlmsg_cancel(msg->skb, hdr); + + return -EMSGSIZE; +} + +int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + int err; + int i = cb->args[0]; + struct tipc_bearer *bearer; + struct tipc_nl_msg msg; + + if (i == MAX_BEARERS) + return 0; + + msg.skb = skb; + msg.portid = NETLINK_CB(cb->skb).portid; + msg.seq = cb->nlh->nlmsg_seq; + + rtnl_lock(); + for (i = 0; i < MAX_BEARERS; i++) { + bearer = rtnl_dereference(bearer_list[i]); + if (!bearer) + continue; + + err = __tipc_nl_add_bearer(&msg, bearer); + if (err) + break; + } + rtnl_unlock(); + + cb->args[0] = i; + return skb->len; +} + +int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info) +{ + int err; + char *name; + struct sk_buff *rep; + struct tipc_bearer *bearer; + struct tipc_nl_msg msg; + struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; + + if (!info->attrs[TIPC_NLA_BEARER]) + return -EINVAL; + + err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX, + info->attrs[TIPC_NLA_BEARER], + tipc_nl_bearer_policy); + if (err) + return err; + + if (!attrs[TIPC_NLA_BEARER_NAME]) + return -EINVAL; + name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); + + rep = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!rep) + return -ENOMEM; + + msg.skb = rep; + msg.portid = info->snd_portid; + msg.seq = info->snd_seq; + + rtnl_lock(); + bearer = tipc_bearer_find(name); + if (!bearer) { + err = -EINVAL; + goto err_out; + } + + err = __tipc_nl_add_bearer(&msg, bearer); + 
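/*
 * (Aside: netlink dumps are restartable, so these dump callbacks use
 * cb->args[0] as a resume cursor; the kernel re-invokes the callback
 * until it returns 0. The general shape, condensed:)
 *
 *	int i = cb->args[0];		// where the previous pass stopped
 *
 *	if (i == MAX_BEARERS)
 *		return 0;		// nothing left, terminate the dump
 *	for (; i < MAX_BEARERS; i++) {
 *		// ...add one entry; break if the skb fills up...
 *	}
 *	cb->args[0] = i;		// resume point for the next pass
 *	return skb->len;
 */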
if (err) + goto err_out; + rtnl_unlock(); + + return genlmsg_reply(rep, info); +err_out: + rtnl_unlock(); + nlmsg_free(rep); + + return err; +} + +int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info) +{ + int err; + char *name; + struct tipc_bearer *bearer; + struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; + + if (!info->attrs[TIPC_NLA_BEARER]) + return -EINVAL; + + err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX, + info->attrs[TIPC_NLA_BEARER], + tipc_nl_bearer_policy); + if (err) + return err; + + if (!attrs[TIPC_NLA_BEARER_NAME]) + return -EINVAL; + + name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); + + rtnl_lock(); + bearer = tipc_bearer_find(name); + if (!bearer) { + rtnl_unlock(); + return -EINVAL; + } + + bearer_disable(bearer, false); + rtnl_unlock(); + + return 0; +} + +int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) +{ + int err; + char *bearer; + struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; + u32 domain; + u32 prio; + + prio = TIPC_MEDIA_LINK_PRI; + domain = tipc_own_addr & TIPC_CLUSTER_MASK; + + if (!info->attrs[TIPC_NLA_BEARER]) + return -EINVAL; + + err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX, + info->attrs[TIPC_NLA_BEARER], + tipc_nl_bearer_policy); + if (err) + return err; + + if (!attrs[TIPC_NLA_BEARER_NAME]) + return -EINVAL; + + bearer = nla_data(attrs[TIPC_NLA_BEARER_NAME]); + + if (attrs[TIPC_NLA_BEARER_DOMAIN]) + domain = nla_get_u32(attrs[TIPC_NLA_BEARER_DOMAIN]); + + if (attrs[TIPC_NLA_BEARER_PROP]) { + struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; + + err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP], + props); + if (err) + return err; + + if (props[TIPC_NLA_PROP_PRIO]) + prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); + } + + rtnl_lock(); + err = tipc_enable_bearer(bearer, domain, prio); + if (err) { + rtnl_unlock(); + return err; + } + rtnl_unlock(); + + return 0; +} + +int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) +{ + int err; + char *name; + struct tipc_bearer *b; + struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; + + if (!info->attrs[TIPC_NLA_BEARER]) + return -EINVAL; + + err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX, + info->attrs[TIPC_NLA_BEARER], + tipc_nl_bearer_policy); + if (err) + return err; + + if (!attrs[TIPC_NLA_BEARER_NAME]) + return -EINVAL; + name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); + + rtnl_lock(); + b = tipc_bearer_find(name); + if (!b) { + rtnl_unlock(); + return -EINVAL; + } + + if (attrs[TIPC_NLA_BEARER_PROP]) { + struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; + + err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP], + props); + if (err) { + rtnl_unlock(); + return err; + } + + if (props[TIPC_NLA_PROP_TOL]) + b->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]); + if (props[TIPC_NLA_PROP_PRIO]) + b->priority = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); + if (props[TIPC_NLA_PROP_WIN]) + b->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]); + } + rtnl_unlock(); + + return 0; +} + +static int __tipc_nl_add_media(struct tipc_nl_msg *msg, + struct tipc_media *media) +{ + void *hdr; + struct nlattr *attrs; + struct nlattr *prop; + + hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, + NLM_F_MULTI, TIPC_NL_MEDIA_GET); + if (!hdr) + return -EMSGSIZE; + + attrs = nla_nest_start(msg->skb, TIPC_NLA_MEDIA); + if (!attrs) + goto msg_full; + + if (nla_put_string(msg->skb, TIPC_NLA_MEDIA_NAME, media->name)) + goto attr_msg_full; + + prop = nla_nest_start(msg->skb, TIPC_NLA_MEDIA_PROP); + if (!prop) + goto prop_msg_full; + if (nla_put_u32(msg->skb, 
TIPC_NLA_PROP_PRIO, media->priority)) + goto prop_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, media->tolerance)) + goto prop_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, media->window)) + goto prop_msg_full; + + nla_nest_end(msg->skb, prop); + nla_nest_end(msg->skb, attrs); + genlmsg_end(msg->skb, hdr); + + return 0; + +prop_msg_full: + nla_nest_cancel(msg->skb, prop); +attr_msg_full: + nla_nest_cancel(msg->skb, attrs); +msg_full: + genlmsg_cancel(msg->skb, hdr); + + return -EMSGSIZE; +} + +int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + int err; + int i = cb->args[0]; + struct tipc_nl_msg msg; + + if (i == MAX_MEDIA) + return 0; + + msg.skb = skb; + msg.portid = NETLINK_CB(cb->skb).portid; + msg.seq = cb->nlh->nlmsg_seq; + + rtnl_lock(); + for (; media_info_array[i] != NULL; i++) { + err = __tipc_nl_add_media(&msg, media_info_array[i]); + if (err) + break; + } + rtnl_unlock(); + + cb->args[0] = i; + return skb->len; +} + +int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info) +{ + int err; + char *name; + struct tipc_nl_msg msg; + struct tipc_media *media; + struct sk_buff *rep; + struct nlattr *attrs[TIPC_NLA_MEDIA_MAX + 1]; + + if (!info->attrs[TIPC_NLA_MEDIA]) + return -EINVAL; + + err = nla_parse_nested(attrs, TIPC_NLA_MEDIA_MAX, + info->attrs[TIPC_NLA_MEDIA], + tipc_nl_media_policy); + if (err) + return err; + + if (!attrs[TIPC_NLA_MEDIA_NAME]) + return -EINVAL; + name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]); + + rep = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!rep) + return -ENOMEM; + + msg.skb = rep; + msg.portid = info->snd_portid; + msg.seq = info->snd_seq; + + rtnl_lock(); + media = tipc_media_find(name); + if (!media) { + err = -EINVAL; + goto err_out; + } + + err = __tipc_nl_add_media(&msg, media); + if (err) + goto err_out; + rtnl_unlock(); + + return genlmsg_reply(rep, info); +err_out: + rtnl_unlock(); + nlmsg_free(rep); + + return err; +} + +int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info) +{ + int err; + char *name; + struct tipc_media *m; + struct nlattr *attrs[TIPC_NLA_MEDIA_MAX + 1]; + + if (!info->attrs[TIPC_NLA_MEDIA]) + return -EINVAL; + + err = nla_parse_nested(attrs, TIPC_NLA_MEDIA_MAX, + info->attrs[TIPC_NLA_MEDIA], + tipc_nl_media_policy); + if (err) + return err; + + if (!attrs[TIPC_NLA_MEDIA_NAME]) + return -EINVAL; + name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]); + + rtnl_lock(); + m = tipc_media_find(name); + if (!m) { + rtnl_unlock(); + return -EINVAL; + } + + if (attrs[TIPC_NLA_MEDIA_PROP]) { + struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; + + err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_MEDIA_PROP], + props); + if (err) { + rtnl_unlock(); + return err; + } + + if (props[TIPC_NLA_PROP_TOL]) + m->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]); + if (props[TIPC_NLA_PROP_PRIO]) + m->priority = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); + if (props[TIPC_NLA_PROP_WIN]) + m->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]); + } + rtnl_unlock(); + + return 0; +} diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index 78fccc49de23c1e6dc20c1799d629c4083f6f80c..2c1230ac5dfe8d66642b30e144d8015ef6a76ed5 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h @@ -1,7 +1,7 @@ /* * net/tipc/bearer.h: Include file for TIPC bearer code * - * Copyright (c) 1996-2006, 2013, Ericsson AB + * Copyright (c) 1996-2006, 2013-2014, Ericsson AB * Copyright (c) 2005, 2010-2011, Wind River Systems * All rights reserved. 
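The bearer and media handlers above are ordinary generic netlink doit/dumpit callbacks, so the new API can be driven from user space with any genl library. The fragment below is a minimal sketch using libnl-3; the "TIPCv2" family name and the TIPC_NL_*/TIPC_NLA_* constants are assumed to come from the uapi header introduced by this series (linux/tipc_netlink.h), the bearer name "eth:eth0" is an invented example, and reply parsing is omitted.

/* Illustrative user-space query of one bearer via TIPC_NL_BEARER_GET.
 * Assumes libnl-3 (libnl-genl) and the "TIPCv2" genl family name.
 */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/tipc_netlink.h>

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	struct nlattr *nest;
	int family;

	if (!sk || genl_connect(sk))
		return 1;
	family = genl_ctrl_resolve(sk, "TIPCv2");
	if (family < 0)
		return 1;

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    TIPC_NL_BEARER_GET, 1);

	/* Nested TIPC_NLA_BEARER block, matching what tipc_nl_bearer_get()
	 * parses against tipc_nl_bearer_policy above.
	 */
	nest = nla_nest_start(msg, TIPC_NLA_BEARER);
	nla_put_string(msg, TIPC_NLA_BEARER_NAME, "eth:eth0");
	nla_nest_end(msg, nest);

	nl_send_auto(sk, msg);	/* reply handling omitted for brevity */
	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}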
* @@ -38,6 +38,8 @@ #define _TIPC_BEARER_H #include "bcast.h" +#include "netlink.h" +#include #define MAX_BEARERS 2 #define MAX_MEDIA 2 @@ -163,7 +165,7 @@ extern struct tipc_bearer __rcu *bearer_list[]; * TIPC routines available to supported media types */ -void tipc_rcv(struct sk_buff *buf, struct tipc_bearer *tb_ptr); +void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *tb_ptr); int tipc_enable_bearer(const char *bearer_name, u32 disc_domain, u32 priority); int tipc_disable_bearer(const char *name); @@ -176,6 +178,16 @@ extern struct tipc_media eth_media_info; extern struct tipc_media ib_media_info; #endif +int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info); +int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info); +int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb); +int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info); +int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info); + +int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb); +int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info); +int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info); + int tipc_media_set_priority(const char *name, u32 new_value); int tipc_media_set_window(const char *name, u32 new_value); void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a); diff --git a/net/tipc/core.h b/net/tipc/core.h index f773b148722f7e51195d525a55e2ae18577c3c3e..84602137ce20f1fe051d7c3ebd2a8fafa10c2b43 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h @@ -41,6 +41,7 @@ #include #include +#include #include #include #include @@ -191,6 +192,7 @@ struct tipc_skb_cb { struct sk_buff *tail; bool deferred; bool wakeup_pending; + bool bundling; u16 chain_sz; u16 chain_imp; }; diff --git a/net/tipc/link.c b/net/tipc/link.c index 1db162aa64a5d0540cfc3538daa83056769776fc..23bcc1132365128de97ee2de41c9465b1b5d03ff 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -36,10 +36,12 @@ #include "core.h" #include "link.h" +#include "bcast.h" #include "socket.h" #include "name_distr.h" #include "discover.h" #include "config.h" +#include "netlink.h" #include @@ -50,6 +52,30 @@ static const char *link_co_err = "Link changeover error, "; static const char *link_rst_msg = "Resetting link "; static const char *link_unk_evt = "Unknown link event "; +static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = { + [TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC }, + [TIPC_NLA_LINK_NAME] = { + .type = NLA_STRING, + .len = TIPC_MAX_LINK_NAME + }, + [TIPC_NLA_LINK_MTU] = { .type = NLA_U32 }, + [TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG }, + [TIPC_NLA_LINK_UP] = { .type = NLA_FLAG }, + [TIPC_NLA_LINK_ACTIVE] = { .type = NLA_FLAG }, + [TIPC_NLA_LINK_PROP] = { .type = NLA_NESTED }, + [TIPC_NLA_LINK_STATS] = { .type = NLA_NESTED }, + [TIPC_NLA_LINK_RX] = { .type = NLA_U32 }, + [TIPC_NLA_LINK_TX] = { .type = NLA_U32 } +}; + +/* Properties valid for media, bearer and link */ +static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = { + [TIPC_NLA_PROP_UNSPEC] = { .type = NLA_UNSPEC }, + [TIPC_NLA_PROP_PRIO] = { .type = NLA_U32 }, + [TIPC_NLA_PROP_TOL] = { .type = NLA_U32 }, + [TIPC_NLA_PROP_WIN] = { .type = NLA_U32 } +}; + /* * Out-of-range value for link session numbers */ @@ -123,18 +149,6 @@ static void link_init_max_pkt(struct tipc_link *l_ptr) l_ptr->max_pkt_probes = 0; } -static u32 link_next_sent(struct tipc_link *l_ptr) -{ - if (l_ptr->next_out) - return 
buf_seqno(l_ptr->next_out); - return mod(l_ptr->next_out_no); -} - -static u32 link_last_sent(struct tipc_link *l_ptr) -{ - return mod(link_next_sent(l_ptr) - 1); -} - /* * Simple non-static link routines (i.e. referenced outside this file) */ @@ -157,14 +171,17 @@ int tipc_link_is_active(struct tipc_link *l_ptr) */ static void link_timeout(struct tipc_link *l_ptr) { + struct sk_buff *skb; + tipc_node_lock(l_ptr->owner); /* update counters used in statistical profiling of send traffic */ - l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size; + l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->outqueue); l_ptr->stats.queue_sz_counts++; - if (l_ptr->first_out) { - struct tipc_msg *msg = buf_msg(l_ptr->first_out); + skb = skb_peek(&l_ptr->outqueue); + if (skb) { + struct tipc_msg *msg = buf_msg(skb); u32 length = msg_size(msg); if ((msg_user(msg) == MSG_FRAGMENTER) && @@ -192,11 +209,10 @@ static void link_timeout(struct tipc_link *l_ptr) } /* do all other link processing performed on a periodic basis */ - link_state_event(l_ptr, TIMEOUT_EVT); if (l_ptr->next_out) - tipc_link_push_queue(l_ptr); + tipc_link_push_packets(l_ptr); tipc_node_unlock(l_ptr->owner); } @@ -224,9 +240,10 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, char addr_string[16]; u32 peer = n_ptr->addr; - if (n_ptr->link_cnt >= 2) { + if (n_ptr->link_cnt >= MAX_BEARERS) { tipc_addr_string_fill(addr_string, n_ptr->addr); - pr_err("Attempt to establish third link to %s\n", addr_string); + pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n", + n_ptr->link_cnt, addr_string, MAX_BEARERS); return NULL; } @@ -274,7 +291,9 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, link_init_max_pkt(l_ptr); l_ptr->next_out_no = 1; - __skb_queue_head_init(&l_ptr->waiting_sks); + __skb_queue_head_init(&l_ptr->outqueue); + __skb_queue_head_init(&l_ptr->deferred_queue); + skb_queue_head_init(&l_ptr->waiting_sks); link_reset_statistics(l_ptr); @@ -339,7 +358,7 @@ static bool link_schedule_user(struct tipc_link *link, u32 oport, return false; TIPC_SKB_CB(buf)->chain_sz = chain_sz; TIPC_SKB_CB(buf)->chain_imp = imp; - __skb_queue_tail(&link->waiting_sks, buf); + skb_queue_tail(&link->waiting_sks, buf); link->stats.link_congs++; return true; } @@ -352,29 +371,18 @@ static bool link_schedule_user(struct tipc_link *link, u32 oport, */ static void link_prepare_wakeup(struct tipc_link *link) { - struct sk_buff_head *wq = &link->waiting_sks; - struct sk_buff *buf; - uint pend_qsz = link->out_queue_size; + uint pend_qsz = skb_queue_len(&link->outqueue); + struct sk_buff *skb, *tmp; - for (buf = skb_peek(wq); buf; buf = skb_peek(wq)) { - if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(buf)->chain_imp]) + skb_queue_walk_safe(&link->waiting_sks, skb, tmp) { + if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp]) break; - pend_qsz += TIPC_SKB_CB(buf)->chain_sz; - __skb_queue_tail(&link->owner->waiting_sks, __skb_dequeue(wq)); + pend_qsz += TIPC_SKB_CB(skb)->chain_sz; + skb_unlink(skb, &link->waiting_sks); + skb_queue_tail(&link->owner->waiting_sks, skb); } } -/** - * link_release_outqueue - purge link's outbound message queue - * @l_ptr: pointer to link - */ -static void link_release_outqueue(struct tipc_link *l_ptr) -{ - kfree_skb_list(l_ptr->first_out); - l_ptr->first_out = NULL; - l_ptr->out_queue_size = 0; -} - /** * tipc_link_reset_fragments - purge link's inbound message fragments queue * @l_ptr: pointer to link @@ -391,11 +399,9 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr) */ void 
tipc_link_purge_queues(struct tipc_link *l_ptr) { - kfree_skb_list(l_ptr->oldest_deferred_in); - kfree_skb_list(l_ptr->first_out); + __skb_queue_purge(&l_ptr->deferred_queue); + __skb_queue_purge(&l_ptr->outqueue); tipc_link_reset_fragments(l_ptr); - kfree_skb(l_ptr->proto_msg_queue); - l_ptr->proto_msg_queue = NULL; } void tipc_link_reset(struct tipc_link *l_ptr) @@ -427,25 +433,16 @@ void tipc_link_reset(struct tipc_link *l_ptr) } /* Clean up all queues: */ - link_release_outqueue(l_ptr); - kfree_skb(l_ptr->proto_msg_queue); - l_ptr->proto_msg_queue = NULL; - kfree_skb_list(l_ptr->oldest_deferred_in); + __skb_queue_purge(&l_ptr->outqueue); + __skb_queue_purge(&l_ptr->deferred_queue); if (!skb_queue_empty(&l_ptr->waiting_sks)) { skb_queue_splice_init(&l_ptr->waiting_sks, &owner->waiting_sks); owner->action_flags |= TIPC_WAKEUP_USERS; } - l_ptr->retransm_queue_head = 0; - l_ptr->retransm_queue_size = 0; - l_ptr->last_out = NULL; - l_ptr->first_out = NULL; l_ptr->next_out = NULL; l_ptr->unacked_window = 0; l_ptr->checkpoint = 1; l_ptr->next_out_no = 1; - l_ptr->deferred_inqueue_sz = 0; - l_ptr->oldest_deferred_in = NULL; - l_ptr->newest_deferred_in = NULL; l_ptr->fsm_msg_cnt = 0; l_ptr->stale_count = 0; link_reset_statistics(l_ptr); @@ -667,9 +664,10 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) * - For all other messages we discard the buffer and return -EHOSTUNREACH * - For TIPC internal messages we also reset the link */ -static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf) +static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list) { - struct tipc_msg *msg = buf_msg(buf); + struct sk_buff *skb = skb_peek(list); + struct tipc_msg *msg = buf_msg(skb); uint imp = tipc_msg_tot_importance(msg); u32 oport = msg_tot_origport(msg); @@ -682,30 +680,30 @@ static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf) goto drop; if (unlikely(msg_reroute_cnt(msg))) goto drop; - if (TIPC_SKB_CB(buf)->wakeup_pending) + if (TIPC_SKB_CB(skb)->wakeup_pending) return -ELINKCONG; - if (link_schedule_user(link, oport, TIPC_SKB_CB(buf)->chain_sz, imp)) + if (link_schedule_user(link, oport, skb_queue_len(list), imp)) return -ELINKCONG; drop: - kfree_skb_list(buf); + __skb_queue_purge(list); return -EHOSTUNREACH; } /** * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked * @link: link to use - * @buf: chain of buffers containing message + * @list: chain of buffers containing message + * * Consumes the buffer chain, except when returning -ELINKCONG * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket * user data messages) or -EHOSTUNREACH (all other messages/senders) * Only the socket functions tipc_send_stream() and tipc_send_packet() need * to act on the return value, since they may need to do more send attempts. 
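The ownership rule deserves emphasis: on 0 the list has been queued or sent, on -EMSGSIZE or -EHOSTUNREACH it has been purged, and only on -ELINKCONG does the caller still own the buffers and may retry. A minimal sketch of a compliant caller follows; wait_for_link_uncongested() is a hypothetical stand-in for the socket layer's congestion sleep, not a helper added by this patch.

/* Illustrative retry loop honouring the __tipc_link_xmit() contract. */
static int example_blocking_send(struct tipc_link *link,
				 struct sk_buff_head *list)
{
	int rc;

	for (;;) {
		rc = __tipc_link_xmit(link, list);
		if (rc != -ELINKCONG)
			return rc;	/* list was consumed (sent or purged) */
		/* Only on -ELINKCONG does the caller still own the list */
		wait_for_link_uncongested(link);	/* hypothetical */
	}
}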
*/ -int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf) +int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list) { - struct tipc_msg *msg = buf_msg(buf); + struct tipc_msg *msg = buf_msg(skb_peek(list)); uint psz = msg_size(msg); - uint qsz = link->out_queue_size; uint sndlim = link->queue_limit[0]; uint imp = tipc_msg_tot_importance(msg); uint mtu = link->max_pkt; @@ -713,71 +711,83 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf) uint seqno = link->next_out_no; uint bc_last_in = link->owner->bclink.last_in; struct tipc_media_addr *addr = &link->media_addr; - struct sk_buff *next = buf->next; + struct sk_buff_head *outqueue = &link->outqueue; + struct sk_buff *skb, *tmp; /* Match queue limits against msg importance: */ - if (unlikely(qsz >= link->queue_limit[imp])) - return tipc_link_cong(link, buf); + if (unlikely(skb_queue_len(outqueue) >= link->queue_limit[imp])) + return tipc_link_cong(link, list); /* Has valid packet limit been used ? */ if (unlikely(psz > mtu)) { - kfree_skb_list(buf); + __skb_queue_purge(list); return -EMSGSIZE; } /* Prepare each packet for sending, and add to outqueue: */ - while (buf) { - next = buf->next; - msg = buf_msg(buf); + skb_queue_walk_safe(list, skb, tmp) { + __skb_unlink(skb, list); + msg = buf_msg(skb); msg_set_word(msg, 2, ((ack << 16) | mod(seqno))); msg_set_bcast_ack(msg, bc_last_in); - if (!link->first_out) { - link->first_out = buf; - } else if (qsz < sndlim) { - link->last_out->next = buf; - } else if (tipc_msg_bundle(link->last_out, buf, mtu)) { + if (skb_queue_len(outqueue) < sndlim) { + __skb_queue_tail(outqueue, skb); + tipc_bearer_send(link->bearer_id, skb, addr); + link->next_out = NULL; + link->unacked_window = 0; + } else if (tipc_msg_bundle(outqueue, skb, mtu)) { link->stats.sent_bundled++; - buf = next; - next = buf->next; continue; - } else if (tipc_msg_make_bundle(&buf, mtu, link->addr)) { + } else if (tipc_msg_make_bundle(outqueue, skb, mtu, + link->addr)) { link->stats.sent_bundled++; link->stats.sent_bundles++; - link->last_out->next = buf; if (!link->next_out) - link->next_out = buf; + link->next_out = skb_peek_tail(outqueue); } else { - link->last_out->next = buf; + __skb_queue_tail(outqueue, skb); if (!link->next_out) - link->next_out = buf; - } - - /* Send packet if possible: */ - if (likely(++qsz <= sndlim)) { - tipc_bearer_send(link->bearer_id, buf, addr); - link->next_out = next; - link->unacked_window = 0; + link->next_out = skb; } seqno++; - link->last_out = buf; - buf = next; } link->next_out_no = seqno; - link->out_queue_size = qsz; return 0; } +static void skb2list(struct sk_buff *skb, struct sk_buff_head *list) +{ + __skb_queue_head_init(list); + __skb_queue_tail(list, skb); +} + +static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb) +{ + struct sk_buff_head head; + + skb2list(skb, &head); + return __tipc_link_xmit(link, &head); +} + +int tipc_link_xmit_skb(struct sk_buff *skb, u32 dnode, u32 selector) +{ + struct sk_buff_head head; + + skb2list(skb, &head); + return tipc_link_xmit(&head, dnode, selector); +} + /** * tipc_link_xmit() is the general link level function for message sending - * @buf: chain of buffers containing message + * @list: chain of buffers containing message * @dsz: amount of user data to be sent * @dnode: address of destination node * @selector: a number used for deterministic link selection * Consumes the buffer chain, except when returning -ELINKCONG * Returns 0 if success, otherwise errno: 
-ELINKCONG,-EHOSTUNREACH,-EMSGSIZE */ -int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector) +int tipc_link_xmit(struct sk_buff_head *list, u32 dnode, u32 selector) { struct tipc_link *link = NULL; struct tipc_node *node; @@ -788,17 +798,22 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector) tipc_node_lock(node); link = node->active_links[selector & 1]; if (link) - rc = __tipc_link_xmit(link, buf); + rc = __tipc_link_xmit(link, list); tipc_node_unlock(node); } if (link) return rc; - if (likely(in_own_node(dnode))) - return tipc_sk_rcv(buf); + if (likely(in_own_node(dnode))) { + /* As a node local message chain never contains more than one + * buffer, we just need to dequeue one SKB buffer from the + * head list. + */ + return tipc_sk_rcv(__skb_dequeue(list)); + } + __skb_queue_purge(list); - kfree_skb_list(buf); return rc; } @@ -812,17 +827,17 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector) */ static void tipc_link_sync_xmit(struct tipc_link *link) { - struct sk_buff *buf; + struct sk_buff *skb; struct tipc_msg *msg; - buf = tipc_buf_acquire(INT_H_SIZE); - if (!buf) + skb = tipc_buf_acquire(INT_H_SIZE); + if (!skb) return; - msg = buf_msg(buf); + msg = buf_msg(skb); tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, link->addr); msg_set_last_bcast(msg, link->owner->bclink.acked); - __tipc_link_xmit(link, buf); + __tipc_link_xmit_skb(link, skb); } /* @@ -842,85 +857,46 @@ static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf) kfree_skb(buf); } +struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + if (skb_queue_is_last(list, skb)) + return NULL; + return skb->next; +} + /* - * tipc_link_push_packet: Push one unsent packet to the media + * tipc_link_push_packets - push unsent packets to bearer + * + * Push out the unsent messages of a link where congestion + * has abated. Node is locked. 
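The rewritten push loop stops at the first packet whose distance from the head of the outqueue reaches the base send window, using wraparound-safe mod-2^16 arithmetic. As an invented worked example with queue_limit[0] == 50: if the oldest queued packet has seqno 65530 and the candidate has seqno 7, then mod(7 - 65530) == 13 < 50, so the candidate is still inside the window even though the sequence space has wrapped.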
+ * + * Called with node locked */ -static u32 tipc_link_push_packet(struct tipc_link *l_ptr) -{ - struct sk_buff *buf = l_ptr->first_out; - u32 r_q_size = l_ptr->retransm_queue_size; - u32 r_q_head = l_ptr->retransm_queue_head; - - /* Step to position where retransmission failed, if any, */ - /* consider that buffers may have been released in meantime */ - if (r_q_size && buf) { - u32 last = lesser(mod(r_q_head + r_q_size), - link_last_sent(l_ptr)); - u32 first = buf_seqno(buf); - - while (buf && less(first, r_q_head)) { - first = mod(first + 1); - buf = buf->next; - } - l_ptr->retransm_queue_head = r_q_head = first; - l_ptr->retransm_queue_size = r_q_size = mod(last - first); - } - - /* Continue retransmission now, if there is anything: */ - if (r_q_size && buf) { - msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); - msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); - tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr); - l_ptr->retransm_queue_head = mod(++r_q_head); - l_ptr->retransm_queue_size = --r_q_size; - l_ptr->stats.retransmitted++; - return 0; - } - - /* Send deferred protocol message, if any: */ - buf = l_ptr->proto_msg_queue; - if (buf) { - msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); - msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); - tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr); - l_ptr->unacked_window = 0; - kfree_skb(buf); - l_ptr->proto_msg_queue = NULL; - return 0; - } +void tipc_link_push_packets(struct tipc_link *l_ptr) +{ + struct sk_buff_head *outqueue = &l_ptr->outqueue; + struct sk_buff *skb = l_ptr->next_out; + struct tipc_msg *msg; + u32 next, first; - /* Send one deferred data message, if send window not full: */ - buf = l_ptr->next_out; - if (buf) { - struct tipc_msg *msg = buf_msg(buf); - u32 next = msg_seqno(msg); - u32 first = buf_seqno(l_ptr->first_out); + skb_queue_walk_from(outqueue, skb) { + msg = buf_msg(skb); + next = msg_seqno(msg); + first = buf_seqno(skb_peek(outqueue)); if (mod(next - first) < l_ptr->queue_limit[0]) { msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); - tipc_bearer_send(l_ptr->bearer_id, buf, - &l_ptr->media_addr); if (msg_user(msg) == MSG_BUNDLER) - msg_set_type(msg, BUNDLE_CLOSED); - l_ptr->next_out = buf->next; - return 0; + TIPC_SKB_CB(skb)->bundling = false; + tipc_bearer_send(l_ptr->bearer_id, skb, + &l_ptr->media_addr); + l_ptr->next_out = tipc_skb_queue_next(outqueue, skb); + } else { + break; } } - return 1; -} - -/* - * push_queue(): push out the unsent messages of a link where - * congestion has abated. 
Node is locked - */ -void tipc_link_push_queue(struct tipc_link *l_ptr) -{ - u32 res; - - do { - res = tipc_link_push_packet(l_ptr); - } while (!res); } void tipc_link_reset_all(struct tipc_node *node) @@ -984,20 +960,20 @@ static void link_retransmit_failure(struct tipc_link *l_ptr, } } -void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf, +void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb, u32 retransmits) { struct tipc_msg *msg; - if (!buf) + if (!skb) return; - msg = buf_msg(buf); + msg = buf_msg(skb); /* Detect repeated retransmit failures */ if (l_ptr->last_retransmitted == msg_seqno(msg)) { if (++l_ptr->stale_count > 100) { - link_retransmit_failure(l_ptr, buf); + link_retransmit_failure(l_ptr, skb); return; } } else { @@ -1005,38 +981,29 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf, l_ptr->stale_count = 1; } - while (retransmits && (buf != l_ptr->next_out) && buf) { - msg = buf_msg(buf); + skb_queue_walk_from(&l_ptr->outqueue, skb) { + if (!retransmits || skb == l_ptr->next_out) + break; + msg = buf_msg(skb); msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); - tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr); - buf = buf->next; + tipc_bearer_send(l_ptr->bearer_id, skb, &l_ptr->media_addr); retransmits--; l_ptr->stats.retransmitted++; } - - l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0; } -/** - * link_insert_deferred_queue - insert deferred messages back into receive chain - */ -static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr, - struct sk_buff *buf) +static void link_retrieve_defq(struct tipc_link *link, + struct sk_buff_head *list) { u32 seq_no; - if (l_ptr->oldest_deferred_in == NULL) - return buf; + if (skb_queue_empty(&link->deferred_queue)) + return; - seq_no = buf_seqno(l_ptr->oldest_deferred_in); - if (seq_no == mod(l_ptr->next_in_no)) { - l_ptr->newest_deferred_in->next = buf; - buf = l_ptr->oldest_deferred_in; - l_ptr->oldest_deferred_in = NULL; - l_ptr->deferred_inqueue_sz = 0; - } - return buf; + seq_no = buf_seqno(skb_peek(&link->deferred_queue)); + if (seq_no == mod(link->next_in_no)) + skb_queue_splice_tail_init(&link->deferred_queue, list); } /** @@ -1096,43 +1063,42 @@ static int link_recv_buf_validate(struct sk_buff *buf) /** * tipc_rcv - process TIPC packets/messages arriving from off-node - * @head: pointer to message buffer chain + * @skb: TIPC packet * @b_ptr: pointer to bearer message arrived on * * Invoked with no locks held. Bearer pointer must point to a valid bearer * structure (i.e. cannot be NULL), but bearer can be inactive. 
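On the receive side, the same mod-2^16 ordering decides which queued packets an incoming ack releases: a packet is covered when less_eq(seqno, ackd), i.e. when mod(ackd - seqno) < 32768. The standalone demo below mirrors the less_eq()/more() helpers reworked in link.h later in this series; the sequence numbers are invented.

/* User-space demo of TIPC's wraparound-safe sequence comparisons. */
#include <stdio.h>

static unsigned int mod(unsigned int x)
{
	return x & 0xffffu;
}

static int less_eq(unsigned int left, unsigned int right)
{
	return mod(right - left) < 32768u;
}

static int more(unsigned int left, unsigned int right)
{
	return !less_eq(left, right);
}

int main(void)
{
	/* The ack has wrapped past zero; packet 65534 is still covered */
	unsigned int ackd = 3, seqno = 65534;

	printf("less_eq(%u, %u) = %d\n", seqno, ackd, less_eq(seqno, ackd));
	printf("more(%u, %u) = %d\n", seqno, ackd, more(seqno, ackd));
	return 0;
}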
*/ -void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr) +void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr) { - while (head) { - struct tipc_node *n_ptr; - struct tipc_link *l_ptr; - struct sk_buff *crs; - struct sk_buff *buf = head; - struct tipc_msg *msg; - u32 seq_no; - u32 ackd; - u32 released = 0; + struct sk_buff_head head; + struct tipc_node *n_ptr; + struct tipc_link *l_ptr; + struct sk_buff *skb1, *tmp; + struct tipc_msg *msg; + u32 seq_no; + u32 ackd; + u32 released; - head = head->next; - buf->next = NULL; + skb2list(skb, &head); + while ((skb = __skb_dequeue(&head))) { /* Ensure message is well-formed */ - if (unlikely(!link_recv_buf_validate(buf))) + if (unlikely(!link_recv_buf_validate(skb))) goto discard; /* Ensure message data is a single contiguous unit */ - if (unlikely(skb_linearize(buf))) + if (unlikely(skb_linearize(skb))) goto discard; /* Handle arrival of a non-unicast link message */ - msg = buf_msg(buf); + msg = buf_msg(skb); if (unlikely(msg_non_seq(msg))) { if (msg_user(msg) == LINK_CONFIG) - tipc_disc_rcv(buf, b_ptr); + tipc_disc_rcv(skb, b_ptr); else - tipc_bclink_rcv(buf); + tipc_bclink_rcv(skb); continue; } @@ -1171,22 +1137,19 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr) if (n_ptr->bclink.recv_permitted) tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); - crs = l_ptr->first_out; - while ((crs != l_ptr->next_out) && - less_eq(buf_seqno(crs), ackd)) { - struct sk_buff *next = crs->next; - kfree_skb(crs); - crs = next; - released++; - } - if (released) { - l_ptr->first_out = crs; - l_ptr->out_queue_size -= released; + released = 0; + skb_queue_walk_safe(&l_ptr->outqueue, skb1, tmp) { + if (skb1 == l_ptr->next_out || + more(buf_seqno(skb1), ackd)) + break; + __skb_unlink(skb1, &l_ptr->outqueue); + kfree_skb(skb1); + released = 1; } /* Try sending any messages link endpoint has pending */ if (unlikely(l_ptr->next_out)) - tipc_link_push_queue(l_ptr); + tipc_link_push_packets(l_ptr); if (released && !skb_queue_empty(&l_ptr->waiting_sks)) { link_prepare_wakeup(l_ptr); @@ -1196,8 +1159,8 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr) /* Process the incoming packet */ if (unlikely(!link_working_working(l_ptr))) { if (msg_user(msg) == LINK_PROTOCOL) { - tipc_link_proto_rcv(l_ptr, buf); - head = link_insert_deferred_queue(l_ptr, head); + tipc_link_proto_rcv(l_ptr, skb); + link_retrieve_defq(l_ptr, &head); tipc_node_unlock(n_ptr); continue; } @@ -1207,8 +1170,7 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr) if (link_working_working(l_ptr)) { /* Re-insert buffer in front of queue */ - buf->next = head; - head = buf; + __skb_queue_head(&head, skb); tipc_node_unlock(n_ptr); continue; } @@ -1217,33 +1179,33 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr) /* Link is now in state WORKING_WORKING */ if (unlikely(seq_no != mod(l_ptr->next_in_no))) { - link_handle_out_of_seq_msg(l_ptr, buf); - head = link_insert_deferred_queue(l_ptr, head); + link_handle_out_of_seq_msg(l_ptr, skb); + link_retrieve_defq(l_ptr, &head); tipc_node_unlock(n_ptr); continue; } l_ptr->next_in_no++; - if (unlikely(l_ptr->oldest_deferred_in)) - head = link_insert_deferred_queue(l_ptr, head); + if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue))) + link_retrieve_defq(l_ptr, &head); if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) { l_ptr->stats.sent_acks++; tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); } - if (tipc_link_prepare_input(l_ptr, &buf)) { + if (tipc_link_prepare_input(l_ptr, 
&skb)) { tipc_node_unlock(n_ptr); continue; } tipc_node_unlock(n_ptr); - msg = buf_msg(buf); - if (tipc_link_input(l_ptr, buf) != 0) + + if (tipc_link_input(l_ptr, skb) != 0) goto discard; continue; unlock_discard: tipc_node_unlock(n_ptr); discard: - kfree_skb(buf); + kfree_skb(skb); } } @@ -1326,48 +1288,37 @@ static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf) * * Returns increase in queue length (i.e. 0 or 1) */ -u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail, - struct sk_buff *buf) +u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb) { - struct sk_buff *queue_buf; - struct sk_buff **prev; - u32 seq_no = buf_seqno(buf); - - buf->next = NULL; + struct sk_buff *skb1; + u32 seq_no = buf_seqno(skb); /* Empty queue ? */ - if (*head == NULL) { - *head = *tail = buf; + if (skb_queue_empty(list)) { + __skb_queue_tail(list, skb); return 1; } /* Last ? */ - if (less(buf_seqno(*tail), seq_no)) { - (*tail)->next = buf; - *tail = buf; + if (less(buf_seqno(skb_peek_tail(list)), seq_no)) { + __skb_queue_tail(list, skb); return 1; } /* Locate insertion point in queue, then insert; discard if duplicate */ - prev = head; - queue_buf = *head; - for (;;) { - u32 curr_seqno = buf_seqno(queue_buf); + skb_queue_walk(list, skb1) { + u32 curr_seqno = buf_seqno(skb1); if (seq_no == curr_seqno) { - kfree_skb(buf); + kfree_skb(skb); return 0; } if (less(seq_no, curr_seqno)) break; - - prev = &queue_buf->next; - queue_buf = queue_buf->next; } - buf->next = queue_buf; - *prev = buf; + __skb_queue_before(list, skb1, skb); return 1; } @@ -1397,15 +1348,14 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, return; } - if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in, - &l_ptr->newest_deferred_in, buf)) { - l_ptr->deferred_inqueue_sz++; + if (tipc_link_defer_pkt(&l_ptr->deferred_queue, buf)) { l_ptr->stats.deferred_recv++; TIPC_SKB_CB(buf)->deferred = true; - if ((l_ptr->deferred_inqueue_sz % 16) == 1) + if ((skb_queue_len(&l_ptr->deferred_queue) % 16) == 1) tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); - } else + } else { l_ptr->stats.duplicates++; + } } /* @@ -1419,12 +1369,6 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, u32 msg_size = sizeof(l_ptr->proto_msg); int r_flag; - /* Discard any previous message that was deferred due to congestion */ - if (l_ptr->proto_msg_queue) { - kfree_skb(l_ptr->proto_msg_queue); - l_ptr->proto_msg_queue = NULL; - } - /* Don't send protocol message during link changeover */ if (l_ptr->exp_msg_count) return; @@ -1447,8 +1391,8 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, if (l_ptr->next_out) next_sent = buf_seqno(l_ptr->next_out); msg_set_next_sent(msg, next_sent); - if (l_ptr->oldest_deferred_in) { - u32 rec = buf_seqno(l_ptr->oldest_deferred_in); + if (!skb_queue_empty(&l_ptr->deferred_queue)) { + u32 rec = buf_seqno(skb_peek(&l_ptr->deferred_queue)); gap = mod(rec - mod(l_ptr->next_in_no)); } msg_set_seq_gap(msg, gap); @@ -1636,7 +1580,7 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf) } if (msg_seq_gap(msg)) { l_ptr->stats.recv_nacks++; - tipc_link_retransmit(l_ptr, l_ptr->first_out, + tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->outqueue), msg_seq_gap(msg)); } break; @@ -1655,7 +1599,7 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr, u32 selector) { struct tipc_link *tunnel; - struct sk_buff *buf; + struct sk_buff *skb; u32 length = msg_size(msg); tunnel = 
l_ptr->owner->active_links[selector & 1]; @@ -1664,14 +1608,14 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr, return; } msg_set_size(tunnel_hdr, length + INT_H_SIZE); - buf = tipc_buf_acquire(length + INT_H_SIZE); - if (!buf) { + skb = tipc_buf_acquire(length + INT_H_SIZE); + if (!skb) { pr_warn("%sunable to send tunnel msg\n", link_co_err); return; } - skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE); - skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length); - __tipc_link_xmit(tunnel, buf); + skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE); + skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length); + __tipc_link_xmit_skb(tunnel, skb); } @@ -1683,10 +1627,10 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr, */ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) { - u32 msgcount = l_ptr->out_queue_size; - struct sk_buff *crs = l_ptr->first_out; + u32 msgcount = skb_queue_len(&l_ptr->outqueue); struct tipc_link *tunnel = l_ptr->owner->active_links[0]; struct tipc_msg tunnel_hdr; + struct sk_buff *skb; int split_bundles; if (!tunnel) @@ -1697,14 +1641,12 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); msg_set_msgcnt(&tunnel_hdr, msgcount); - if (!l_ptr->first_out) { - struct sk_buff *buf; - - buf = tipc_buf_acquire(INT_H_SIZE); - if (buf) { - skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE); + if (skb_queue_empty(&l_ptr->outqueue)) { + skb = tipc_buf_acquire(INT_H_SIZE); + if (skb) { + skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE); msg_set_size(&tunnel_hdr, INT_H_SIZE); - __tipc_link_xmit(tunnel, buf); + __tipc_link_xmit_skb(tunnel, skb); } else { pr_warn("%sunable to send changeover msg\n", link_co_err); @@ -1715,8 +1657,8 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) split_bundles = (l_ptr->owner->active_links[0] != l_ptr->owner->active_links[1]); - while (crs) { - struct tipc_msg *msg = buf_msg(crs); + skb_queue_walk(&l_ptr->outqueue, skb) { + struct tipc_msg *msg = buf_msg(skb); if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) { struct tipc_msg *m = msg_get_wrapped(msg); @@ -1734,7 +1676,6 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg, msg_link_selector(msg)); } - crs = crs->next; } } @@ -1750,17 +1691,16 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *tunnel) { - struct sk_buff *iter; + struct sk_buff *skb; struct tipc_msg tunnel_hdr; tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr); - msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size); + msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue)); msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); - iter = l_ptr->first_out; - while (iter) { - struct sk_buff *outbuf; - struct tipc_msg *msg = buf_msg(iter); + skb_queue_walk(&l_ptr->outqueue, skb) { + struct sk_buff *outskb; + struct tipc_msg *msg = buf_msg(skb); u32 length = msg_size(msg); if (msg_user(msg) == MSG_BUNDLER) @@ -1768,19 +1708,18 @@ void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */ msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); msg_set_size(&tunnel_hdr, length + INT_H_SIZE); - outbuf = tipc_buf_acquire(length + INT_H_SIZE); - if (outbuf == NULL) { + outskb = tipc_buf_acquire(length + INT_H_SIZE); + if (outskb == NULL) { pr_warn("%sunable to send duplicate 
msg\n", link_co_err); return; } - skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE); - skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data, + skb_copy_to_linear_data(outskb, &tunnel_hdr, INT_H_SIZE); + skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, skb->data, length); - __tipc_link_xmit(tunnel, outbuf); + __tipc_link_xmit_skb(tunnel, outskb); if (!tipc_link_is_up(l_ptr)) return; - iter = iter->next; } } @@ -2375,3 +2314,435 @@ static void link_print(struct tipc_link *l_ptr, const char *str) else pr_cont("\n"); } + +/* Parse and validate nested (link) properties valid for media, bearer and link + */ +int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]) +{ + int err; + + err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop, + tipc_nl_prop_policy); + if (err) + return err; + + if (props[TIPC_NLA_PROP_PRIO]) { + u32 prio; + + prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); + if (prio > TIPC_MAX_LINK_PRI) + return -EINVAL; + } + + if (props[TIPC_NLA_PROP_TOL]) { + u32 tol; + + tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); + if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL)) + return -EINVAL; + } + + if (props[TIPC_NLA_PROP_WIN]) { + u32 win; + + win = nla_get_u32(props[TIPC_NLA_PROP_WIN]); + if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN)) + return -EINVAL; + } + + return 0; +} + +int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info) +{ + int err; + int res = 0; + int bearer_id; + char *name; + struct tipc_link *link; + struct tipc_node *node; + struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; + + if (!info->attrs[TIPC_NLA_LINK]) + return -EINVAL; + + err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX, + info->attrs[TIPC_NLA_LINK], + tipc_nl_link_policy); + if (err) + return err; + + if (!attrs[TIPC_NLA_LINK_NAME]) + return -EINVAL; + + name = nla_data(attrs[TIPC_NLA_LINK_NAME]); + + node = tipc_link_find_owner(name, &bearer_id); + if (!node) + return -EINVAL; + + tipc_node_lock(node); + + link = node->links[bearer_id]; + if (!link) { + res = -EINVAL; + goto out; + } + + if (attrs[TIPC_NLA_LINK_PROP]) { + struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; + + err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], + props); + if (err) { + res = err; + goto out; + } + + if (props[TIPC_NLA_PROP_TOL]) { + u32 tol; + + tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); + link_set_supervision_props(link, tol); + tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0); + } + if (props[TIPC_NLA_PROP_PRIO]) { + u32 prio; + + prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); + link->priority = prio; + tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0); + } + if (props[TIPC_NLA_PROP_WIN]) { + u32 win; + + win = nla_get_u32(props[TIPC_NLA_PROP_WIN]); + tipc_link_set_queue_limits(link, win); + } + } + +out: + tipc_node_unlock(node); + + return res; +} + +static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s) +{ + int i; + struct nlattr *stats; + + struct nla_map { + u32 key; + u32 val; + }; + + struct nla_map map[] = { + {TIPC_NLA_STATS_RX_INFO, s->recv_info}, + {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments}, + {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented}, + {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles}, + {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled}, + {TIPC_NLA_STATS_TX_INFO, s->sent_info}, + {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments}, + {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented}, + {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles}, + {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled}, + {TIPC_NLA_STATS_MSG_PROF_TOT, 
(s->msg_length_counts) ? + s->msg_length_counts : 1}, + {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts}, + {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total}, + {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]}, + {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]}, + {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]}, + {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]}, + {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]}, + {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]}, + {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]}, + {TIPC_NLA_STATS_RX_STATES, s->recv_states}, + {TIPC_NLA_STATS_RX_PROBES, s->recv_probes}, + {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks}, + {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv}, + {TIPC_NLA_STATS_TX_STATES, s->sent_states}, + {TIPC_NLA_STATS_TX_PROBES, s->sent_probes}, + {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks}, + {TIPC_NLA_STATS_TX_ACKS, s->sent_acks}, + {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted}, + {TIPC_NLA_STATS_DUPLICATES, s->duplicates}, + {TIPC_NLA_STATS_LINK_CONGS, s->link_congs}, + {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz}, + {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ? + (s->accu_queue_sz / s->queue_sz_counts) : 0} + }; + + stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS); + if (!stats) + return -EMSGSIZE; + + for (i = 0; i < ARRAY_SIZE(map); i++) + if (nla_put_u32(skb, map[i].key, map[i].val)) + goto msg_full; + + nla_nest_end(skb, stats); + + return 0; +msg_full: + nla_nest_cancel(skb, stats); + + return -EMSGSIZE; +} + +/* Caller should hold appropriate locks to protect the link */ +static int __tipc_nl_add_link(struct tipc_nl_msg *msg, struct tipc_link *link) +{ + int err; + void *hdr; + struct nlattr *attrs; + struct nlattr *prop; + + hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, + NLM_F_MULTI, TIPC_NL_LINK_GET); + if (!hdr) + return -EMSGSIZE; + + attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK); + if (!attrs) + goto msg_full; + + if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name)) + goto attr_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, + tipc_cluster_mask(tipc_own_addr))) + goto attr_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt)) + goto attr_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no)) + goto attr_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no)) + goto attr_msg_full; + + if (tipc_link_is_up(link)) + if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP)) + goto attr_msg_full; + if (tipc_link_is_active(link)) + if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE)) + goto attr_msg_full; + + prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP); + if (!prop) + goto attr_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) + goto prop_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance)) + goto prop_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, + link->queue_limit[TIPC_LOW_IMPORTANCE])) + goto prop_msg_full; + nla_nest_end(msg->skb, prop); + + err = __tipc_nl_add_stats(msg->skb, &link->stats); + if (err) + goto attr_msg_full; + + nla_nest_end(msg->skb, attrs); + genlmsg_end(msg->skb, hdr); + + return 0; + +prop_msg_full: + nla_nest_cancel(msg->skb, prop); +attr_msg_full: + nla_nest_cancel(msg->skb, attrs); +msg_full: + genlmsg_cancel(msg->skb, hdr); + + return -EMSGSIZE; +} + +/* Caller should hold node lock */ +static int __tipc_nl_add_node_links(struct 
tipc_nl_msg *msg, + struct tipc_node *node, + u32 *prev_link) +{ + u32 i; + int err; + + for (i = *prev_link; i < MAX_BEARERS; i++) { + *prev_link = i; + + if (!node->links[i]) + continue; + + err = __tipc_nl_add_link(msg, node->links[i]); + if (err) + return err; + } + *prev_link = 0; + + return 0; +} + +int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct tipc_node *node; + struct tipc_nl_msg msg; + u32 prev_node = cb->args[0]; + u32 prev_link = cb->args[1]; + int done = cb->args[2]; + int err; + + if (done) + return 0; + + msg.skb = skb; + msg.portid = NETLINK_CB(cb->skb).portid; + msg.seq = cb->nlh->nlmsg_seq; + + rcu_read_lock(); + + if (prev_node) { + node = tipc_node_find(prev_node); + if (!node) { + /* We never set seq or call nl_dump_check_consistent() + * this means that setting prev_seq here will cause the + * consistence check to fail in the netlink callback + * handler. Resulting in the last NLMSG_DONE message + * having the NLM_F_DUMP_INTR flag set. + */ + cb->prev_seq = 1; + goto out; + } + + list_for_each_entry_continue_rcu(node, &tipc_node_list, list) { + tipc_node_lock(node); + err = __tipc_nl_add_node_links(&msg, node, &prev_link); + tipc_node_unlock(node); + if (err) + goto out; + + prev_node = node->addr; + } + } else { + err = tipc_nl_add_bc_link(&msg); + if (err) + goto out; + + list_for_each_entry_rcu(node, &tipc_node_list, list) { + tipc_node_lock(node); + err = __tipc_nl_add_node_links(&msg, node, &prev_link); + tipc_node_unlock(node); + if (err) + goto out; + + prev_node = node->addr; + } + } + done = 1; +out: + rcu_read_unlock(); + + cb->args[0] = prev_node; + cb->args[1] = prev_link; + cb->args[2] = done; + + return skb->len; +} + +int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info) +{ + struct sk_buff *ans_skb; + struct tipc_nl_msg msg; + struct tipc_link *link; + struct tipc_node *node; + char *name; + int bearer_id; + int err; + + if (!info->attrs[TIPC_NLA_LINK_NAME]) + return -EINVAL; + + name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]); + node = tipc_link_find_owner(name, &bearer_id); + if (!node) + return -EINVAL; + + ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!ans_skb) + return -ENOMEM; + + msg.skb = ans_skb; + msg.portid = info->snd_portid; + msg.seq = info->snd_seq; + + tipc_node_lock(node); + link = node->links[bearer_id]; + if (!link) { + err = -EINVAL; + goto err_out; + } + + err = __tipc_nl_add_link(&msg, link); + if (err) + goto err_out; + + tipc_node_unlock(node); + + return genlmsg_reply(ans_skb, info); + +err_out: + tipc_node_unlock(node); + nlmsg_free(ans_skb); + + return err; +} + +int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info) +{ + int err; + char *link_name; + unsigned int bearer_id; + struct tipc_link *link; + struct tipc_node *node; + struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; + + if (!info->attrs[TIPC_NLA_LINK]) + return -EINVAL; + + err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX, + info->attrs[TIPC_NLA_LINK], + tipc_nl_link_policy); + if (err) + return err; + + if (!attrs[TIPC_NLA_LINK_NAME]) + return -EINVAL; + + link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]); + + if (strcmp(link_name, tipc_bclink_name) == 0) { + err = tipc_bclink_reset_stats(); + if (err) + return err; + return 0; + } + + node = tipc_link_find_owner(link_name, &bearer_id); + if (!node) + return -EINVAL; + + tipc_node_lock(node); + + link = node->links[bearer_id]; + if (!link) { + tipc_node_unlock(node); + return -EINVAL; + } + + link_reset_statistics(link); + + 
tipc_node_unlock(node); + + return 0; +} diff --git a/net/tipc/link.h b/net/tipc/link.h index b567a3427fda46e05a692b606f424da1a60eaa93..55812e87ca1e2a4b6cb460e067599edc9bbf158d 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h @@ -37,6 +37,7 @@ #ifndef _TIPC_LINK_H #define _TIPC_LINK_H +#include #include "msg.h" #include "node.h" @@ -118,20 +119,13 @@ struct tipc_stats { * @max_pkt: current maximum packet size for this link * @max_pkt_target: desired maximum packet size for this link * @max_pkt_probes: # of probes based on current (max_pkt, max_pkt_target) - * @out_queue_size: # of messages in outbound message queue - * @first_out: ptr to first outbound message in queue - * @last_out: ptr to last outbound message in queue + * @outqueue: outbound message queue * @next_out_no: next sequence number to use for outbound messages * @last_retransmitted: sequence number of most recently retransmitted message * @stale_count: # of identical retransmit requests made by peer * @next_in_no: next sequence number to expect for inbound messages - * @deferred_inqueue_sz: # of messages in inbound message queue - * @oldest_deferred_in: ptr to first inbound message in queue - * @newest_deferred_in: ptr to last inbound message in queue + * @deferred_queue: deferred queue saved OOS b'cast message received from node * @unacked_window: # of inbound messages rx'd without ack'ing back to peer - * @proto_msg_queue: ptr to (single) outbound control message - * @retransm_queue_size: number of messages to retransmit - * @retransm_queue_head: sequence number of first message to retransmit * @next_out: ptr to first unsent outbound message in queue * @waiting_sks: linked list of sockets waiting for link congestion to abate * @long_msg_seq_no: next identifier to use for outbound fragmented messages @@ -175,24 +169,17 @@ struct tipc_link { u32 max_pkt_probes; /* Sending */ - u32 out_queue_size; - struct sk_buff *first_out; - struct sk_buff *last_out; + struct sk_buff_head outqueue; u32 next_out_no; u32 last_retransmitted; u32 stale_count; /* Reception */ u32 next_in_no; - u32 deferred_inqueue_sz; - struct sk_buff *oldest_deferred_in; - struct sk_buff *newest_deferred_in; + struct sk_buff_head deferred_queue; u32 unacked_window; /* Congestion handling */ - struct sk_buff *proto_msg_queue; - u32 retransm_queue_size; - u32 retransm_queue_head; struct sk_buff *next_out; struct sk_buff_head waiting_sks; @@ -226,18 +213,26 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, void tipc_link_reset_all(struct tipc_node *node); void tipc_link_reset(struct tipc_link *l_ptr); void tipc_link_reset_list(unsigned int bearer_id); -int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector); -int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf); +int tipc_link_xmit_skb(struct sk_buff *skb, u32 dest, u32 selector); +int tipc_link_xmit(struct sk_buff_head *list, u32 dest, u32 selector); +int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list); u32 tipc_link_get_max_pkt(u32 dest, u32 selector); void tipc_link_bundle_rcv(struct sk_buff *buf); void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob, u32 gap, u32 tolerance, u32 priority, u32 acked_mtu); -void tipc_link_push_queue(struct tipc_link *l_ptr); -u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail, - struct sk_buff *buf); +void tipc_link_push_packets(struct tipc_link *l_ptr); +u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *buf); void tipc_link_set_queue_limits(struct tipc_link 
*l_ptr, u32 window); void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *start, u32 retransmits); +struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list, + const struct sk_buff *skb); + +int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb); +int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info); +int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info); +int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info); +int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]); /* * Link sequence number manipulation routines (uses modulo 2**16 arithmetic) @@ -252,18 +247,14 @@ static inline u32 mod(u32 x) return x & 0xffffu; } -static inline int between(u32 lower, u32 upper, u32 n) +static inline int less_eq(u32 left, u32 right) { - if ((lower < n) && (n < upper)) - return 1; - if ((upper < lower) && ((n > lower) || (n < upper))) - return 1; - return 0; + return mod(right - left) < 32768u; } -static inline int less_eq(u32 left, u32 right) +static inline int more(u32 left, u32 right) { - return mod(right - left) < 32768u; + return !less_eq(left, right); } static inline int less(u32 left, u32 right) @@ -302,7 +293,7 @@ static inline int link_reset_reset(struct tipc_link *l_ptr) static inline int link_congested(struct tipc_link *l_ptr) { - return l_ptr->out_queue_size >= l_ptr->queue_limit[0]; + return skb_queue_len(&l_ptr->outqueue) >= l_ptr->queue_limit[0]; } #endif diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 74745a47d72ae1c0ce84c6ff5b096682410a0789..a687b30a699cb651eaf7dd5f1c7d5fa3459bf9b5 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -91,7 +91,7 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz, * @*headbuf: in: NULL for first frag, otherwise value returned from prev call * out: set when successful non-complete reassembly, otherwise NULL * @*buf: in: the buffer to append. Always defined - * out: head buf after sucessful complete reassembly, otherwise NULL + * out: head buf after successful complete reassembly, otherwise NULL * Returns 1 when reassembly complete, otherwise 0 */ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) @@ -162,15 +162,16 @@ err: /** * tipc_msg_build - create buffer chain containing specified header and data * @mhdr: Message header, to be prepended to data - * @iov: User data + * @m: User message * @offset: Posision in iov to start copying from * @dsz: Total length of user data * @pktmax: Max packet size that can be used - * @chain: Buffer or chain of buffers to be returned to caller + * @list: Buffer or chain of buffers to be returned to caller + * * Returns message data size or errno: -ENOMEM, -EFAULT */ -int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov, - int offset, int dsz, int pktmax , struct sk_buff **chain) +int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset, + int dsz, int pktmax, struct sk_buff_head *list) { int mhsz = msg_hdr_sz(mhdr); int msz = mhsz + dsz; @@ -179,22 +180,22 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov, int pktrem = pktmax; int drem = dsz; struct tipc_msg pkthdr; - struct sk_buff *buf, *prev; + struct sk_buff *skb; char *pktpos; int rc; - uint chain_sz = 0; + msg_set_size(mhdr, msz); /* No fragmentation needed? 
*/ if (likely(msz <= pktmax)) { - buf = tipc_buf_acquire(msz); - *chain = buf; - if (unlikely(!buf)) + skb = tipc_buf_acquire(msz); + if (unlikely(!skb)) return -ENOMEM; - skb_copy_to_linear_data(buf, mhdr, mhsz); - pktpos = buf->data + mhsz; - TIPC_SKB_CB(buf)->chain_sz = 1; - if (!dsz || !memcpy_fromiovecend(pktpos, iov, offset, dsz)) + __skb_queue_tail(list, skb); + skb_copy_to_linear_data(skb, mhdr, mhsz); + pktpos = skb->data + mhsz; + if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset, + dsz)) return dsz; rc = -EFAULT; goto error; @@ -207,15 +208,15 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov, msg_set_fragm_no(&pkthdr, pktno); /* Prepare first fragment */ - *chain = buf = tipc_buf_acquire(pktmax); - if (!buf) + skb = tipc_buf_acquire(pktmax); + if (!skb) return -ENOMEM; - chain_sz = 1; - pktpos = buf->data; - skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE); + __skb_queue_tail(list, skb); + pktpos = skb->data; + skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE); pktpos += INT_H_SIZE; pktrem -= INT_H_SIZE; - skb_copy_to_linear_data_offset(buf, INT_H_SIZE, mhdr, mhsz); + skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz); pktpos += mhsz; pktrem -= mhsz; @@ -223,7 +224,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov, if (drem < pktrem) pktrem = drem; - if (memcpy_fromiovecend(pktpos, iov, offset, pktrem)) { + if (memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset, pktrem)) { rc = -EFAULT; goto error; } @@ -238,43 +239,41 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov, pktsz = drem + INT_H_SIZE; else pktsz = pktmax; - prev = buf; - buf = tipc_buf_acquire(pktsz); - if (!buf) { + skb = tipc_buf_acquire(pktsz); + if (!skb) { rc = -ENOMEM; goto error; } - chain_sz++; - prev->next = buf; + __skb_queue_tail(list, skb); msg_set_type(&pkthdr, FRAGMENT); msg_set_size(&pkthdr, pktsz); msg_set_fragm_no(&pkthdr, ++pktno); - skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE); - pktpos = buf->data + INT_H_SIZE; + skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE); + pktpos = skb->data + INT_H_SIZE; pktrem = pktsz - INT_H_SIZE; } while (1); - TIPC_SKB_CB(*chain)->chain_sz = chain_sz; - msg_set_type(buf_msg(buf), LAST_FRAGMENT); + msg_set_type(buf_msg(skb), LAST_FRAGMENT); return dsz; error: - kfree_skb_list(*chain); - *chain = NULL; + __skb_queue_purge(list); + __skb_queue_head_init(list); return rc; } /** * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one - * @bbuf: the existing buffer ("bundle") - * @buf: buffer to be appended + * @list: the buffer chain of the existing buffer ("bundle") + * @skb: buffer to be appended * @mtu: max allowable size for the bundle buffer * Consumes buffer if successful * Returns true if bundling could be performed, otherwise false */ -bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu) +bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu) { - struct tipc_msg *bmsg = buf_msg(bbuf); - struct tipc_msg *msg = buf_msg(buf); + struct sk_buff *bskb = skb_peek_tail(list); + struct tipc_msg *bmsg = buf_msg(bskb); + struct tipc_msg *msg = buf_msg(skb); unsigned int bsz = msg_size(bmsg); unsigned int msz = msg_size(msg); u32 start = align(bsz); @@ -289,35 +288,36 @@ bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu) return false; if (likely(msg_user(bmsg) != MSG_BUNDLER)) return false; - if (likely(msg_type(bmsg) != BUNDLE_OPEN)) + if (likely(!TIPC_SKB_CB(bskb)->bundling)) return false; - if 
(unlikely(skb_tailroom(bbuf) < (pad + msz))) + if (unlikely(skb_tailroom(bskb) < (pad + msz))) return false; if (unlikely(max < (start + msz))) return false; - skb_put(bbuf, pad + msz); - skb_copy_to_linear_data_offset(bbuf, start, buf->data, msz); + skb_put(bskb, pad + msz); + skb_copy_to_linear_data_offset(bskb, start, skb->data, msz); msg_set_size(bmsg, start + msz); msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1); - bbuf->next = buf->next; - kfree_skb(buf); + kfree_skb(skb); return true; } /** * tipc_msg_make_bundle(): Create bundle buf and append message to its tail - * @buf: buffer to be appended and replaced - * @mtu: max allowable size for the bundle buffer, inclusive header + * @list: the buffer chain + * @skb: buffer to be appended and replaced + * @mtu: max allowable size for the bundle buffer, inclusive header * @dnode: destination node for message. (Not always present in header) * Replaces buffer if successful - * Returns true if sucess, otherwise false + * Returns true if success, otherwise false */ -bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode) +bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb, + u32 mtu, u32 dnode) { - struct sk_buff *bbuf; + struct sk_buff *bskb; struct tipc_msg *bmsg; - struct tipc_msg *msg = buf_msg(*buf); + struct tipc_msg *msg = buf_msg(skb); u32 msz = msg_size(msg); u32 max = mtu - INT_H_SIZE; @@ -330,20 +330,19 @@ bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode) if (msz > (max / 2)) return false; - bbuf = tipc_buf_acquire(max); - if (!bbuf) + bskb = tipc_buf_acquire(max); + if (!bskb) return false; - skb_trim(bbuf, INT_H_SIZE); - bmsg = buf_msg(bbuf); - tipc_msg_init(bmsg, MSG_BUNDLER, BUNDLE_OPEN, INT_H_SIZE, dnode); + skb_trim(bskb, INT_H_SIZE); + bmsg = buf_msg(bskb); + tipc_msg_init(bmsg, MSG_BUNDLER, 0, INT_H_SIZE, dnode); msg_set_seqno(bmsg, msg_seqno(msg)); msg_set_ack(bmsg, msg_ack(msg)); msg_set_bcast_ack(bmsg, msg_bcast_ack(msg)); - bbuf->next = (*buf)->next; - tipc_msg_bundle(bbuf, *buf, mtu); - *buf = bbuf; - return true; + TIPC_SKB_CB(bskb)->bundling = true; + __skb_queue_tail(list, bskb); + return tipc_msg_bundle(list, skb, mtu); } /** @@ -429,22 +428,23 @@ int tipc_msg_eval(struct sk_buff *buf, u32 *dnode) /* tipc_msg_reassemble() - clone a buffer chain of fragments and * reassemble the clones into one message */ -struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain) +struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list) { - struct sk_buff *buf = chain; - struct sk_buff *frag = buf; + struct sk_buff *skb; + struct sk_buff *frag = NULL; struct sk_buff *head = NULL; int hdr_sz; /* Copy header if single buffer */ - if (!buf->next) { - hdr_sz = skb_headroom(buf) + msg_hdr_sz(buf_msg(buf)); - return __pskb_copy(buf, hdr_sz, GFP_ATOMIC); + if (skb_queue_len(list) == 1) { + skb = skb_peek(list); + hdr_sz = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb)); + return __pskb_copy(skb, hdr_sz, GFP_ATOMIC); } /* Clone all fragments and reassemble */ - while (buf) { - frag = skb_clone(buf, GFP_ATOMIC); + skb_queue_walk(list, skb) { + frag = skb_clone(skb, GFP_ATOMIC); if (!frag) goto error; frag->next = NULL; @@ -452,7 +452,6 @@ struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain) break; if (!head) goto error; - buf = buf->next; } return frag; error: diff --git a/net/tipc/msg.h b/net/tipc/msg.h index 0ea7b695ac4d891a7556ae2f08110d11306369f9..d5c83d7ecb479f30de9fe9b8f8aadbac5aa0c64b 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -464,11 +464,6 @@ static inline struct 
tipc_msg *msg_get_wrapped(struct tipc_msg *m) #define FRAGMENT 1 #define LAST_FRAGMENT 2 -/* Bundling protocol message types - */ -#define BUNDLE_OPEN 0 -#define BUNDLE_CLOSED 1 - /* * Link management protocol message types */ @@ -739,13 +734,14 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz, int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf); -bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu); +bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu); -bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode); +bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb, + u32 mtu, u32 dnode); -int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov, - int offset, int dsz, int mtu , struct sk_buff **chain); +int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset, + int dsz, int mtu, struct sk_buff_head *list); -struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain); +struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list); #endif diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index 376d2bb51d8da19bd00ee48f4b374ca098f36413..ba6083dca95b97d61d73a4d1a1df13c31a9e728c 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -38,39 +38,6 @@ #include "link.h" #include "name_distr.h" -/** - * struct publ_list - list of publications made by this node - * @list: circular list of publications - * @list_size: number of entries in list - */ -struct publ_list { - struct list_head list; - u32 size; -}; - -static struct publ_list publ_zone = { - .list = LIST_HEAD_INIT(publ_zone.list), - .size = 0, -}; - -static struct publ_list publ_cluster = { - .list = LIST_HEAD_INIT(publ_cluster.list), - .size = 0, -}; - -static struct publ_list publ_node = { - .list = LIST_HEAD_INIT(publ_node.list), - .size = 0, -}; - -static struct publ_list *publ_lists[] = { - NULL, - &publ_zone, /* publ_lists[TIPC_ZONE_SCOPE] */ - &publ_cluster, /* publ_lists[TIPC_CLUSTER_SCOPE] */ - &publ_node /* publ_lists[TIPC_NODE_SCOPE] */ -}; - - int sysctl_tipc_named_timeout __read_mostly = 2000; /** @@ -114,9 +81,9 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest) return buf; } -void named_cluster_distribute(struct sk_buff *buf) +void named_cluster_distribute(struct sk_buff *skb) { - struct sk_buff *obuf; + struct sk_buff *oskb; struct tipc_node *node; u32 dnode; @@ -127,15 +94,15 @@ void named_cluster_distribute(struct sk_buff *buf) continue; if (!tipc_node_active_links(node)) continue; - obuf = skb_copy(buf, GFP_ATOMIC); - if (!obuf) + oskb = skb_copy(skb, GFP_ATOMIC); + if (!oskb) break; - msg_set_destnode(buf_msg(obuf), dnode); - tipc_link_xmit(obuf, dnode, dnode); + msg_set_destnode(buf_msg(oskb), dnode); + tipc_link_xmit_skb(oskb, dnode, dnode); } rcu_read_unlock(); - kfree_skb(buf); + kfree_skb(skb); } /** @@ -146,8 +113,8 @@ struct sk_buff *tipc_named_publish(struct publication *publ) struct sk_buff *buf; struct distr_item *item; - list_add_tail(&publ->local_list, &publ_lists[publ->scope]->list); - publ_lists[publ->scope]->size++; + list_add_tail_rcu(&publ->local_list, + &tipc_nametbl->publ_list[publ->scope]); if (publ->scope == TIPC_NODE_SCOPE) return NULL; @@ -172,7 +139,6 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ) struct distr_item *item; list_del(&publ->local_list); - publ_lists[publ->scope]->size--; if (publ->scope == TIPC_NODE_SCOPE) return NULL; @@ -190,32 +156,28 @@ struct sk_buff *tipc_named_withdraw(struct 
publication *publ) /** * named_distribute - prepare name info for bulk distribution to another node - * @msg_list: list of messages (buffers) to be returned from this function + * @list: list of messages (buffers) to be returned from this function * @dnode: node to be updated * @pls: linked list of publication items to be packed into buffer chain */ -static void named_distribute(struct list_head *msg_list, u32 dnode, - struct publ_list *pls) +static void named_distribute(struct sk_buff_head *list, u32 dnode, + struct list_head *pls) { struct publication *publ; - struct sk_buff *buf = NULL; + struct sk_buff *skb = NULL; struct distr_item *item = NULL; - uint dsz = pls->size * ITEM_SIZE; uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE; - uint rem = dsz; - uint msg_rem = 0; + uint msg_rem = msg_dsz; - list_for_each_entry(publ, &pls->list, local_list) { + list_for_each_entry(publ, pls, local_list) { /* Prepare next buffer: */ - if (!buf) { - msg_rem = min_t(uint, rem, msg_dsz); - rem -= msg_rem; - buf = named_prepare_buf(PUBLICATION, msg_rem, dnode); - if (!buf) { + if (!skb) { + skb = named_prepare_buf(PUBLICATION, msg_rem, dnode); + if (!skb) { pr_warn("Bulk publication failure\n"); return; } - item = (struct distr_item *)msg_data(buf_msg(buf)); + item = (struct distr_item *)msg_data(buf_msg(skb)); } /* Pack publication into message: */ @@ -225,10 +187,16 @@ static void named_distribute(struct list_head *msg_list, u32 dnode, /* Append full buffer to list: */ if (!msg_rem) { - list_add_tail((struct list_head *)buf, msg_list); - buf = NULL; + __skb_queue_tail(list, skb); + skb = NULL; + msg_rem = msg_dsz; } } + if (skb) { + msg_set_size(buf_msg(skb), INT_H_SIZE + (msg_dsz - msg_rem)); + skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem)); + __skb_queue_tail(list, skb); + } } /** @@ -236,36 +204,68 @@ static void named_distribute(struct list_head *msg_list, u32 dnode, */ void tipc_named_node_up(u32 dnode) { - LIST_HEAD(msg_list); - struct sk_buff *buf_chain; - - read_lock_bh(&tipc_nametbl_lock); - named_distribute(&msg_list, dnode, &publ_cluster); - named_distribute(&msg_list, dnode, &publ_zone); - read_unlock_bh(&tipc_nametbl_lock); - - /* Convert circular list to linear list and send: */ - buf_chain = (struct sk_buff *)msg_list.next; - ((struct sk_buff *)msg_list.prev)->next = NULL; - tipc_link_xmit(buf_chain, dnode, dnode); + struct sk_buff_head head; + + __skb_queue_head_init(&head); + + rcu_read_lock(); + named_distribute(&head, dnode, + &tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]); + named_distribute(&head, dnode, + &tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]); + rcu_read_unlock(); + + tipc_link_xmit(&head, dnode, dnode); +} + +static void tipc_publ_subscribe(struct publication *publ, u32 addr) +{ + struct tipc_node *node; + + if (in_own_node(addr)) + return; + + node = tipc_node_find(addr); + if (!node) { + pr_warn("Node subscription rejected, unknown node 0x%x\n", + addr); + return; + } + + tipc_node_lock(node); + list_add_tail(&publ->nodesub_list, &node->publ_list); + tipc_node_unlock(node); +} + +static void tipc_publ_unsubscribe(struct publication *publ, u32 addr) +{ + struct tipc_node *node; + + node = tipc_node_find(addr); + if (!node) + return; + + tipc_node_lock(node); + list_del_init(&publ->nodesub_list); + tipc_node_unlock(node); } /** - * named_purge_publ - remove publication associated with a failed node + * tipc_publ_purge - remove publication associated with a failed node * * Invoked for each publication issued by a newly failed node. 
* Removes publication structure from name table & deletes it. */ -static void named_purge_publ(struct publication *publ) +static void tipc_publ_purge(struct publication *publ, u32 addr) { struct publication *p; - write_lock_bh(&tipc_nametbl_lock); + spin_lock_bh(&tipc_nametbl_lock); p = tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node, publ->ref, publ->key); if (p) - tipc_nodesub_unsubscribe(&p->subscr); - write_unlock_bh(&tipc_nametbl_lock); + tipc_publ_unsubscribe(p, addr); + spin_unlock_bh(&tipc_nametbl_lock); if (p != publ) { pr_err("Unable to remove publication from failed node\n" @@ -274,7 +274,15 @@ static void named_purge_publ(struct publication *publ) publ->key); } - kfree(p); + kfree_rcu(p, rcu); +} + +void tipc_publ_notify(struct list_head *nsub_list, u32 addr) +{ + struct publication *publ, *tmp; + + list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list) + tipc_publ_purge(publ, addr); } /** @@ -294,9 +302,7 @@ static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype) TIPC_CLUSTER_SCOPE, node, ntohl(i->ref), ntohl(i->key)); if (publ) { - tipc_nodesub_subscribe(&publ->subscr, node, publ, - (net_ev_handler) - named_purge_publ); + tipc_publ_subscribe(publ, node); return true; } } else if (dtype == WITHDRAWAL) { @@ -304,8 +310,8 @@ static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype) node, ntohl(i->ref), ntohl(i->key)); if (publ) { - tipc_nodesub_unsubscribe(&publ->subscr); - kfree(publ); + tipc_publ_unsubscribe(publ, node); + kfree_rcu(publ, rcu); return true; } } else { @@ -370,14 +376,14 @@ void tipc_named_rcv(struct sk_buff *buf) u32 count = msg_data_sz(msg) / ITEM_SIZE; u32 node = msg_orignode(msg); - write_lock_bh(&tipc_nametbl_lock); + spin_lock_bh(&tipc_nametbl_lock); while (count--) { if (!tipc_update_nametbl(item, node, msg_type(msg))) tipc_named_add_backlog(item, msg_type(msg), node); item++; } tipc_named_process_backlog(); - write_unlock_bh(&tipc_nametbl_lock); + spin_unlock_bh(&tipc_nametbl_lock); kfree_skb(buf); } @@ -393,11 +399,12 @@ void tipc_named_reinit(void) struct publication *publ; int scope; - write_lock_bh(&tipc_nametbl_lock); + spin_lock_bh(&tipc_nametbl_lock); for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++) - list_for_each_entry(publ, &publ_lists[scope]->list, local_list) + list_for_each_entry_rcu(publ, &tipc_nametbl->publ_list[scope], + local_list) publ->node = tipc_own_addr; - write_unlock_bh(&tipc_nametbl_lock); + spin_unlock_bh(&tipc_nametbl_lock); } diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h index b9e75feb3434e76fc96a5e71a07e94fbcc709888..cef55cedcfb29c76b8367486d1e28cb376118339 100644 --- a/net/tipc/name_distr.h +++ b/net/tipc/name_distr.h @@ -74,5 +74,6 @@ void tipc_named_node_up(u32 dnode); void tipc_named_rcv(struct sk_buff *buf); void tipc_named_reinit(void); void tipc_named_process_backlog(void); +void tipc_publ_notify(struct list_head *nsub_list, u32 addr); #endif diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 3a6a0a7c0759f01ad06b7c8d9b08a6c8d98e4ca9..c8df0223371a7b86f971093fee1ac1ba47bffb26 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -1,8 +1,8 @@ /* * net/tipc/name_table.c: TIPC name table code * - * Copyright (c) 2000-2006, Ericsson AB - * Copyright (c) 2004-2008, 2010-2011, Wind River Systems + * Copyright (c) 2000-2006, 2014, Ericsson AB + * Copyright (c) 2004-2008, 2010-2014, Wind River Systems * All rights reserved. 
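The name_table.c hunks that follow all implement a single locking conversion: the global tipc_nametbl_lock rwlock becomes a plain spinlock taken only by updaters, lookups move to RCU plus the per-sequence spinlock, and freed entries are released through kfree_rcu(). A minimal sketch of the resulting reader pattern, assuming the seq_hlist layout this patch introduces:

	rcu_read_lock();
	seq = nametbl_find_seq(type);		/* RCU-protected hlist walk */
	if (seq) {
		spin_lock_bh(&seq->lock);	/* guards the sub_seq array */
		/* ... translate the instance / walk publications ... */
		spin_unlock_bh(&seq->lock);
	}
	rcu_read_unlock();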
* * Redistribution and use in source and binary forms, with or without @@ -42,6 +42,12 @@ #define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */ +static const struct nla_policy +tipc_nl_name_table_policy[TIPC_NLA_NAME_TABLE_MAX + 1] = { + [TIPC_NLA_NAME_TABLE_UNSPEC] = { .type = NLA_UNSPEC }, + [TIPC_NLA_NAME_TABLE_PUBL] = { .type = NLA_NESTED } +}; + /** * struct name_info - name sequence publication info * @node_list: circular list of publications made by own node @@ -86,6 +92,7 @@ struct sub_seq { * @ns_list: links to adjacent name sequences in hash chain * @subscriptions: list of subscriptions for this 'type' * @lock: spinlock controlling access to publication lists of all sub-sequences + * @rcu: RCU callback head used for deferred freeing */ struct name_seq { u32 type; @@ -95,21 +102,11 @@ struct name_seq { struct hlist_node ns_list; struct list_head subscriptions; spinlock_t lock; + struct rcu_head rcu; }; -/** - * struct name_table - table containing all existing port name publications - * @types: pointer to fixed-sized array of name sequence lists, - * accessed via hashing on 'type'; name sequence lists are *not* sorted - * @local_publ_count: number of publications issued by this node - */ -struct name_table { - struct hlist_head *types; - u32 local_publ_count; -}; - -static struct name_table table; -DEFINE_RWLOCK(tipc_nametbl_lock); +struct name_table *tipc_nametbl; +DEFINE_SPINLOCK(tipc_nametbl_lock); static int hash(int x) { @@ -136,9 +133,7 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper, publ->node = node; publ->ref = port_ref; publ->key = key; - INIT_LIST_HEAD(&publ->local_list); INIT_LIST_HEAD(&publ->pport_list); - INIT_LIST_HEAD(&publ->subscr.nodesub_list); return publ; } @@ -173,22 +168,10 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea nseq->alloc = 1; INIT_HLIST_NODE(&nseq->ns_list); INIT_LIST_HEAD(&nseq->subscriptions); - hlist_add_head(&nseq->ns_list, seq_head); + hlist_add_head_rcu(&nseq->ns_list, seq_head); return nseq; } -/* - * nameseq_delete_empty - deletes a name sequence structure if now unused - */ -static void nameseq_delete_empty(struct name_seq *seq) -{ - if (!seq->first_free && list_empty(&seq->subscriptions)) { - hlist_del_init(&seq->ns_list); - kfree(seq->sseqs); - kfree(seq); - } -} - /** * nameseq_find_subseq - find sub-sequence (if any) matching a name instance * @@ -469,8 +452,8 @@ static struct name_seq *nametbl_find_seq(u32 type) struct hlist_head *seq_head; struct name_seq *ns; - seq_head = &table.types[hash(type)]; - hlist_for_each_entry(ns, seq_head, ns_list) { + seq_head = &tipc_nametbl->seq_hlist[hash(type)]; + hlist_for_each_entry_rcu(ns, seq_head, ns_list) { if (ns->type == type) return ns; } @@ -481,7 +464,9 @@ static struct name_seq *nametbl_find_seq(u32 type) struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper, u32 scope, u32 node, u32 port, u32 key) { + struct publication *publ; struct name_seq *seq = nametbl_find_seq(type); + int index = hash(type); if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) || (lower > upper)) { @@ -491,12 +476,16 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper, } if (!seq) - seq = tipc_nameseq_create(type, &table.types[hash(type)]); + seq = tipc_nameseq_create(type, + &tipc_nametbl->seq_hlist[index]); if (!seq) return NULL; - return tipc_nameseq_insert_publ(seq, type, lower, upper, + spin_lock_bh(&seq->lock); + publ = tipc_nameseq_insert_publ(seq, type, lower, upper, scope, node, port, 
key); + spin_unlock_bh(&seq->lock); + return publ; } struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, @@ -508,8 +497,16 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, if (!seq) return NULL; + spin_lock_bh(&seq->lock); publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key); - nameseq_delete_empty(seq); + if (!seq->first_free && list_empty(&seq->subscriptions)) { + hlist_del_init_rcu(&seq->ns_list); + kfree(seq->sseqs); + spin_unlock_bh(&seq->lock); + kfree_rcu(seq, rcu); + return publ; + } + spin_unlock_bh(&seq->lock); return publ; } @@ -538,14 +535,14 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) if (!tipc_in_scope(*destnode, tipc_own_addr)) return 0; - read_lock_bh(&tipc_nametbl_lock); + rcu_read_lock(); seq = nametbl_find_seq(type); if (unlikely(!seq)) goto not_found; + spin_lock_bh(&seq->lock); sseq = nameseq_find_subseq(seq, instance); if (unlikely(!sseq)) - goto not_found; - spin_lock_bh(&seq->lock); + goto no_match; info = sseq->info; /* Closest-First Algorithm */ @@ -595,7 +592,7 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) no_match: spin_unlock_bh(&seq->lock); not_found: - read_unlock_bh(&tipc_nametbl_lock); + rcu_read_unlock(); *destnode = node; return ref; } @@ -621,13 +618,12 @@ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, struct name_info *info; int res = 0; - read_lock_bh(&tipc_nametbl_lock); + rcu_read_lock(); seq = nametbl_find_seq(type); if (!seq) goto exit; spin_lock_bh(&seq->lock); - sseq = seq->sseqs + nameseq_locate_subseq(seq, lower); sseq_stop = seq->sseqs + seq->first_free; for (; sseq != sseq_stop; sseq++) { @@ -645,10 +641,9 @@ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, if (info->cluster_list_size != info->node_list_size) res = 1; } - spin_unlock_bh(&seq->lock); exit: - read_unlock_bh(&tipc_nametbl_lock); + rcu_read_unlock(); return res; } @@ -661,22 +656,23 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, struct publication *publ; struct sk_buff *buf = NULL; - if (table.local_publ_count >= TIPC_MAX_PUBLICATIONS) { + spin_lock_bh(&tipc_nametbl_lock); + if (tipc_nametbl->local_publ_count >= TIPC_MAX_PUBLICATIONS) { pr_warn("Publication failed, local publication limit reached (%u)\n", TIPC_MAX_PUBLICATIONS); + spin_unlock_bh(&tipc_nametbl_lock); return NULL; } - write_lock_bh(&tipc_nametbl_lock); publ = tipc_nametbl_insert_publ(type, lower, upper, scope, tipc_own_addr, port_ref, key); if (likely(publ)) { - table.local_publ_count++; + tipc_nametbl->local_publ_count++; buf = tipc_named_publish(publ); /* Any pending external events? */ tipc_named_process_backlog(); } - write_unlock_bh(&tipc_nametbl_lock); + spin_unlock_bh(&tipc_nametbl_lock); if (buf) named_cluster_distribute(buf); @@ -689,27 +685,28 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) { struct publication *publ; - struct sk_buff *buf; + struct sk_buff *skb = NULL; - write_lock_bh(&tipc_nametbl_lock); + spin_lock_bh(&tipc_nametbl_lock); publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key); if (likely(publ)) { - table.local_publ_count--; - buf = tipc_named_withdraw(publ); + tipc_nametbl->local_publ_count--; + skb = tipc_named_withdraw(publ); /* Any pending external events? 
*/ tipc_named_process_backlog(); - write_unlock_bh(&tipc_nametbl_lock); list_del_init(&publ->pport_list); - kfree(publ); + kfree_rcu(publ, rcu); + } else { + pr_err("Unable to remove local publication\n" + "(type=%u, lower=%u, ref=%u, key=%u)\n", + type, lower, ref, key); + } + spin_unlock_bh(&tipc_nametbl_lock); - if (buf) - named_cluster_distribute(buf); + if (skb) { + named_cluster_distribute(skb); return 1; } - write_unlock_bh(&tipc_nametbl_lock); - pr_err("Unable to remove local publication\n" - "(type=%u, lower=%u, ref=%u, key=%u)\n", - type, lower, ref, key); return 0; } @@ -719,12 +716,14 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) void tipc_nametbl_subscribe(struct tipc_subscription *s) { u32 type = s->seq.type; + int index = hash(type); struct name_seq *seq; - write_lock_bh(&tipc_nametbl_lock); + spin_lock_bh(&tipc_nametbl_lock); seq = nametbl_find_seq(type); if (!seq) - seq = tipc_nameseq_create(type, &table.types[hash(type)]); + seq = tipc_nameseq_create(type, + &tipc_nametbl->seq_hlist[index]); if (seq) { spin_lock_bh(&seq->lock); tipc_nameseq_subscribe(seq, s); @@ -733,7 +732,7 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s) pr_warn("Failed to create subscription for {%u,%u,%u}\n", s->seq.type, s->seq.lower, s->seq.upper); } - write_unlock_bh(&tipc_nametbl_lock); + spin_unlock_bh(&tipc_nametbl_lock); } /** @@ -743,18 +742,23 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s) { struct name_seq *seq; - write_lock_bh(&tipc_nametbl_lock); + spin_lock_bh(&tipc_nametbl_lock); seq = nametbl_find_seq(s->seq.type); if (seq != NULL) { spin_lock_bh(&seq->lock); list_del_init(&s->nameseq_list); - spin_unlock_bh(&seq->lock); - nameseq_delete_empty(seq); + if (!seq->first_free && list_empty(&seq->subscriptions)) { + hlist_del_init_rcu(&seq->ns_list); + kfree(seq->sseqs); + spin_unlock_bh(&seq->lock); + kfree_rcu(seq, rcu); + } else { + spin_unlock_bh(&seq->lock); + } } - write_unlock_bh(&tipc_nametbl_lock); + spin_unlock_bh(&tipc_nametbl_lock); } - /** * subseq_list - print specified sub-sequence contents into the given buffer */ @@ -876,8 +880,8 @@ static int nametbl_list(char *buf, int len, u32 depth_info, lowbound = 0; upbound = ~0; for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { - seq_head = &table.types[i]; - hlist_for_each_entry(seq, seq_head, ns_list) { + seq_head = &tipc_nametbl->seq_hlist[i]; + hlist_for_each_entry_rcu(seq, seq_head, ns_list) { ret += nameseq_list(seq, buf + ret, len - ret, depth, seq->type, lowbound, upbound, i); @@ -892,8 +896,8 @@ static int nametbl_list(char *buf, int len, u32 depth_info, } ret += nametbl_header(buf + ret, len - ret, depth); i = hash(type); - seq_head = &table.types[i]; - hlist_for_each_entry(seq, seq_head, ns_list) { + seq_head = &tipc_nametbl->seq_hlist[i]; + hlist_for_each_entry_rcu(seq, seq_head, ns_list) { if (seq->type == type) { ret += nameseq_list(seq, buf + ret, len - ret, depth, type, @@ -925,11 +929,11 @@ struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space) pb = TLV_DATA(rep_tlv); pb_len = ULTRA_STRING_MAX_LEN; argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area); - read_lock_bh(&tipc_nametbl_lock); + rcu_read_lock(); str_len = nametbl_list(pb, pb_len, ntohl(argv->depth), ntohl(argv->type), ntohl(argv->lowbound), ntohl(argv->upbound)); - read_unlock_bh(&tipc_nametbl_lock); + rcu_read_unlock(); str_len += 1; /* for "\0" */ skb_put(buf, TLV_SPACE(str_len)); TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); @@ -939,12 +943,18 @@ struct sk_buff 
*tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space) int tipc_nametbl_init(void) { - table.types = kcalloc(TIPC_NAMETBL_SIZE, sizeof(struct hlist_head), - GFP_ATOMIC); - if (!table.types) + int i; + + tipc_nametbl = kzalloc(sizeof(*tipc_nametbl), GFP_ATOMIC); + if (!tipc_nametbl) return -ENOMEM; - table.local_publ_count = 0; + for (i = 0; i < TIPC_NAMETBL_SIZE; i++) + INIT_HLIST_HEAD(&tipc_nametbl->seq_hlist[i]); + + INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]); + INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]); + INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_NODE_SCOPE]); return 0; } @@ -959,17 +969,19 @@ static void tipc_purge_publications(struct name_seq *seq) struct sub_seq *sseq; struct name_info *info; - if (!seq->sseqs) { - nameseq_delete_empty(seq); - return; - } + spin_lock_bh(&seq->lock); sseq = seq->sseqs; info = sseq->info; list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) { tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node, publ->ref, publ->key); - kfree(publ); + kfree_rcu(publ, rcu); } + hlist_del_init_rcu(&seq->ns_list); + kfree(seq->sseqs); + spin_unlock_bh(&seq->lock); + + kfree_rcu(seq, rcu); } void tipc_nametbl_stop(void) @@ -977,21 +989,202 @@ void tipc_nametbl_stop(void) u32 i; struct name_seq *seq; struct hlist_head *seq_head; - struct hlist_node *safe; /* Verify name table is empty and purge any lingering * publications, then release the name table */ - write_lock_bh(&tipc_nametbl_lock); + spin_lock_bh(&tipc_nametbl_lock); for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { - if (hlist_empty(&table.types[i])) + if (hlist_empty(&tipc_nametbl->seq_hlist[i])) continue; - seq_head = &table.types[i]; - hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) { + seq_head = &tipc_nametbl->seq_hlist[i]; + hlist_for_each_entry_rcu(seq, seq_head, ns_list) { tipc_purge_publications(seq); } } - kfree(table.types); - table.types = NULL; - write_unlock_bh(&tipc_nametbl_lock); + spin_unlock_bh(&tipc_nametbl_lock); + + synchronize_net(); + kfree(tipc_nametbl); + +} + +static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg, + struct name_seq *seq, + struct sub_seq *sseq, u32 *last_publ) +{ + void *hdr; + struct nlattr *attrs; + struct nlattr *publ; + struct publication *p; + + if (*last_publ) { + list_for_each_entry(p, &sseq->info->zone_list, zone_list) + if (p->key == *last_publ) + break; + if (p->key != *last_publ) + return -EPIPE; + } else { + p = list_first_entry(&sseq->info->zone_list, struct publication, + zone_list); + } + + list_for_each_entry_from(p, &sseq->info->zone_list, zone_list) { + *last_publ = p->key; + + hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, + &tipc_genl_v2_family, NLM_F_MULTI, + TIPC_NL_NAME_TABLE_GET); + if (!hdr) + return -EMSGSIZE; + + attrs = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE); + if (!attrs) + goto msg_full; + + publ = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE_PUBL); + if (!publ) + goto attr_msg_full; + + if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_TYPE, seq->type)) + goto publ_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_LOWER, sseq->lower)) + goto publ_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_UPPER, sseq->upper)) + goto publ_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_SCOPE, p->scope)) + goto publ_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_NODE, p->node)) + goto publ_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_REF, p->ref)) + goto publ_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_KEY, p->key)) + goto publ_msg_full; + + 
nla_nest_end(msg->skb, publ); + nla_nest_end(msg->skb, attrs); + genlmsg_end(msg->skb, hdr); + } + *last_publ = 0; + + return 0; + +publ_msg_full: + nla_nest_cancel(msg->skb, publ); +attr_msg_full: + nla_nest_cancel(msg->skb, attrs); +msg_full: + genlmsg_cancel(msg->skb, hdr); + + return -EMSGSIZE; +} + +static int __tipc_nl_subseq_list(struct tipc_nl_msg *msg, struct name_seq *seq, + u32 *last_lower, u32 *last_publ) +{ + struct sub_seq *sseq; + struct sub_seq *sseq_start; + int err; + + if (*last_lower) { + sseq_start = nameseq_find_subseq(seq, *last_lower); + if (!sseq_start) + return -EPIPE; + } else { + sseq_start = seq->sseqs; + } + + for (sseq = sseq_start; sseq != &seq->sseqs[seq->first_free]; sseq++) { + err = __tipc_nl_add_nametable_publ(msg, seq, sseq, last_publ); + if (err) { + *last_lower = sseq->lower; + return err; + } + } + *last_lower = 0; + + return 0; +} + +static int __tipc_nl_seq_list(struct tipc_nl_msg *msg, u32 *last_type, + u32 *last_lower, u32 *last_publ) +{ + struct hlist_head *seq_head; + struct name_seq *seq = NULL; + int err; + int i; + + if (*last_type) + i = hash(*last_type); + else + i = 0; + + for (; i < TIPC_NAMETBL_SIZE; i++) { + seq_head = &tipc_nametbl->seq_hlist[i]; + + if (*last_type) { + seq = nametbl_find_seq(*last_type); + if (!seq) + return -EPIPE; + } else { + hlist_for_each_entry_rcu(seq, seq_head, ns_list) + break; + if (!seq) + continue; + } + + hlist_for_each_entry_from_rcu(seq, ns_list) { + spin_lock_bh(&seq->lock); + err = __tipc_nl_subseq_list(msg, seq, last_lower, + last_publ); + + if (err) { + *last_type = seq->type; + spin_unlock_bh(&seq->lock); + return err; + } + spin_unlock_bh(&seq->lock); + } + *last_type = 0; + } + return 0; +} + +int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + int err; + int done = cb->args[3]; + u32 last_type = cb->args[0]; + u32 last_lower = cb->args[1]; + u32 last_publ = cb->args[2]; + struct tipc_nl_msg msg; + + if (done) + return 0; + + msg.skb = skb; + msg.portid = NETLINK_CB(cb->skb).portid; + msg.seq = cb->nlh->nlmsg_seq; + + rcu_read_lock(); + err = __tipc_nl_seq_list(&msg, &last_type, &last_lower, &last_publ); + if (!err) { + done = 1; + } else if (err != -EMSGSIZE) { + /* We never set seq or call nl_dump_check_consistent(); this + * means that setting prev_seq here will cause the consistency + * check to fail in the netlink callback handler, resulting in + * the NLMSG_DONE message having the NLM_F_DUMP_INTR flag set if + * we got an error. + */ + cb->prev_seq = 1; + } + rcu_read_unlock(); + + cb->args[0] = last_type; + cb->args[1] = last_lower; + cb->args[2] = last_publ; + cb->args[3] = done; + + return skb->len; } diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index f02f48b9a216e549a02a5518040ac7f55a085dbc..5f0dee92010d24e3d1d8757390c255e119173e82 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h @@ -1,7 +1,7 @@ /* * net/tipc/name_table.h: Include file for TIPC name table code * - * Copyright (c) 2000-2006, Ericsson AB + * Copyright (c) 2000-2006, 2014, Ericsson AB * Copyright (c) 2004-2005, 2010-2011, Wind River Systems * All rights reserved.
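tipc_nl_name_table_dump() above relies on the netlink dump contract: the core re-invokes the callback until it returns 0, and the only state that survives between invocations is cb->args[]. A minimal sketch of that contract (example_dump and last_key are hypothetical names, not part of this patch):

	static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
	{
		u32 last_key = cb->args[0];	/* 0 on the first invocation */

		/* fill skb until it is full (-EMSGSIZE), recording the
		 * key of the last object written in last_key */

		cb->args[0] = last_key;		/* resume point for the next call */
		return skb->len;		/* non-zero: core calls us again */
	}

Returning -EPIPE when the remembered object has disappeared, as the TIPC dumpers do, aborts the dump instead of silently skipping entries.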
* @@ -37,15 +37,15 @@ #ifndef _TIPC_NAME_TABLE_H #define _TIPC_NAME_TABLE_H -#include "node_subscr.h" - struct tipc_subscription; struct tipc_port_list; /* * TIPC name types reserved for internal TIPC use (both current and planned) */ -#define TIPC_ZM_SRV 3 /* zone master service name type */ +#define TIPC_ZM_SRV 3 /* zone master service name type */ +#define TIPC_PUBL_SCOPE_NUM (TIPC_NODE_SCOPE + 1) +#define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */ /** * struct publication - info about a published (name or) name sequence * @@ -56,12 +56,13 @@ struct tipc_port_list; * @node: network address of publishing port's node * @ref: publishing port * @key: publication key - * @subscr: subscription to "node down" event (for off-node publications only) + * @nodesub_list: subscription to "node down" event (off-node publication only) * @local_list: adjacent entries in list of publications made by this node * @pport_list: adjacent entries in list of publications made by this port * @node_list: adjacent matching name seq publications with >= node scope * @cluster_list: adjacent matching name seq publications with >= cluster scope * @zone_list: adjacent matching name seq publications with >= zone scope + * @rcu: RCU callback head used for deferred freeing * * Note that the node list, cluster list, and zone list are circular lists. */ @@ -73,16 +74,31 @@ struct publication { u32 node; u32 ref; u32 key; - struct tipc_node_subscr subscr; + struct list_head nodesub_list; struct list_head local_list; struct list_head pport_list; struct list_head node_list; struct list_head cluster_list; struct list_head zone_list; + struct rcu_head rcu; +}; + +/** + * struct name_table - table containing all existing port name publications + * @seq_hlist: name sequence hash lists + * @publ_list: publication lists + * @local_publ_count: number of publications issued by this node + */ +struct name_table { + struct hlist_head seq_hlist[TIPC_NAMETBL_SIZE]; + struct list_head publ_list[TIPC_PUBL_SCOPE_NUM]; + u32 local_publ_count; }; +extern spinlock_t tipc_nametbl_lock; +extern struct name_table *tipc_nametbl; -extern rwlock_t tipc_nametbl_lock; +int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space); u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node); diff --git a/net/tipc/net.c b/net/tipc/net.c index 93b9944a6a8bb1e94c332b8bd5e5f641d90899d8..cf13df3cde8f9ec48ddb8ee2f326e2caad9a25de 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c @@ -42,6 +42,11 @@ #include "node.h" #include "config.h" +static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = { + [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC }, + [TIPC_NLA_NET_ID] = { .type = NLA_U32 } +}; + /* * The TIPC locking policy is designed to ensure a very fine locking * granularity, permitting complete parallel access to individual @@ -138,3 +143,104 @@ void tipc_net_stop(void) pr_info("Left network mode\n"); } + +static int __tipc_nl_add_net(struct tipc_nl_msg *msg) +{ + void *hdr; + struct nlattr *attrs; + + hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, + NLM_F_MULTI, TIPC_NL_NET_GET); + if (!hdr) + return -EMSGSIZE; + + attrs = nla_nest_start(msg->skb, TIPC_NLA_NET); + if (!attrs) + goto msg_full; + + if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tipc_net_id)) + goto attr_msg_full; + + nla_nest_end(msg->skb, attrs); + genlmsg_end(msg->skb, hdr); + + return 0; + +attr_msg_full: + nla_nest_cancel(msg->skb, attrs); +msg_full: +
genlmsg_cancel(msg->skb, hdr); + + return -EMSGSIZE; +} + +int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + int err; + int done = cb->args[0]; + struct tipc_nl_msg msg; + + if (done) + return 0; + + msg.skb = skb; + msg.portid = NETLINK_CB(cb->skb).portid; + msg.seq = cb->nlh->nlmsg_seq; + + err = __tipc_nl_add_net(&msg); + if (err) + goto out; + + done = 1; +out: + cb->args[0] = done; + + return skb->len; +} + +int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info) +{ + int err; + struct nlattr *attrs[TIPC_NLA_NET_MAX + 1]; + + if (!info->attrs[TIPC_NLA_NET]) + return -EINVAL; + + err = nla_parse_nested(attrs, TIPC_NLA_NET_MAX, + info->attrs[TIPC_NLA_NET], + tipc_nl_net_policy); + if (err) + return err; + + if (attrs[TIPC_NLA_NET_ID]) { + u32 val; + + /* Can't change net id once TIPC has joined a network */ + if (tipc_own_addr) + return -EPERM; + + val = nla_get_u32(attrs[TIPC_NLA_NET_ID]); + if (val < 1 || val > 9999) + return -EINVAL; + + tipc_net_id = val; + } + + if (attrs[TIPC_NLA_NET_ADDR]) { + u32 addr; + + /* Can't change net addr once TIPC has joined a network */ + if (tipc_own_addr) + return -EPERM; + + addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]); + if (!tipc_addr_node_valid(addr)) + return -EINVAL; + + rtnl_lock(); + tipc_net_start(addr); + rtnl_unlock(); + } + + return 0; +} diff --git a/net/tipc/net.h b/net/tipc/net.h index 59ef3388be2ce151320503d46a3f6f766375acee..a81c1b9eb150630706a807679d841afaa837cec3 100644 --- a/net/tipc/net.h +++ b/net/tipc/net.h @@ -1,7 +1,7 @@ /* * net/tipc/net.h: Include file for TIPC network routing code * - * Copyright (c) 1995-2006, Ericsson AB + * Copyright (c) 1995-2006, 2014, Ericsson AB * Copyright (c) 2005, 2010-2011, Wind River Systems * All rights reserved. * @@ -37,7 +37,13 @@ #ifndef _TIPC_NET_H #define _TIPC_NET_H +#include <net/genetlink.h> + int tipc_net_start(u32 addr); + void tipc_net_stop(void); +int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb); +int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info); + #endif diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index ad844d3653409a6f5ad2ceac53e1b52dc48eacaa..b891e3905bc42255e7bfc88de0a98af05238895a 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c @@ -1,7 +1,7 @@ /* * net/tipc/netlink.c: TIPC configuration handling * - * Copyright (c) 2005-2006, Ericsson AB + * Copyright (c) 2005-2006, 2014, Ericsson AB * Copyright (c) 2005-2007, Wind River Systems * All rights reserved.
* @@ -36,6 +36,12 @@ #include "core.h" #include "config.h" +#include "socket.h" +#include "name_table.h" +#include "bearer.h" +#include "link.h" +#include "node.h" +#include "net.h" #include <net/genetlink.h> static int handle_cmd(struct sk_buff *skb, struct genl_info *info) @@ -68,6 +74,19 @@ return 0; } +static const struct nla_policy tipc_nl_policy[TIPC_NLA_MAX + 1] = { + [TIPC_NLA_UNSPEC] = { .type = NLA_UNSPEC, }, + [TIPC_NLA_BEARER] = { .type = NLA_NESTED, }, + [TIPC_NLA_SOCK] = { .type = NLA_NESTED, }, + [TIPC_NLA_PUBL] = { .type = NLA_NESTED, }, + [TIPC_NLA_LINK] = { .type = NLA_NESTED, }, + [TIPC_NLA_MEDIA] = { .type = NLA_NESTED, }, + [TIPC_NLA_NODE] = { .type = NLA_NESTED, }, + [TIPC_NLA_NET] = { .type = NLA_NESTED, }, + [TIPC_NLA_NAME_TABLE] = { .type = NLA_NESTED, } +}; + +/* Legacy ASCII API */ static struct genl_family tipc_genl_family = { .id = GENL_ID_GENERATE, .name = TIPC_GENL_NAME, @@ -76,6 +95,7 @@ static struct genl_family tipc_genl_family = { .maxattr = 0, }; +/* Legacy ASCII API */ static struct genl_ops tipc_genl_ops[] = { { .cmd = TIPC_GENL_CMD, @@ -83,11 +103,121 @@ static struct genl_ops tipc_genl_ops[] = { }, }; +/* Users of the legacy API (tipc-config) can't handle that we add operations, + * so we have a separate genl handling for the new API. + */ +struct genl_family tipc_genl_v2_family = { + .id = GENL_ID_GENERATE, + .name = TIPC_GENL_V2_NAME, + .version = TIPC_GENL_V2_VERSION, + .hdrsize = 0, + .maxattr = TIPC_NLA_MAX, +}; + +static const struct genl_ops tipc_genl_v2_ops[] = { + { + .cmd = TIPC_NL_BEARER_DISABLE, + .doit = tipc_nl_bearer_disable, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_BEARER_ENABLE, + .doit = tipc_nl_bearer_enable, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_BEARER_GET, + .doit = tipc_nl_bearer_get, + .dumpit = tipc_nl_bearer_dump, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_BEARER_SET, + .doit = tipc_nl_bearer_set, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_SOCK_GET, + .dumpit = tipc_nl_sk_dump, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_PUBL_GET, + .dumpit = tipc_nl_publ_dump, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_LINK_GET, + .doit = tipc_nl_link_get, + .dumpit = tipc_nl_link_dump, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_LINK_SET, + .doit = tipc_nl_link_set, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_LINK_RESET_STATS, + .doit = tipc_nl_link_reset_stats, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_MEDIA_GET, + .doit = tipc_nl_media_get, + .dumpit = tipc_nl_media_dump, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_MEDIA_SET, + .doit = tipc_nl_media_set, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_NODE_GET, + .dumpit = tipc_nl_node_dump, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_NET_GET, + .dumpit = tipc_nl_net_dump, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_NET_SET, + .doit = tipc_nl_net_set, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_NAME_TABLE_GET, + .dumpit = tipc_nl_name_table_dump, + .policy = tipc_nl_policy, + } +}; + +int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***attr) +{ + u32 maxattr = tipc_genl_v2_family.maxattr; + + *attr = tipc_genl_v2_family.attrbuf; + if (!*attr) + return -EOPNOTSUPP; + + return nlmsg_parse(nlh, GENL_HDRLEN, *attr, maxattr, tipc_nl_policy); +} + int tipc_netlink_start(void) { int res; res = genl_register_family_with_ops(&tipc_genl_family, tipc_genl_ops); + if (res) { + pr_err("Failed
to register legacy interface\n"); + return res; + } + + res = genl_register_family_with_ops(&tipc_genl_v2_family, + tipc_genl_v2_ops); if (res) { pr_err("Failed to register netlink interface\n"); return res; @@ -98,4 +228,5 @@ int tipc_netlink_start(void) void tipc_netlink_stop(void) { genl_unregister_family(&tipc_genl_family); + genl_unregister_family(&tipc_genl_v2_family); } diff --git a/net/tipc/node_subscr.h b/net/tipc/netlink.h similarity index 63% rename from net/tipc/node_subscr.h rename to net/tipc/netlink.h index d91b8cc81e3d786948bd4ed9ac622689c4218ea1..1425c6869de0234d00488e533d25c4ecaf0541e1 100644 --- a/net/tipc/node_subscr.h +++ b/net/tipc/netlink.h @@ -1,8 +1,7 @@ /* - * net/tipc/node_subscr.h: Include file for TIPC "node down" subscription handling + * net/tipc/netlink.h: Include file for TIPC netlink code * - * Copyright (c) 1995-2006, Ericsson AB - * Copyright (c) 2005, 2010-2011, Wind River Systems + * Copyright (c) 2014, Ericsson AB * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -34,30 +33,16 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#ifndef _TIPC_NODE_SUBSCR_H -#define _TIPC_NODE_SUBSCR_H +#ifndef _TIPC_NETLINK_H +#define _TIPC_NETLINK_H -#include "addr.h" +extern struct genl_family tipc_genl_v2_family; +int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***buf); -typedef void (*net_ev_handler) (void *usr_handle); - -/** - * struct tipc_node_subscr - "node down" subscription entry - * @node: ptr to node structure of interest (or NULL, if none) - * @handle_node_down: routine to invoke when node fails - * @usr_handle: argument to pass to routine when node fails - * @nodesub_list: adjacent entries in list of subscriptions for the node - */ -struct tipc_node_subscr { - struct tipc_node *node; - net_ev_handler handle_node_down; - void *usr_handle; - struct list_head nodesub_list; +struct tipc_nl_msg { + struct sk_buff *skb; + u32 portid; + u32 seq; }; -void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr, - void *usr_handle, net_ev_handler handle_down); -void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub); -void tipc_nodesub_notify(struct list_head *nsub_list); - #endif diff --git a/net/tipc/node.c b/net/tipc/node.c index 5781634e957d7be47c2e572783f3af2a3db9bf91..8d353ec77a6661820066e4cbee0d74cfe534ea08 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -58,6 +58,12 @@ struct tipc_sock_conn { struct list_head list; }; +static const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = { + [TIPC_NLA_NODE_UNSPEC] = { .type = NLA_UNSPEC }, + [TIPC_NLA_NODE_ADDR] = { .type = NLA_U32 }, + [TIPC_NLA_NODE_UP] = { .type = NLA_FLAG } +}; + /* * A trivial power-of-two bitmask technique is used for speed, since this * operation is done for every incoming TIPC packet. 
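For reference, with a power-of-two table size the bitmask technique mentioned here reduces the hash to a single AND; a sketch of what tipc_hashfn() is assumed to look like:

	static inline unsigned int tipc_hashfn(u32 addr)
	{
		return addr & (NODE_HTABLE_SIZE - 1);	/* addr % size, cheaply */
	}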
The number of hash table @@ -107,9 +113,10 @@ struct tipc_node *tipc_node_create(u32 addr) spin_lock_init(&n_ptr->lock); INIT_HLIST_NODE(&n_ptr->hash); INIT_LIST_HEAD(&n_ptr->list); - INIT_LIST_HEAD(&n_ptr->nsub); + INIT_LIST_HEAD(&n_ptr->publ_list); INIT_LIST_HEAD(&n_ptr->conn_sks); - __skb_queue_head_init(&n_ptr->waiting_sks); + skb_queue_head_init(&n_ptr->waiting_sks); + __skb_queue_head_init(&n_ptr->bclink.deferred_queue); hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]); @@ -375,8 +382,7 @@ static void node_lost_contact(struct tipc_node *n_ptr) /* Flush broadcast link info associated with lost node */ if (n_ptr->bclink.recv_permitted) { - kfree_skb_list(n_ptr->bclink.deferred_head); - n_ptr->bclink.deferred_size = 0; + __skb_queue_purge(&n_ptr->bclink.deferred_queue); if (n_ptr->bclink.reasm_buf) { kfree_skb(n_ptr->bclink.reasm_buf); @@ -568,7 +574,7 @@ void tipc_node_unlock(struct tipc_node *node) skb_queue_splice_init(&node->waiting_sks, &waiting_sks); if (flags & TIPC_NOTIFY_NODE_DOWN) { - list_replace_init(&node->nsub, &nsub_list); + list_replace_init(&node->publ_list, &nsub_list); list_replace_init(&node->conn_sks, &conn_sks); } node->action_flags &= ~(TIPC_WAKEUP_USERS | TIPC_NOTIFY_NODE_DOWN | @@ -585,7 +591,7 @@ void tipc_node_unlock(struct tipc_node *node) tipc_node_abort_sock_conns(&conn_sks); if (!list_empty(&nsub_list)) - tipc_nodesub_notify(&nsub_list); + tipc_publ_notify(&nsub_list, addr); if (flags & TIPC_WAKEUP_BCAST_USERS) tipc_bclink_wakeup_users(); @@ -601,3 +607,93 @@ void tipc_node_unlock(struct tipc_node *node) tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, link_id, addr); } + +/* Caller should hold node lock for the passed node */ +static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node) +{ + void *hdr; + struct nlattr *attrs; + + hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, + NLM_F_MULTI, TIPC_NL_NODE_GET); + if (!hdr) + return -EMSGSIZE; + + attrs = nla_nest_start(msg->skb, TIPC_NLA_NODE); + if (!attrs) + goto msg_full; + + if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr)) + goto attr_msg_full; + if (tipc_node_is_up(node)) + if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP)) + goto attr_msg_full; + + nla_nest_end(msg->skb, attrs); + genlmsg_end(msg->skb, hdr); + + return 0; + +attr_msg_full: + nla_nest_cancel(msg->skb, attrs); +msg_full: + genlmsg_cancel(msg->skb, hdr); + + return -EMSGSIZE; +} + +int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + int err; + int done = cb->args[0]; + int last_addr = cb->args[1]; + struct tipc_node *node; + struct tipc_nl_msg msg; + + if (done) + return 0; + + msg.skb = skb; + msg.portid = NETLINK_CB(cb->skb).portid; + msg.seq = cb->nlh->nlmsg_seq; + + rcu_read_lock(); + + if (last_addr && !tipc_node_find(last_addr)) { + rcu_read_unlock(); + /* We never set seq or call nl_dump_check_consistent(); this + * means that setting prev_seq here will cause the consistency + * check to fail in the netlink callback handler, resulting in + * the NLMSG_DONE message having the NLM_F_DUMP_INTR flag set if + * the node state changed while we released the lock.
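The prev_seq trick in this comment works because the netlink core stamps the terminating NLMSG_DONE header via nl_dump_check_consistent(); since this dump never sets cb->seq, forcing prev_seq to a non-zero value guarantees the mismatch. The helper in include/net/netlink.h is essentially:

	static inline void
	nl_dump_check_consistent(struct netlink_callback *cb, struct nlmsghdr *nlh)
	{
		if (cb->prev_seq && cb->seq != cb->prev_seq)
			nlh->nlmsg_flags |= NLM_F_DUMP_INTR;
		cb->prev_seq = cb->seq;
	}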
+ */ + cb->prev_seq = 1; + return -EPIPE; + } + + list_for_each_entry_rcu(node, &tipc_node_list, list) { + if (last_addr) { + if (node->addr == last_addr) + last_addr = 0; + else + continue; + } + + tipc_node_lock(node); + err = __tipc_nl_add_node(&msg, node); + if (err) { + last_addr = node->addr; + tipc_node_unlock(node); + goto out; + } + + tipc_node_unlock(node); + } + done = 1; +out: + cb->args[0] = done; + cb->args[1] = last_addr; + rcu_read_unlock(); + + return skb->len; +} diff --git a/net/tipc/node.h b/net/tipc/node.h index 04e91458bb29a17916964e65fbca8a99a35a7f0d..cbe0e950f1ccb81a63a3e0a19719cdef6fb2c6c5 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -1,7 +1,7 @@ /* * net/tipc/node.h: Include file for TIPC node management routines * - * Copyright (c) 2000-2006, Ericsson AB + * Copyright (c) 2000-2006, 2014, Ericsson AB * Copyright (c) 2005, 2010-2014, Wind River Systems * All rights reserved. * @@ -37,7 +37,6 @@ #ifndef _TIPC_NODE_H #define _TIPC_NODE_H -#include "node_subscr.h" #include "addr.h" #include "net.h" #include "bearer.h" @@ -72,9 +71,7 @@ enum { * @last_in: sequence # of last in-sequence b'cast message received from node * @last_sent: sequence # of last b'cast message sent by node * @oos_state: state tracker for handling OOS b'cast messages - * @deferred_size: number of OOS b'cast messages in deferred queue - * @deferred_head: oldest OOS b'cast message received from node - * @deferred_tail: newest OOS b'cast message received from node + * @deferred_queue: deferred queue of OOS b'cast messages received from node * @reasm_buf: broadcast reassembly queue head from node * @recv_permitted: true if node is allowed to receive b'cast messages */ @@ -84,8 +81,7 @@ struct tipc_node_bclink { u32 last_sent; u32 oos_state; u32 deferred_size; - struct sk_buff *deferred_head; - struct sk_buff *deferred_tail; + struct sk_buff_head deferred_queue; struct sk_buff *reasm_buf; bool recv_permitted; }; @@ -104,7 +100,7 @@ struct tipc_node_bclink { * @link_cnt: number of links to node * @signature: node instance identifier * @link_id: local and remote bearer ids of changing link, if any - * @nsub: list of "node down" subscriptions monitoring node + * @publ_list: list of publications * @rcu: rcu struct for tipc_node */ struct tipc_node { @@ -121,7 +117,7 @@ struct tipc_node { int working_links; u32 signature; u32 link_id; - struct list_head nsub; + struct list_head publ_list; struct sk_buff_head waiting_sks; struct list_head conn_sks; struct rcu_head rcu; @@ -145,6 +141,8 @@ void tipc_node_unlock(struct tipc_node *node); int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port); void tipc_node_remove_conn(u32 dnode, u32 port); +int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb); + static inline void tipc_node_lock(struct tipc_node *node) { spin_lock_bh(&node->lock); diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c deleted file mode 100644 index 2d13eea8574a9a3b74f2959d35031183686bc44f..0000000000000000000000000000000000000000 --- a/net/tipc/node_subscr.c +++ /dev/null @@ -1,96 +0,0 @@ -/* - * net/tipc/node_subscr.c: TIPC "node down" subscription handling - * - * Copyright (c) 1995-2006, Ericsson AB - * Copyright (c) 2005, 2010-2011, Wind River Systems - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1.
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include "core.h" -#include "node_subscr.h" -#include "node.h" - -/** - * tipc_nodesub_subscribe - create "node down" subscription for specified node - */ -void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr, - void *usr_handle, net_ev_handler handle_down) -{ - if (in_own_node(addr)) { - node_sub->node = NULL; - return; - } - - node_sub->node = tipc_node_find(addr); - if (!node_sub->node) { - pr_warn("Node subscription rejected, unknown node 0x%x\n", - addr); - return; - } - node_sub->handle_node_down = handle_down; - node_sub->usr_handle = usr_handle; - - tipc_node_lock(node_sub->node); - list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub); - tipc_node_unlock(node_sub->node); -} - -/** - * tipc_nodesub_unsubscribe - cancel "node down" subscription (if any) - */ -void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub) -{ - if (!node_sub->node) - return; - - tipc_node_lock(node_sub->node); - list_del_init(&node_sub->nodesub_list); - tipc_node_unlock(node_sub->node); -} - -/** - * tipc_nodesub_notify - notify subscribers that a node is unreachable - * - * Note: node is locked by caller - */ -void tipc_nodesub_notify(struct list_head *nsub_list) -{ - struct tipc_node_subscr *ns, *safe; - net_ev_handler handle_node_down; - - list_for_each_entry_safe(ns, safe, nsub_list, nodesub_list) { - handle_node_down = ns->handle_node_down; - if (handle_node_down) { - ns->handle_node_down = NULL; - handle_node_down(ns->usr_handle); - } - } -} diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 51bddc236a15582b1a1dd0deb0b1a733c1d40070..4731cad99d1cc8896cc0d6734f6eb48af3135207 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -121,6 +121,14 @@ static const struct proto_ops msg_ops; static struct proto tipc_proto; static struct proto tipc_proto_kern; +static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = { + [TIPC_NLA_SOCK_UNSPEC] = { .type = NLA_UNSPEC }, + [TIPC_NLA_SOCK_ADDR] = { .type = NLA_U32 }, + [TIPC_NLA_SOCK_REF] = 
{ .type = NLA_U32 }, + [TIPC_NLA_SOCK_CON] = { .type = NLA_NESTED }, + [TIPC_NLA_SOCK_HAS_PUBL] = { .type = NLA_FLAG } +}; + /* * Revised TIPC socket locking policy: * @@ -236,12 +244,12 @@ static void tsk_advance_rx_queue(struct sock *sk) */ static void tsk_rej_rx_queue(struct sock *sk) { - struct sk_buff *buf; + struct sk_buff *skb; u32 dnode; - while ((buf = __skb_dequeue(&sk->sk_receive_queue))) { - if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT)) - tipc_link_xmit(buf, dnode, 0); + while ((skb = __skb_dequeue(&sk->sk_receive_queue))) { + if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT)) + tipc_link_xmit_skb(skb, dnode, 0); } } @@ -454,7 +462,7 @@ static int tipc_release(struct socket *sock) { struct sock *sk = sock->sk; struct tipc_sock *tsk; - struct sk_buff *buf; + struct sk_buff *skb; u32 dnode; /* @@ -473,11 +481,11 @@ static int tipc_release(struct socket *sock) */ dnode = tsk_peer_node(tsk); while (sock->state != SS_DISCONNECTING) { - buf = __skb_dequeue(&sk->sk_receive_queue); - if (buf == NULL) + skb = __skb_dequeue(&sk->sk_receive_queue); + if (skb == NULL) break; - if (TIPC_SKB_CB(buf)->handle != NULL) - kfree_skb(buf); + if (TIPC_SKB_CB(skb)->handle != NULL) + kfree_skb(skb); else { if ((sock->state == SS_CONNECTING) || (sock->state == SS_CONNECTED)) { @@ -485,8 +493,8 @@ static int tipc_release(struct socket *sock) tsk->connected = 0; tipc_node_remove_conn(dnode, tsk->ref); } - if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT)) - tipc_link_xmit(buf, dnode, 0); + if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT)) + tipc_link_xmit_skb(skb, dnode, 0); } } @@ -494,12 +502,12 @@ static int tipc_release(struct socket *sock) tipc_sk_ref_discard(tsk->ref); k_cancel_timer(&tsk->timer); if (tsk->connected) { - buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, + skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode, tipc_own_addr, tsk_peer_port(tsk), tsk->ref, TIPC_ERR_NO_PORT); - if (buf) - tipc_link_xmit(buf, dnode, tsk->ref); + if (skb) + tipc_link_xmit_skb(skb, dnode, tsk->ref); tipc_node_remove_conn(dnode, tsk->ref); } k_term_timer(&tsk->timer); @@ -692,7 +700,7 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock, * tipc_sendmcast - send multicast message * @sock: socket structure * @seq: destination address - * @iov: message data to send + * @msg: message to send * @dsz: total length of message data * @timeo: timeout to wait for wakeup * @@ -700,11 +708,11 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock, * Returns the number of bytes sent on success, or errno */ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, - struct iovec *iov, size_t dsz, long timeo) + struct msghdr *msg, size_t dsz, long timeo) { struct sock *sk = sock->sk; struct tipc_msg *mhdr = &tipc_sk(sk)->phdr; - struct sk_buff *buf; + struct sk_buff_head head; uint mtu; int rc; @@ -719,12 +727,13 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, new_mtu: mtu = tipc_bclink_get_mtu(); - rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf); + __skb_queue_head_init(&head); + rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &head); if (unlikely(rc < 0)) return rc; do { - rc = tipc_bclink_xmit(buf); + rc = tipc_bclink_xmit(&head); if (likely(rc >= 0)) { rc = dsz; break; @@ -736,7 +745,7 @@ new_mtu: tipc_sk(sk)->link_cong = 1; rc = tipc_wait_for_sndmsg(sock, &timeo); if (rc) - kfree_skb_list(buf); + __skb_queue_purge(&head); } while (!rc); return rc; } @@ -818,39 +827,6 @@ exit: 
return TIPC_OK; } -/** - * dest_name_check - verify user is permitted to send to specified port name - * @dest: destination address - * @m: descriptor for message to be sent - * - * Prevents restricted configuration commands from being issued by - * unauthorized users. - * - * Returns 0 if permission is granted, otherwise errno - */ -static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m) -{ - struct tipc_cfg_msg_hdr hdr; - - if (unlikely(dest->addrtype == TIPC_ADDR_ID)) - return 0; - if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES)) - return 0; - if (likely(dest->addr.name.name.type == TIPC_TOP_SRV)) - return 0; - if (likely(dest->addr.name.name.type != TIPC_CFG_SRV)) - return -EACCES; - - if (!m->msg_iovlen || (m->msg_iov[0].iov_len < sizeof(hdr))) - return -EMSGSIZE; - if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr))) - return -EFAULT; - if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN))) - return -EACCES; - - return 0; -} - static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p) { struct sock *sk = sock->sk; @@ -897,13 +873,13 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); struct tipc_msg *mhdr = &tsk->phdr; - struct iovec *iov = m->msg_iov; u32 dnode, dport; - struct sk_buff *buf; + struct sk_buff_head head; + struct sk_buff *skb; struct tipc_name_seq *seq = &dest->addr.nameseq; u32 mtu; long timeo; - int rc = -EINVAL; + int rc; if (unlikely(!dest)) return -EDESTADDRREQ; @@ -936,14 +912,11 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, tsk->conn_instance = dest->addr.name.name.instance; } } - rc = dest_name_check(dest, m); - if (rc) - goto exit; timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); if (dest->addrtype == TIPC_ADDR_MCAST) { - rc = tipc_sendmcast(sock, seq, iov, dsz, timeo); + rc = tipc_sendmcast(sock, seq, m, dsz, timeo); goto exit; } else if (dest->addrtype == TIPC_ADDR_NAME) { u32 type = dest->addr.name.name.type; @@ -974,13 +947,15 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, new_mtu: mtu = tipc_node_get_mtu(dnode, tsk->ref); - rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf); + __skb_queue_head_init(&head); + rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &head); if (rc < 0) goto exit; do { - TIPC_SKB_CB(buf)->wakeup_pending = tsk->link_cong; - rc = tipc_link_xmit(buf, dnode, tsk->ref); + skb = skb_peek(&head); + TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong; + rc = tipc_link_xmit(&head, dnode, tsk->ref); if (likely(rc >= 0)) { if (sock->state != SS_READY) sock->state = SS_CONNECTING; @@ -994,7 +969,7 @@ new_mtu: tsk->link_cong = 1; rc = tipc_wait_for_sndmsg(sock, &timeo); if (rc) - kfree_skb_list(buf); + __skb_queue_purge(&head); } while (!rc); exit: if (iocb) @@ -1051,7 +1026,7 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock, struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); struct tipc_msg *mhdr = &tsk->phdr; - struct sk_buff *buf; + struct sk_buff_head head; DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); u32 ref = tsk->ref; int rc = -EINVAL; @@ -1086,12 +1061,13 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock, next: mtu = tsk->max_pkt; send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE); - rc = tipc_msg_build(mhdr, m->msg_iov, sent, send, mtu, &buf); + __skb_queue_head_init(&head); + rc = tipc_msg_build(mhdr, m, sent, send, mtu, &head); if (unlikely(rc < 0)) goto exit; do { if 
(likely(!tsk_conn_cong(tsk))) { - rc = tipc_link_xmit(buf, dnode, ref); + rc = tipc_link_xmit(&head, dnode, ref); if (likely(!rc)) { tsk->sent_unacked++; sent += send; @@ -1109,7 +1085,7 @@ next: } rc = tipc_wait_for_sndpkt(sock, &timeo); if (rc) - kfree_skb_list(buf); + __skb_queue_purge(&head); } while (!rc); exit: if (iocb) @@ -1254,20 +1230,20 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg, static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack) { - struct sk_buff *buf = NULL; + struct sk_buff *skb = NULL; struct tipc_msg *msg; u32 peer_port = tsk_peer_port(tsk); u32 dnode = tsk_peer_node(tsk); if (!tsk->connected) return; - buf = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode, + skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode, tipc_own_addr, peer_port, tsk->ref, TIPC_OK); - if (!buf) + if (!skb) return; - msg = buf_msg(buf); + msg = buf_msg(skb); msg_set_msgcnt(msg, ack); - tipc_link_xmit(buf, dnode, msg_link_selector(msg)); + tipc_link_xmit_skb(skb, dnode, msg_link_selector(msg)); } static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) @@ -1372,8 +1348,7 @@ restart: sz = buf_len; m->msg_flags |= MSG_TRUNC; } - res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg), - m->msg_iov, sz); + res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg), m, sz); if (res) goto exit; res = sz; @@ -1473,8 +1448,8 @@ restart: needed = (buf_len - sz_copied); sz_to_copy = (sz <= needed) ? sz : needed; - res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg) + offset, - m->msg_iov, sz_to_copy); + res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg) + offset, + m, sz_to_copy); if (res) goto exit; @@ -1556,7 +1531,7 @@ static void tipc_data_ready(struct sock *sk) * @tsk: TIPC socket * @msg: message * - * Returns 0 (TIPC_OK) if everyting ok, -TIPC_ERR_NO_PORT otherwise + * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise */ static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf) { @@ -1723,20 +1698,20 @@ static int filter_rcv(struct sock *sk, struct sk_buff *buf) /** * tipc_backlog_rcv - handle incoming message from backlog queue * @sk: socket - * @buf: message + * @skb: message * * Caller must hold socket lock, but not port lock. 
* * Returns 0 */ -static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf) +static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb) { int rc; u32 onode; struct tipc_sock *tsk = tipc_sk(sk); - uint truesize = buf->truesize; + uint truesize = skb->truesize; - rc = filter_rcv(sk, buf); + rc = filter_rcv(sk, skb); if (likely(!rc)) { if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT) @@ -1744,25 +1719,25 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf) return 0; } - if ((rc < 0) && !tipc_msg_reverse(buf, &onode, -rc)) + if ((rc < 0) && !tipc_msg_reverse(skb, &onode, -rc)) return 0; - tipc_link_xmit(buf, onode, 0); + tipc_link_xmit_skb(skb, onode, 0); return 0; } /** * tipc_sk_rcv - handle incoming message - * @buf: buffer containing arriving message + * @skb: buffer containing arriving message * Consumes buffer * Returns 0 if success, or errno: -EHOSTUNREACH */ -int tipc_sk_rcv(struct sk_buff *buf) +int tipc_sk_rcv(struct sk_buff *skb) { struct tipc_sock *tsk; struct sock *sk; - u32 dport = msg_destport(buf_msg(buf)); + u32 dport = msg_destport(buf_msg(skb)); int rc = TIPC_OK; uint limit; u32 dnode; @@ -1770,7 +1745,7 @@ int tipc_sk_rcv(struct sk_buff *buf) /* Validate destination and message */ tsk = tipc_sk_get(dport); if (unlikely(!tsk)) { - rc = tipc_msg_eval(buf, &dnode); + rc = tipc_msg_eval(skb, &dnode); goto exit; } sk = &tsk->sk; @@ -1779,12 +1754,12 @@ int tipc_sk_rcv(struct sk_buff *buf) spin_lock_bh(&sk->sk_lock.slock); if (!sock_owned_by_user(sk)) { - rc = filter_rcv(sk, buf); + rc = filter_rcv(sk, skb); } else { if (sk->sk_backlog.len == 0) atomic_set(&tsk->dupl_rcvcnt, 0); - limit = rcvbuf_limit(sk, buf) + atomic_read(&tsk->dupl_rcvcnt); - if (sk_add_backlog(sk, buf, limit)) + limit = rcvbuf_limit(sk, skb) + atomic_read(&tsk->dupl_rcvcnt); + if (sk_add_backlog(sk, skb, limit)) rc = -TIPC_ERR_OVERLOAD; } spin_unlock_bh(&sk->sk_lock.slock); @@ -1792,10 +1767,10 @@ int tipc_sk_rcv(struct sk_buff *buf) if (likely(!rc)) return 0; exit: - if ((rc < 0) && !tipc_msg_reverse(buf, &dnode, -rc)) + if ((rc < 0) && !tipc_msg_reverse(skb, &dnode, -rc)) return -EHOSTUNREACH; - tipc_link_xmit(buf, dnode, 0); + tipc_link_xmit_skb(skb, dnode, 0); return (rc < 0) ? 
-EHOSTUNREACH : 0; } @@ -2053,7 +2028,7 @@ static int tipc_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - struct sk_buff *buf; + struct sk_buff *skb; u32 dnode; int res; @@ -2068,23 +2043,23 @@ static int tipc_shutdown(struct socket *sock, int how) restart: /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */ - buf = __skb_dequeue(&sk->sk_receive_queue); - if (buf) { - if (TIPC_SKB_CB(buf)->handle != NULL) { - kfree_skb(buf); + skb = __skb_dequeue(&sk->sk_receive_queue); + if (skb) { + if (TIPC_SKB_CB(skb)->handle != NULL) { + kfree_skb(skb); goto restart; } - if (tipc_msg_reverse(buf, &dnode, TIPC_CONN_SHUTDOWN)) - tipc_link_xmit(buf, dnode, tsk->ref); + if (tipc_msg_reverse(skb, &dnode, TIPC_CONN_SHUTDOWN)) + tipc_link_xmit_skb(skb, dnode, tsk->ref); tipc_node_remove_conn(dnode, tsk->ref); } else { dnode = tsk_peer_node(tsk); - buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, + skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode, tipc_own_addr, tsk_peer_port(tsk), tsk->ref, TIPC_CONN_SHUTDOWN); - tipc_link_xmit(buf, dnode, tsk->ref); + tipc_link_xmit_skb(skb, dnode, tsk->ref); } tsk->connected = 0; sock->state = SS_DISCONNECTING; @@ -2113,7 +2088,7 @@ static void tipc_sk_timeout(unsigned long ref) { struct tipc_sock *tsk; struct sock *sk; - struct sk_buff *buf = NULL; + struct sk_buff *skb = NULL; u32 peer_port, peer_node; tsk = tipc_sk_get(ref); @@ -2131,20 +2106,20 @@ static void tipc_sk_timeout(unsigned long ref) if (tsk->probing_state == TIPC_CONN_PROBING) { /* Previous probe not answered -> self abort */ - buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, + skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, SHORT_H_SIZE, 0, tipc_own_addr, peer_node, ref, peer_port, TIPC_ERR_NO_PORT); } else { - buf = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, + skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0, peer_node, tipc_own_addr, peer_port, ref, TIPC_OK); tsk->probing_state = TIPC_CONN_PROBING; k_start_timer(&tsk->timer, tsk->probing_interval); } bh_unlock_sock(sk); - if (buf) - tipc_link_xmit(buf, peer_node, ref); + if (skb) + tipc_link_xmit_skb(skb, peer_node, ref); exit: tipc_sk_put(tsk); } @@ -2802,3 +2777,233 @@ void tipc_socket_stop(void) sock_unregister(tipc_family_ops.family); proto_unregister(&tipc_proto); } + +/* Caller should hold socket lock for the passed tipc socket. */ +static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk) +{ + u32 peer_node; + u32 peer_port; + struct nlattr *nest; + + peer_node = tsk_peer_node(tsk); + peer_port = tsk_peer_port(tsk); + + nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON); + + if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node)) + goto msg_full; + if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port)) + goto msg_full; + + if (tsk->conn_type != 0) { + if (nla_put_flag(skb, TIPC_NLA_CON_FLAG)) + goto msg_full; + if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type)) + goto msg_full; + if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance)) + goto msg_full; + } + nla_nest_end(skb, nest); + + return 0; + +msg_full: + nla_nest_cancel(skb, nest); + + return -EMSGSIZE; +} + +/* Caller should hold socket lock for the passed tipc socket. 
*/ +static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb, + struct tipc_sock *tsk) +{ + int err; + void *hdr; + struct nlattr *attrs; + + hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_SOCK_GET); + if (!hdr) + goto msg_cancel; + + attrs = nla_nest_start(skb, TIPC_NLA_SOCK); + if (!attrs) + goto genlmsg_cancel; + if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->ref)) + goto attr_msg_cancel; + if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr)) + goto attr_msg_cancel; + + if (tsk->connected) { + err = __tipc_nl_add_sk_con(skb, tsk); + if (err) + goto attr_msg_cancel; + } else if (!list_empty(&tsk->publications)) { + if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL)) + goto attr_msg_cancel; + } + nla_nest_end(skb, attrs); + genlmsg_end(skb, hdr); + + return 0; + +attr_msg_cancel: + nla_nest_cancel(skb, attrs); +genlmsg_cancel: + genlmsg_cancel(skb, hdr); +msg_cancel: + return -EMSGSIZE; +} + +int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + int err; + struct tipc_sock *tsk; + u32 prev_ref = cb->args[0]; + u32 ref = prev_ref; + + tsk = tipc_sk_get_next(&ref); + for (; tsk; tsk = tipc_sk_get_next(&ref)) { + lock_sock(&tsk->sk); + err = __tipc_nl_add_sk(skb, cb, tsk); + release_sock(&tsk->sk); + tipc_sk_put(tsk); + if (err) + break; + + prev_ref = ref; + } + + cb->args[0] = prev_ref; + + return skb->len; +} + +/* Caller should hold socket lock for the passed tipc socket. */ +static int __tipc_nl_add_sk_publ(struct sk_buff *skb, + struct netlink_callback *cb, + struct publication *publ) +{ + void *hdr; + struct nlattr *attrs; + + hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_PUBL_GET); + if (!hdr) + goto msg_cancel; + + attrs = nla_nest_start(skb, TIPC_NLA_PUBL); + if (!attrs) + goto genlmsg_cancel; + + if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key)) + goto attr_msg_cancel; + if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type)) + goto attr_msg_cancel; + if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower)) + goto attr_msg_cancel; + if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper)) + goto attr_msg_cancel; + + nla_nest_end(skb, attrs); + genlmsg_end(skb, hdr); + + return 0; + +attr_msg_cancel: + nla_nest_cancel(skb, attrs); +genlmsg_cancel: + genlmsg_cancel(skb, hdr); +msg_cancel: + return -EMSGSIZE; +} + +/* Caller should hold socket lock for the passed tipc socket. */ +static int __tipc_nl_list_sk_publ(struct sk_buff *skb, + struct netlink_callback *cb, + struct tipc_sock *tsk, u32 *last_publ) +{ + int err; + struct publication *p; + + if (*last_publ) { + list_for_each_entry(p, &tsk->publications, pport_list) { + if (p->key == *last_publ) + break; + } + if (p->key != *last_publ) { + /* We never set seq or call nl_dump_check_consistent(), + * which means that setting prev_seq here will cause the + * consistency check to fail in the netlink callback + * handler, resulting in the last NLMSG_DONE message + * having the NLM_F_DUMP_INTR flag set.
+ */ + cb->prev_seq = 1; + *last_publ = 0; + return -EPIPE; + } + } else { + p = list_first_entry(&tsk->publications, struct publication, + pport_list); + } + + list_for_each_entry_from(p, &tsk->publications, pport_list) { + err = __tipc_nl_add_sk_publ(skb, cb, p); + if (err) { + *last_publ = p->key; + return err; + } + } + *last_publ = 0; + + return 0; +} + +int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + int err; + u32 tsk_ref = cb->args[0]; + u32 last_publ = cb->args[1]; + u32 done = cb->args[2]; + struct tipc_sock *tsk; + + if (!tsk_ref) { + struct nlattr **attrs; + struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; + + err = tipc_nlmsg_parse(cb->nlh, &attrs); + if (err) + return err; + + err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, + attrs[TIPC_NLA_SOCK], + tipc_nl_sock_policy); + if (err) + return err; + + if (!sock[TIPC_NLA_SOCK_REF]) + return -EINVAL; + + tsk_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); + } + + if (done) + return 0; + + tsk = tipc_sk_get(tsk_ref); + if (!tsk) + return -EINVAL; + + lock_sock(&tsk->sk); + err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ); + if (!err) + done = 1; + release_sock(&tsk->sk); + tipc_sk_put(tsk); + + cb->args[0] = tsk_ref; + cb->args[1] = last_publ; + cb->args[2] = done; + + return skb->len; +} diff --git a/net/tipc/socket.h b/net/tipc/socket.h index baa43d03901e5b093238bf4a92b01135c109d5a2..d3408938700677e427b7ccbfe80a2dcf009f095f 100644 --- a/net/tipc/socket.h +++ b/net/tipc/socket.h @@ -36,6 +36,7 @@ #define _TIPC_SOCK_H #include <net/sock.h> +#include <net/genetlink.h> #define TIPC_CONNACK_INTV 256 #define TIPC_FLOWCTRL_WIN (TIPC_CONNACK_INTV * 2) @@ -47,5 +48,7 @@ void tipc_sk_mcast_rcv(struct sk_buff *buf); void tipc_sk_reinit(void); int tipc_sk_ref_table_init(u32 requested_size, u32 start); void tipc_sk_ref_table_stop(void); +int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb); +int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb); #endif diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 31b5cb232a4371e3728ed9e7b934c3560f9ed92c..0344206b984f7f168a742726e8ad853940ff9cec 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -305,7 +305,6 @@ static int subscr_subscribe(struct tipc_subscr *s, kfree(sub); return -EINVAL; } - INIT_LIST_HEAD(&sub->nameseq_list); list_add(&sub->subscription_list, &subscriber->subscription_list); sub->subscriber = subscriber; sub->swap = swap; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index e968843807320ea66a8b6421974fa182ab9d58aa..8e1b10274b02702345abba0b4b458d8319f2f841 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1516,7 +1516,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, skb_put(skb, len - data_len); skb->data_len = data_len; skb->len = len; - err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len); + err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len); if (err) goto out_free; @@ -1694,8 +1694,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, skb_put(skb, size - data_len); skb->data_len = data_len; skb->len = size; - err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, - sent, size); + err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); if (err) { kfree_skb(skb); goto out_err; @@ -1825,7 +1824,7 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock, else if (size < skb->len - skip) msg->msg_flags |= MSG_TRUNC; - err = skb_copy_datagram_iovec(skb, skip, msg->msg_iov, size); + err = skb_copy_datagram_msg(skb, skip, msg,
size); if (err) goto out_free; @@ -2030,8 +2029,8 @@ again: } chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size); - if (skb_copy_datagram_iovec(skb, UNIXCB(skb).consumed + skip, - msg->msg_iov, chunk)) { + if (skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip, + msg, chunk)) { if (copied == 0) copied = -EFAULT; break; diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 85d232bed87d21f3c23cd695b83defef5a6f22c1..1d0e39c9a3e2a5deb62130bc7fbdc31a4b5288a9 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -1013,7 +1013,7 @@ static int vsock_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, goto out; } - err = transport->dgram_enqueue(vsk, remote_addr, msg->msg_iov, len); + err = transport->dgram_enqueue(vsk, remote_addr, msg, len); out: release_sock(sk); @@ -1617,7 +1617,7 @@ static int vsock_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, */ written = transport->stream_enqueue( - vsk, msg->msg_iov, + vsk, msg, len - total_written); if (written < 0) { err = -ENOMEM; @@ -1739,7 +1739,7 @@ vsock_stream_recvmsg(struct kiocb *kiocb, break; read = transport->stream_dequeue( - vsk, msg->msg_iov, + vsk, msg, len - copied, flags); if (read < 0) { err = -ENOMEM; diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index 9bb63ffec4f21424fe90af3a4b017c5ff701c97f..02d2e5229240dd635dfc3eb7b6f81e8aa970bf01 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -1697,7 +1697,7 @@ static int vmci_transport_dgram_bind(struct vsock_sock *vsk, static int vmci_transport_dgram_enqueue( struct vsock_sock *vsk, struct sockaddr_vm *remote_addr, - struct iovec *iov, + struct msghdr *msg, size_t len) { int err; @@ -1714,7 +1714,7 @@ static int vmci_transport_dgram_enqueue( if (!dg) return -ENOMEM; - memcpy_fromiovec(VMCI_DG_PAYLOAD(dg), iov, len); + memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len); dg->dst = vmci_make_handle(remote_addr->svm_cid, remote_addr->svm_port); @@ -1773,8 +1773,7 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb, } /* Place the datagram payload in the user's iovec. */ - err = skb_copy_datagram_iovec(skb, sizeof(*dg), msg->msg_iov, - payload_len); + err = skb_copy_datagram_msg(skb, sizeof(*dg), msg, payload_len); if (err) goto out; @@ -1836,22 +1835,23 @@ static int vmci_transport_connect(struct vsock_sock *vsk) static ssize_t vmci_transport_stream_dequeue( struct vsock_sock *vsk, - struct iovec *iov, + struct msghdr *msg, size_t len, int flags) { if (flags & MSG_PEEK) - return vmci_qpair_peekv(vmci_trans(vsk)->qpair, iov, len, 0); + return vmci_qpair_peekv(vmci_trans(vsk)->qpair, msg, len, 0); else - return vmci_qpair_dequev(vmci_trans(vsk)->qpair, iov, len, 0); + return vmci_qpair_dequev(vmci_trans(vsk)->qpair, msg, len, 0); } static ssize_t vmci_transport_stream_enqueue( struct vsock_sock *vsk, - struct iovec *iov, + struct msghdr *msg, size_t len) { - return vmci_qpair_enquev(vmci_trans(vsk)->qpair, iov, len, 0); + /* XXX: stripping const */ + return vmci_qpair_enquev(vmci_trans(vsk)->qpair, (struct iovec *)msg->msg_iter.iov, len, 0); } static s64 vmci_transport_stream_has_data(struct vsock_sock *vsk) diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index 29c8675f9a1189db65c185f2ad04f96a67702989..22ba971741e5c998c5f314e083ceead4b2f6a94f 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig @@ -175,7 +175,7 @@ config CFG80211_INTERNAL_REGDB Most distributions have a CRDA package. So if unsure, say N. 
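The af_unix, vsock and TIPC hunks above replace the old iovec-based datagram helpers with their msghdr/iov_iter counterparts: skb_copy_datagram_iovec() becomes skb_copy_datagram_msg(), skb_copy_datagram_from_iovec() becomes skb_copy_datagram_from_iter(), and memcpy_fromiovec() becomes memcpy_from_msg(). A minimal receive-side sketch of the new calling convention (example_recv_copy() is illustrative, not part of the patch):

    #include <linux/kernel.h>
    #include <linux/skbuff.h>
    #include <linux/socket.h>

    /* Copy an skb payload into the caller's msghdr, truncating if the
     * caller asked for less than the datagram holds.
     */
    static int example_recv_copy(struct sk_buff *skb, struct msghdr *msg,
                                 size_t len)
    {
            size_t copy = min_t(size_t, len, skb->len);

            if (copy < skb->len)
                    msg->msg_flags |= MSG_TRUNC;

            /* was: skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copy) */
            return skb_copy_datagram_msg(skb, 0, msg, copy);
    }

The conversion keeps the offset/length arguments unchanged; only the destination is now described by the msghdr's iov_iter rather than a bare iovec array.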
config CFG80211_WEXT - bool "cfg80211 wireless extensions compatibility" + bool depends on CFG80211 select WEXT_CORE help diff --git a/net/wireless/Makefile b/net/wireless/Makefile index a761670af31dd7076b32e7f61f26bb4b94b22728..4c9e39f04ef8fae38e21070fddbb89bb5a150863 100644 --- a/net/wireless/Makefile +++ b/net/wireless/Makefile @@ -10,7 +10,7 @@ obj-$(CONFIG_WEXT_SPY) += wext-spy.o obj-$(CONFIG_WEXT_PRIV) += wext-priv.o cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o -cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o ap.o trace.o +cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o ap.o trace.o ocb.o cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o diff --git a/net/wireless/chan.c b/net/wireless/chan.c index 72d81e2154d590dbba26bf2d1e0868c5c4940e26..85506f1d078920117b1030abc1e50a0ec70fdc6b 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -115,7 +115,7 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef) EXPORT_SYMBOL(cfg80211_chandef_valid); static void chandef_primary_freqs(const struct cfg80211_chan_def *c, - int *pri40, int *pri80) + u32 *pri40, u32 *pri80) { int tmp; @@ -366,6 +366,7 @@ int cfg80211_chandef_dfs_required(struct wiphy *wiphy, break; case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_OCB: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_AP_VLAN: @@ -892,6 +893,13 @@ cfg80211_get_chan_state(struct wireless_dev *wdev, *radar_detect |= BIT(wdev->chandef.width); } return; + case NL80211_IFTYPE_OCB: + if (wdev->chandef.chan) { + *chan = wdev->chandef.chan; + *chanmode = CHAN_MODE_SHARED; + return; + } + break; case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_WDS: diff --git a/net/wireless/core.c b/net/wireless/core.c index f52a4cd7017c855c1c64f9ee2bbe2b0b44b625cc..53dda7728f866dd3e7df234a242668c5dbe04f20 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -86,11 +86,11 @@ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx) return &rdev->wiphy; } -int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, - char *newname) +static int cfg80211_dev_check_name(struct cfg80211_registered_device *rdev, + const char *newname) { struct cfg80211_registered_device *rdev2; - int wiphy_idx, taken = -1, result, digits; + int wiphy_idx, taken = -1, digits; ASSERT_RTNL(); @@ -109,15 +109,28 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, return -EINVAL; } + /* Ensure another device does not already have this name. */ + list_for_each_entry(rdev2, &cfg80211_rdev_list, list) + if (strcmp(newname, wiphy_name(&rdev2->wiphy)) == 0) + return -EINVAL; + + return 0; +} + +int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, + char *newname) +{ + int result; + + ASSERT_RTNL(); /* Ignore nop renames */ - if (strcmp(newname, dev_name(&rdev->wiphy.dev)) == 0) + if (strcmp(newname, wiphy_name(&rdev->wiphy)) == 0) return 0; - /* Ensure another device does not already have this name. 
*/ - list_for_each_entry(rdev2, &cfg80211_rdev_list, list) - if (strcmp(newname, dev_name(&rdev2->wiphy.dev)) == 0) - return -EINVAL; + result = cfg80211_dev_check_name(rdev, newname); + if (result < 0) + return result; result = device_rename(&rdev->wiphy.dev, newname); if (result) @@ -309,7 +322,8 @@ static void cfg80211_destroy_iface_wk(struct work_struct *work) /* exported functions */ -struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) +struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, + const char *requested_name) { static atomic_t wiphy_counter = ATOMIC_INIT(0); @@ -346,7 +360,31 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) rdev->wiphy_idx--; /* give it a proper name */ - dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx); + if (requested_name && requested_name[0]) { + int rv; + + rtnl_lock(); + rv = cfg80211_dev_check_name(rdev, requested_name); + + if (rv < 0) { + rtnl_unlock(); + goto use_default_name; + } + + rv = dev_set_name(&rdev->wiphy.dev, "%s", requested_name); + rtnl_unlock(); + if (rv) + goto use_default_name; + } else { +use_default_name: + /* NOTE: This is *probably* safe without holding rtnl because + * of the restrictions on phy names. This call could still + * fail if some other part of the kernel (re)named a device + * phyX. We should probably add some locking, check the + * return value, and fall back to a different name if this + * one exists. + */ + dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx); + } INIT_LIST_HEAD(&rdev->wdev_list); INIT_LIST_HEAD(&rdev->beacon_registrations); @@ -406,7 +444,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) return &rdev->wiphy; } -EXPORT_SYMBOL(wiphy_new); +EXPORT_SYMBOL(wiphy_new_nm); static int wiphy_verify_combinations(struct wiphy *wiphy) { @@ -503,6 +541,24 @@ int wiphy_register(struct wiphy *wiphy) !wiphy->wowlan->tcp)) return -EINVAL; #endif + if (WARN_ON((wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH) && + (!rdev->ops->tdls_channel_switch || + !rdev->ops->tdls_cancel_channel_switch))) + return -EINVAL; + + /* + * if a wiphy has unsupported modes for regulatory channel enforcement, + * opt out of enforcement checking + */ + if (wiphy->interface_modes & ~(BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_GO) | + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_P2P_DEVICE) | + BIT(NL80211_IFTYPE_AP_VLAN) | + BIT(NL80211_IFTYPE_MONITOR))) + wiphy->regulatory_flags |= REGULATORY_IGNORE_STALE_KICKOFF; if (WARN_ON(wiphy->coalesce && (!wiphy->coalesce->n_rules || @@ -831,7 +887,22 @@ void __cfg80211_leave(struct cfg80211_registered_device *rdev, case NL80211_IFTYPE_P2P_GO: __cfg80211_stop_ap(rdev, dev, true); break; - default: + case NL80211_IFTYPE_OCB: + __cfg80211_leave_ocb(rdev, dev); + break; + case NL80211_IFTYPE_WDS: + /* must be handled by mac80211/driver, has no APIs */ + break; + case NL80211_IFTYPE_P2P_DEVICE: + /* cannot happen, has no netdev */ + break; + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_MONITOR: + /* nothing to do */ + break; + case NL80211_IFTYPE_UNSPECIFIED: + case NUM_NL80211_IFTYPES: + /* invalid */ break; } } diff --git a/net/wireless/core.h b/net/wireless/core.h index 7e3a3cef7df93b4c6936515f05f91d2ec14446ea..faa5b1609aaebb8991ff3c6312735082a42e6e3e 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -111,6 +111,7 @@ cfg80211_rdev_free_wowlan(struct cfg80211_registered_device *rdev)
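The core.c hunk above reworks wiphy_new() into wiphy_new_nm(), letting a driver request a specific phy name at allocation time; a name that is invalid or already taken silently falls back to the usual phy%d scheme. A hedged usage sketch (my_priv and my_cfg80211_ops are placeholder driver objects, not from the patch):

    #include <net/cfg80211.h>

    struct my_priv {
            int placeholder;        /* driver-private state would live here */
    };

    static const struct cfg80211_ops my_cfg80211_ops;

    static struct wiphy *example_alloc_wiphy(void)
    {
            /* Falls back to phy%d if "radio0" is invalid or already taken;
             * returns NULL only on allocation failure.
             */
            return wiphy_new_nm(&my_cfg80211_ops, sizeof(struct my_priv),
                                "radio0");
    }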
rdev->wiphy.wowlan_config->tcp->sock) sock_release(rdev->wiphy.wowlan_config->tcp->sock); kfree(rdev->wiphy.wowlan_config->tcp); + kfree(rdev->wiphy.wowlan_config->nd_config); kfree(rdev->wiphy.wowlan_config); #endif } @@ -290,6 +291,18 @@ int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_chan_def *chandef); +/* OCB */ +int __cfg80211_join_ocb(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct ocb_setup *setup); +int cfg80211_join_ocb(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct ocb_setup *setup); +int __cfg80211_leave_ocb(struct cfg80211_registered_device *rdev, + struct net_device *dev); +int cfg80211_leave_ocb(struct cfg80211_registered_device *rdev, + struct net_device *dev); + /* AP */ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev, struct net_device *dev, bool notify); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 5839c85075f15407e86d84526cfb29087c70aa81..a17d6bc6b22ca7ccda9716af9a3367b66bc47632 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -209,7 +209,7 @@ cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info) } /* policy for the attributes */ -static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { +static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING, .len = 20-1 }, @@ -388,13 +388,14 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { [NL80211_ATTR_MAC_HINT] = { .len = ETH_ALEN }, [NL80211_ATTR_WIPHY_FREQ_HINT] = { .type = NLA_U32 }, [NL80211_ATTR_TDLS_PEER_CAPABILITY] = { .type = NLA_U32 }, - [NL80211_ATTR_IFACE_SOCKET_OWNER] = { .type = NLA_FLAG }, + [NL80211_ATTR_SOCKET_OWNER] = { .type = NLA_FLAG }, [NL80211_ATTR_CSA_C_OFFSETS_TX] = { .type = NLA_BINARY }, [NL80211_ATTR_USE_RRM] = { .type = NLA_FLAG }, [NL80211_ATTR_TSID] = { .type = NLA_U8 }, [NL80211_ATTR_USER_PRIO] = { .type = NLA_U8 }, [NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 }, [NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 }, + [NL80211_ATTR_MAC_MASK] = { .len = ETH_ALEN }, }; /* policy for the key attributes */ @@ -428,6 +429,7 @@ nl80211_wowlan_policy[NUM_NL80211_WOWLAN_TRIG] = { [NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE] = { .type = NLA_FLAG }, [NL80211_WOWLAN_TRIG_RFKILL_RELEASE] = { .type = NLA_FLAG }, [NL80211_WOWLAN_TRIG_TCP_CONNECTION] = { .type = NLA_NESTED }, + [NL80211_WOWLAN_TRIG_NET_DETECT] = { .type = NLA_NESTED }, }; static const struct nla_policy @@ -884,7 +886,12 @@ static int nl80211_key_allowed(struct wireless_dev *wdev) if (!wdev->current_bss) return -ENOLINK; break; - default: + case NL80211_IFTYPE_UNSPECIFIED: + case NL80211_IFTYPE_OCB: + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_P2P_DEVICE: + case NL80211_IFTYPE_WDS: + case NUM_NL80211_IFTYPES: return -EINVAL; } @@ -1083,6 +1090,8 @@ static int nl80211_send_wowlan(struct sk_buff *msg, if (large && nl80211_send_wowlan_tcp_caps(rdev, msg)) return -ENOBUFS; + /* TODO: send wowlan net detect */ + nla_nest_end(msg, nl_wowlan); return 0; @@ -1514,8 +1523,8 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH) CMD(channel_switch, CHANNEL_SWITCH); CMD(set_qos_map, SET_QOS_MAP); - if (rdev->wiphy.flags & - WIPHY_FLAG_SUPPORTS_WMM_ADMISSION) + if (rdev->wiphy.features & + NL80211_FEATURE_SUPPORTS_WMM_ADMISSION) CMD(add_tx_ts, ADD_TX_TS); } 
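The hunk above (and the matching nl80211_add_tx_ts() change further down) moves WMM admission control from the internal WIPHY_FLAG_SUPPORTS_WMM_ADMISSION wiphy flag to the NL80211_FEATURE_SUPPORTS_WMM_ADMISSION feature bit, so the capability is now advertised to userspace through the feature-flag bitmap. For a driver the conversion is a one-liner (sketch, not from the patch):

    /* Advertise WMM admission control via the nl80211 feature bitmap. */
    static void example_setup_wiphy(struct wiphy *wiphy)
    {
            /* was: wiphy->flags |= WIPHY_FLAG_SUPPORTS_WMM_ADMISSION; */
            wiphy->features |= NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
    }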
/* add into the if now */ @@ -2308,7 +2317,8 @@ static inline u64 wdev_id(struct wireless_dev *wdev) static int nl80211_send_chandef(struct sk_buff *msg, const struct cfg80211_chan_def *chandef) { - WARN_ON(!cfg80211_chandef_valid(chandef)); + if (WARN_ON(!cfg80211_chandef_valid(chandef))) + return -EINVAL; if (nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chandef->chan->center_freq)) @@ -2336,12 +2346,16 @@ static int nl80211_send_chandef(struct sk_buff *msg, static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, struct cfg80211_registered_device *rdev, - struct wireless_dev *wdev) + struct wireless_dev *wdev, bool removal) { struct net_device *dev = wdev->netdev; + u8 cmd = NL80211_CMD_NEW_INTERFACE; void *hdr; - hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_INTERFACE); + if (removal) + cmd = NL80211_CMD_DEL_INTERFACE; + + hdr = nl80211hdr_put(msg, portid, seq, flags, cmd); if (!hdr) return -1; @@ -2408,7 +2422,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback * } if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, - rdev, wdev) < 0) { + rdev, wdev, false) < 0) { goto out; } if_idx++; @@ -2436,7 +2450,7 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info) return -ENOMEM; if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0, - rdev, wdev) < 0) { + rdev, wdev, false) < 0) { nlmsg_free(msg); return -ENOBUFS; } @@ -2582,7 +2596,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct vif_params params; struct wireless_dev *wdev; - struct sk_buff *msg; + struct sk_buff *msg, *event; int err; enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED; u32 flags; @@ -2605,7 +2619,9 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) !(rdev->wiphy.interface_modes & (1 << type))) return -EOPNOTSUPP; - if (type == NL80211_IFTYPE_P2P_DEVICE && info->attrs[NL80211_ATTR_MAC]) { + if ((type == NL80211_IFTYPE_P2P_DEVICE || + rdev->wiphy.features & NL80211_FEATURE_MAC_ON_CREATE) && + info->attrs[NL80211_ATTR_MAC]) { nla_memcpy(params.macaddr, info->attrs[NL80211_ATTR_MAC], ETH_ALEN); if (!is_valid_ether_addr(params.macaddr)) @@ -2634,12 +2650,15 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) wdev = rdev_add_virtual_intf(rdev, nla_data(info->attrs[NL80211_ATTR_IFNAME]), type, err ? 
NULL : &flags, &params); - if (IS_ERR(wdev)) { + if (WARN_ON(!wdev)) { + nlmsg_free(msg); + return -EPROTO; + } else if (IS_ERR(wdev)) { nlmsg_free(msg); return PTR_ERR(wdev); } - if (info->attrs[NL80211_ATTR_IFACE_SOCKET_OWNER]) + if (info->attrs[NL80211_ATTR_SOCKET_OWNER]) wdev->owner_nlportid = info->snd_portid; switch (type) { @@ -2675,11 +2694,25 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) } if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0, - rdev, wdev) < 0) { + rdev, wdev, false) < 0) { nlmsg_free(msg); return -ENOBUFS; } + event = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (event) { + if (nl80211_send_iface(event, 0, 0, 0, + rdev, wdev, false) < 0) { + nlmsg_free(event); + goto out; + } + + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), + event, 0, NL80211_MCGRP_CONFIG, + GFP_KERNEL); + } + +out: return genlmsg_reply(msg, info); } @@ -2687,10 +2720,18 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; + struct sk_buff *msg; + int status; if (!rdev->ops->del_virtual_intf) return -EOPNOTSUPP; + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (msg && nl80211_send_iface(msg, 0, 0, 0, rdev, wdev, true) < 0) { + nlmsg_free(msg); + msg = NULL; + } + /* * If we remove a wireless device without a netdev then clear * user_ptr[1] so that nl80211_post_doit won't dereference it @@ -2701,7 +2742,15 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info) if (!wdev->netdev) info->user_ptr[1] = NULL; - return rdev_del_virtual_intf(rdev, wdev); + status = rdev_del_virtual_intf(rdev, wdev); + if (status >= 0 && msg) + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), + msg, 0, NL80211_MCGRP_CONFIG, + GFP_KERNEL); + else + nlmsg_free(msg); + + return status; } static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info) @@ -4398,10 +4447,12 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; - u8 *mac_addr = NULL; + struct station_del_parameters params; + + memset(&params, 0, sizeof(params)); if (info->attrs[NL80211_ATTR_MAC]) - mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); + params.mac = nla_data(info->attrs[NL80211_ATTR_MAC]); if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN && @@ -4412,7 +4463,28 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info) if (!rdev->ops->del_station) return -EOPNOTSUPP; - return rdev_del_station(rdev, dev, mac_addr); + if (info->attrs[NL80211_ATTR_MGMT_SUBTYPE]) { + params.subtype = + nla_get_u8(info->attrs[NL80211_ATTR_MGMT_SUBTYPE]); + if (params.subtype != IEEE80211_STYPE_DISASSOC >> 4 && + params.subtype != IEEE80211_STYPE_DEAUTH >> 4) + return -EINVAL; + } else { + /* Default to Deauthentication frame */ + params.subtype = IEEE80211_STYPE_DEAUTH >> 4; + } + + if (info->attrs[NL80211_ATTR_REASON_CODE]) { + params.reason_code = + nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); + if (params.reason_code == 0) + return -EINVAL; /* 0 is reserved */ + } else { + /* Default to reason code 2 */ + params.reason_code = WLAN_REASON_PREV_AUTH_NOT_VALID; + } + + return rdev_del_station(rdev, dev, &params); } static int nl80211_send_mpath(struct sk_buff *msg, u32 portid, u32 seq, @@ -4423,7 +4495,7 @@
static int nl80211_send_mpath(struct sk_buff *msg, u32 portid, u32 seq, void *hdr; struct nlattr *pinfoattr; - hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_STATION); + hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_MPATH); if (!hdr) return -1; @@ -4624,6 +4696,96 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info) return rdev_del_mpath(rdev, dev, dst); } +static int nl80211_get_mpp(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + int err; + struct net_device *dev = info->user_ptr[1]; + struct mpath_info pinfo; + struct sk_buff *msg; + u8 *dst = NULL; + u8 mpp[ETH_ALEN]; + + memset(&pinfo, 0, sizeof(pinfo)); + + if (!info->attrs[NL80211_ATTR_MAC]) + return -EINVAL; + + dst = nla_data(info->attrs[NL80211_ATTR_MAC]); + + if (!rdev->ops->get_mpp) + return -EOPNOTSUPP; + + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) + return -EOPNOTSUPP; + + err = rdev_get_mpp(rdev, dev, dst, mpp, &pinfo); + if (err) + return err; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + if (nl80211_send_mpath(msg, info->snd_portid, info->snd_seq, 0, + dev, dst, mpp, &pinfo) < 0) { + nlmsg_free(msg); + return -ENOBUFS; + } + + return genlmsg_reply(msg, info); +} + +static int nl80211_dump_mpp(struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct mpath_info pinfo; + struct cfg80211_registered_device *rdev; + struct wireless_dev *wdev; + u8 dst[ETH_ALEN]; + u8 mpp[ETH_ALEN]; + int path_idx = cb->args[2]; + int err; + + err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); + if (err) + return err; + + if (!rdev->ops->dump_mpp) { + err = -EOPNOTSUPP; + goto out_err; + } + + if (wdev->iftype != NL80211_IFTYPE_MESH_POINT) { + err = -EOPNOTSUPP; + goto out_err; + } + + while (1) { + err = rdev_dump_mpp(rdev, wdev->netdev, path_idx, dst, + mpp, &pinfo); + if (err == -ENOENT) + break; + if (err) + goto out_err; + + if (nl80211_send_mpath(skb, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, + wdev->netdev, dst, mpp, + &pinfo) < 0) + goto out; + + path_idx++; + } + + out: + cb->args[2] = path_idx; + err = skb->len; + out_err: + nl80211_finish_wdev_dump(rdev); + return err; +} + static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; @@ -5260,11 +5422,11 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) { struct nlattr *tb[NL80211_REG_RULE_ATTR_MAX + 1]; struct nlattr *nl_reg_rule; - char *alpha2 = NULL; - int rem_reg_rules = 0, r = 0; + char *alpha2; + int rem_reg_rules, r; u32 num_rules = 0, rule_idx = 0, size_of_regd; enum nl80211_dfs_regions dfs_region = NL80211_DFS_UNSET; - struct ieee80211_regdomain *rd = NULL; + struct ieee80211_regdomain *rd; if (!info->attrs[NL80211_ATTR_REG_ALPHA2]) return -EINVAL; @@ -5358,6 +5520,43 @@ static int validate_scan_freqs(struct nlattr *freqs) return n_channels; } +static int nl80211_parse_random_mac(struct nlattr **attrs, + u8 *mac_addr, u8 *mac_addr_mask) +{ + int i; + + if (!attrs[NL80211_ATTR_MAC] && !attrs[NL80211_ATTR_MAC_MASK]) { + memset(mac_addr, 0, ETH_ALEN); + memset(mac_addr_mask, 0, ETH_ALEN); + mac_addr[0] = 0x2; + mac_addr_mask[0] = 0x3; + + return 0; + } + + /* need both or none */ + if (!attrs[NL80211_ATTR_MAC] || !attrs[NL80211_ATTR_MAC_MASK]) + return -EINVAL; + + memcpy(mac_addr, nla_data(attrs[NL80211_ATTR_MAC]), ETH_ALEN); + memcpy(mac_addr_mask, 
nla_data(attrs[NL80211_ATTR_MAC_MASK]), ETH_ALEN); + + /* don't allow or configure an mcast address */ + if (!is_multicast_ether_addr(mac_addr_mask) || + is_multicast_ether_addr(mac_addr)) + return -EINVAL; + + /* + * allow users to pass a MAC address that has bits set outside + * of the mask, but don't bother drivers with having to deal + * with such bits + */ + for (i = 0; i < ETH_ALEN; i++) + mac_addr[i] &= mac_addr_mask[i]; + + return 0; +} + static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; @@ -5535,6 +5734,25 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) err = -EOPNOTSUPP; goto out_free; } + + if (request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { + if (!(wiphy->features & + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR)) { + err = -EOPNOTSUPP; + goto out_free; + } + + if (wdev->current_bss) { + err = -EOPNOTSUPP; + goto out_free; + } + + err = nl80211_parse_random_mac(info->attrs, + request->mac_addr, + request->mac_addr_mask); + if (err) + goto out_free; + } } request->no_cck = @@ -5561,14 +5779,12 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) return err; } -static int nl80211_start_sched_scan(struct sk_buff *skb, - struct genl_info *info) +static struct cfg80211_sched_scan_request * +nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, + struct nlattr **attrs) { struct cfg80211_sched_scan_request *request; - struct cfg80211_registered_device *rdev = info->user_ptr[0]; - struct net_device *dev = info->user_ptr[1]; struct nlattr *attr; - struct wiphy *wiphy; int err, tmp, n_ssids = 0, n_match_sets = 0, n_channels, i; u32 interval; enum ieee80211_band band; @@ -5576,38 +5792,32 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1]; s32 default_match_rssi = NL80211_SCAN_RSSI_THOLD_OFF; - if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) || - !rdev->ops->sched_scan_start) - return -EOPNOTSUPP; - - if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) - return -EINVAL; + if (!is_valid_ie_attr(attrs[NL80211_ATTR_IE])) + return ERR_PTR(-EINVAL); - if (!info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]) - return -EINVAL; + if (!attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]) + return ERR_PTR(-EINVAL); - interval = nla_get_u32(info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]); + interval = nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]); if (interval == 0) - return -EINVAL; - - wiphy = &rdev->wiphy; + return ERR_PTR(-EINVAL); - if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { + if (attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { n_channels = validate_scan_freqs( - info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]); + attrs[NL80211_ATTR_SCAN_FREQUENCIES]); if (!n_channels) - return -EINVAL; + return ERR_PTR(-EINVAL); } else { n_channels = ieee80211_get_num_supported_channels(wiphy); } - if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) - nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], + if (attrs[NL80211_ATTR_SCAN_SSIDS]) + nla_for_each_nested(attr, attrs[NL80211_ATTR_SCAN_SSIDS], tmp) n_ssids++; if (n_ssids > wiphy->max_sched_scan_ssids) - return -EINVAL; + return ERR_PTR(-EINVAL); /* * First, count the number of 'real' matchsets. Due to an issue with @@ -5618,9 +5828,9 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, * older userspace that treated a matchset with only the RSSI as the * global RSSI for all other matchsets - if there are other matchsets. 
*/ - if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) { + if (attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) { nla_for_each_nested(attr, - info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH], + attrs[NL80211_ATTR_SCHED_SCAN_MATCH], tmp) { struct nlattr *rssi; @@ -5628,7 +5838,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, nla_data(attr), nla_len(attr), nl80211_match_policy); if (err) - return err; + return ERR_PTR(err); /* add other standalone attributes here */ if (tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID]) { n_match_sets++; @@ -5645,30 +5855,23 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, n_match_sets = 1; if (n_match_sets > wiphy->max_match_sets) - return -EINVAL; + return ERR_PTR(-EINVAL); - if (info->attrs[NL80211_ATTR_IE]) - ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); + if (attrs[NL80211_ATTR_IE]) + ie_len = nla_len(attrs[NL80211_ATTR_IE]); else ie_len = 0; if (ie_len > wiphy->max_sched_scan_ie_len) - return -EINVAL; - - if (rdev->sched_scan_req) { - err = -EINPROGRESS; - goto out; - } + return ERR_PTR(-EINVAL); request = kzalloc(sizeof(*request) + sizeof(*request->ssids) * n_ssids + sizeof(*request->match_sets) * n_match_sets + sizeof(*request->channels) * n_channels + ie_len, GFP_KERNEL); - if (!request) { - err = -ENOMEM; - goto out; - } + if (!request) + return ERR_PTR(-ENOMEM); if (n_ssids) request->ssids = (void *)&request->channels[n_channels]; @@ -5693,10 +5896,10 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, request->n_match_sets = n_match_sets; i = 0; - if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { + if (attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { /* user specified, bail out if channel not found */ nla_for_each_nested(attr, - info->attrs[NL80211_ATTR_SCAN_FREQUENCIES], + attrs[NL80211_ATTR_SCAN_FREQUENCIES], tmp) { struct ieee80211_channel *chan; @@ -5742,8 +5945,8 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, request->n_channels = i; i = 0; - if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) { - nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], + if (attrs[NL80211_ATTR_SCAN_SSIDS]) { + nla_for_each_nested(attr, attrs[NL80211_ATTR_SCAN_SSIDS], tmp) { if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) { err = -EINVAL; @@ -5757,9 +5960,9 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, } i = 0; - if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) { + if (attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) { nla_for_each_nested(attr, - info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH], + attrs[NL80211_ATTR_SCHED_SCAN_MATCH], tmp) { struct nlattr *ssid, *rssi; @@ -5814,36 +6017,88 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, if (ie_len) { request->ie_len = ie_len; memcpy((void *)request->ie, - nla_data(info->attrs[NL80211_ATTR_IE]), + nla_data(attrs[NL80211_ATTR_IE]), request->ie_len); } - if (info->attrs[NL80211_ATTR_SCAN_FLAGS]) { + if (attrs[NL80211_ATTR_SCAN_FLAGS]) { request->flags = nla_get_u32( - info->attrs[NL80211_ATTR_SCAN_FLAGS]); + attrs[NL80211_ATTR_SCAN_FLAGS]); if ((request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) && !(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) { err = -EOPNOTSUPP; goto out_free; } + + if (request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { + u32 flg = NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR; + + if (!wdev) /* must be net-detect */ + flg = NL80211_FEATURE_ND_RANDOM_MAC_ADDR; + + if (!(wiphy->features & flg)) { + err = -EOPNOTSUPP; + goto out_free; + } + + if (wdev && wdev->current_bss) { + err = -EOPNOTSUPP; + goto out_free; + } + + err = nl80211_parse_random_mac(attrs, request->mac_addr, + 
request->mac_addr_mask); + if (err) + goto out_free; + } } - request->dev = dev; - request->wiphy = &rdev->wiphy; request->interval = interval; request->scan_start = jiffies; - err = rdev_sched_scan_start(rdev, dev, request); - if (!err) { - rdev->sched_scan_req = request; - nl80211_send_sched_scan(rdev, dev, - NL80211_CMD_START_SCHED_SCAN); - goto out; - } + return request; out_free: kfree(request); -out: + return ERR_PTR(err); +} + +static int nl80211_start_sched_scan(struct sk_buff *skb, + struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + struct wireless_dev *wdev = dev->ieee80211_ptr; + int err; + + if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) || + !rdev->ops->sched_scan_start) + return -EOPNOTSUPP; + + if (rdev->sched_scan_req) + return -EINPROGRESS; + + rdev->sched_scan_req = nl80211_parse_sched_scan(&rdev->wiphy, wdev, + info->attrs); + err = PTR_ERR_OR_ZERO(rdev->sched_scan_req); + if (err) + goto out_err; + + err = rdev_sched_scan_start(rdev, dev, rdev->sched_scan_req); + if (err) + goto out_free; + + rdev->sched_scan_req->dev = dev; + rdev->sched_scan_req->wiphy = &rdev->wiphy; + + nl80211_send_sched_scan(rdev, dev, + NL80211_CMD_START_SCHED_SCAN); + return 0; + +out_free: + kfree(rdev->sched_scan_req); +out_err: + rdev->sched_scan_req = NULL; return err; } @@ -5923,7 +6178,6 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info) * function is called under RTNL lock, so this should not be a problem. */ static struct nlattr *csa_attrs[NL80211_ATTR_MAX+1]; - u8 radar_detect_width = 0; int err; bool need_new_beacon = false; int len, i; @@ -6059,10 +6313,8 @@ skip_beacons: if (err < 0) return err; - if (err > 0) { - radar_detect_width = BIT(params.chandef.width); + if (err > 0) params.radar_required = true; - } if (info->attrs[NL80211_ATTR_CH_SWITCH_BLOCK_TX]) params.block_tx = true; @@ -6311,8 +6563,6 @@ static int nl80211_dump_survey(struct sk_buff *skb, } while (1) { - struct ieee80211_channel *chan; - res = rdev_dump_survey(rdev, wdev->netdev, survey_idx, &survey); if (res == -ENOENT) break; @@ -6325,9 +6575,7 @@ static int nl80211_dump_survey(struct sk_buff *skb, goto out; } - chan = ieee80211_get_channel(&rdev->wiphy, - survey.channel->center_freq); - if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) { + if (survey.channel->flags & IEEE80211_CHAN_DISABLED) { survey_idx++; continue; } @@ -8159,6 +8407,28 @@ static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info) return -EINVAL; } +static int nl80211_join_ocb(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + struct ocb_setup setup = {}; + int err; + + err = nl80211_parse_chandef(rdev, info, &setup.chandef); + if (err) + return err; + + return cfg80211_join_ocb(rdev, dev, &setup); +} + +static int nl80211_leave_ocb(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + + return cfg80211_leave_ocb(rdev, dev); +} + static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; @@ -8542,15 +8812,48 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev, return 0; } -static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) +static int nl80211_parse_wowlan_nd(struct 
cfg80211_registered_device *rdev, + const struct wiphy_wowlan_support *wowlan, + struct nlattr *attr, + struct cfg80211_wowlan *trig) { - struct cfg80211_registered_device *rdev = info->user_ptr[0]; - struct nlattr *tb[NUM_NL80211_WOWLAN_TRIG]; - struct cfg80211_wowlan new_triggers = {}; - struct cfg80211_wowlan *ntrig; - const struct wiphy_wowlan_support *wowlan = rdev->wiphy.wowlan; - int err, i; - bool prev_enabled = rdev->wiphy.wowlan_config; + struct nlattr **tb; + int err; + + tb = kzalloc(NUM_NL80211_ATTR * sizeof(*tb), GFP_KERNEL); + if (!tb) + return -ENOMEM; + + if (!(wowlan->flags & WIPHY_WOWLAN_NET_DETECT)) { + err = -EOPNOTSUPP; + goto out; + } + + err = nla_parse(tb, NL80211_ATTR_MAX, + nla_data(attr), nla_len(attr), + nl80211_policy); + if (err) + goto out; + + trig->nd_config = nl80211_parse_sched_scan(&rdev->wiphy, NULL, tb); + err = PTR_ERR_OR_ZERO(trig->nd_config); + if (err) + trig->nd_config = NULL; + +out: + kfree(tb); + return err; +} + +static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct nlattr *tb[NUM_NL80211_WOWLAN_TRIG]; + struct cfg80211_wowlan new_triggers = {}; + struct cfg80211_wowlan *ntrig; + const struct wiphy_wowlan_support *wowlan = rdev->wiphy.wowlan; + int err, i; + bool prev_enabled = rdev->wiphy.wowlan_config; if (!wowlan) return -EOPNOTSUPP; @@ -8687,6 +8990,14 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) goto error; } + if (tb[NL80211_WOWLAN_TRIG_NET_DETECT]) { + err = nl80211_parse_wowlan_nd( + rdev, wowlan, tb[NL80211_WOWLAN_TRIG_NET_DETECT], + &new_triggers); + if (err) + goto error; + } + ntrig = kmemdup(&new_triggers, sizeof(new_triggers), GFP_KERNEL); if (!ntrig) { err = -ENOMEM; @@ -9444,7 +9755,7 @@ static int nl80211_add_tx_ts(struct sk_buff *skb, struct genl_info *info) u16 admitted_time = 0; int err; - if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_WMM_ADMISSION)) + if (!(rdev->wiphy.features & NL80211_FEATURE_SUPPORTS_WMM_ADMISSION)) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_TSID] || !info->attrs[NL80211_ATTR_MAC] || @@ -9460,12 +9771,10 @@ static int nl80211_add_tx_ts(struct sk_buff *skb, struct genl_info *info) return -EINVAL; /* WMM uses TIDs 0-7 even for TSPEC */ - if (tsid < IEEE80211_FIRST_TSPEC_TSID) { - if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_WMM_ADMISSION)) - return -EINVAL; - } else { + if (tsid >= IEEE80211_FIRST_TSPEC_TSID) { /* TODO: handle 802.11 TSPEC/admission control - * need more attributes for that (e.g. BA session requirement) + * need more attributes for that (e.g. 
BA session requirement); + * change the WMM admission test above to then allow both */ return -EINVAL; } @@ -9521,6 +9830,98 @@ static int nl80211_del_tx_ts(struct sk_buff *skb, struct genl_info *info) return err; } +static int nl80211_tdls_channel_switch(struct sk_buff *skb, + struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_chan_def chandef = {}; + const u8 *addr; + u8 oper_class; + int err; + + if (!rdev->ops->tdls_channel_switch || + !(rdev->wiphy.features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH)) + return -EOPNOTSUPP; + + switch (dev->ieee80211_ptr->iftype) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + break; + default: + return -EOPNOTSUPP; + } + + if (!info->attrs[NL80211_ATTR_MAC] || + !info->attrs[NL80211_ATTR_OPER_CLASS]) + return -EINVAL; + + err = nl80211_parse_chandef(rdev, info, &chandef); + if (err) + return err; + + /* + * Don't allow wide channels on the 2.4 GHz band, as per IEEE 802.11-2012 + * section 10.22.6.2.1. Disallow 5/10 MHz channels as well for now; the + * specification does not cover them. + */ + if (chandef.chan->band == IEEE80211_BAND_2GHZ && + chandef.width != NL80211_CHAN_WIDTH_20_NOHT && + chandef.width != NL80211_CHAN_WIDTH_20) + return -EINVAL; + + /* we will be active on the TDLS link */ + if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, wdev->iftype)) + return -EINVAL; + + /* don't allow switching to DFS channels */ + if (cfg80211_chandef_dfs_required(wdev->wiphy, &chandef, wdev->iftype)) + return -EINVAL; + + addr = nla_data(info->attrs[NL80211_ATTR_MAC]); + oper_class = nla_get_u8(info->attrs[NL80211_ATTR_OPER_CLASS]); + + wdev_lock(wdev); + err = rdev_tdls_channel_switch(rdev, dev, addr, oper_class, &chandef); + wdev_unlock(wdev); + + return err; +} + +static int nl80211_tdls_cancel_channel_switch(struct sk_buff *skb, + struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + struct wireless_dev *wdev = dev->ieee80211_ptr; + const u8 *addr; + + if (!rdev->ops->tdls_channel_switch || + !rdev->ops->tdls_cancel_channel_switch || + !(rdev->wiphy.features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH)) + return -EOPNOTSUPP; + + switch (dev->ieee80211_ptr->iftype) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + break; + default: + return -EOPNOTSUPP; + } + + if (!info->attrs[NL80211_ATTR_MAC]) + return -EINVAL; + + addr = nla_data(info->attrs[NL80211_ATTR_MAC]); + + wdev_lock(wdev); + rdev_tdls_cancel_channel_switch(rdev, dev, addr); + wdev_unlock(wdev); + + return 0; +} + #define NL80211_FLAG_NEED_WIPHY 0x01 #define NL80211_FLAG_NEED_NETDEV 0x02 #define NL80211_FLAG_NEED_RTNL 0x04 @@ -9781,6 +10182,15 @@ static const struct genl_ops nl80211_ops[] = { .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, + { + .cmd = NL80211_CMD_GET_MPP, + .doit = nl80211_get_mpp, + .dumpit = nl80211_dump_mpp, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | + NL80211_FLAG_NEED_RTNL, + }, { .cmd = NL80211_CMD_SET_MPATH, .doit = nl80211_set_mpath, @@ -10095,6 +10505,22 @@ static const struct genl_ops nl80211_ops[] = { .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, + { + .cmd = NL80211_CMD_JOIN_OCB, + .doit = nl80211_join_ocb, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags
= NL80211_FLAG_NEED_NETDEV_UP | + NL80211_FLAG_NEED_RTNL, + }, + { + .cmd = NL80211_CMD_LEAVE_OCB, + .doit = nl80211_leave_ocb, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | + NL80211_FLAG_NEED_RTNL, + }, #ifdef CONFIG_PM { .cmd = NL80211_CMD_GET_WOWLAN, @@ -10294,6 +10720,22 @@ static const struct genl_ops nl80211_ops[] = { .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, + { + .cmd = NL80211_CMD_TDLS_CHANNEL_SWITCH, + .doit = nl80211_tdls_channel_switch, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | + NL80211_FLAG_NEED_RTNL, + }, + { + .cmd = NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH, + .doit = nl80211_tdls_cancel_channel_switch, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | + NL80211_FLAG_NEED_RTNL, + }, }; /* notification functions */ @@ -11325,55 +11767,155 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie, } EXPORT_SYMBOL(cfg80211_mgmt_tx_status); -void cfg80211_cqm_rssi_notify(struct net_device *dev, - enum nl80211_cqm_rssi_threshold_event rssi_event, - gfp_t gfp) +static struct sk_buff *cfg80211_prepare_cqm(struct net_device *dev, + const char *mac, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; - struct wiphy *wiphy = wdev->wiphy; - struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); - struct sk_buff *msg; - struct nlattr *pinfoattr; - void *hdr; - - trace_cfg80211_cqm_rssi_notify(dev, rssi_event); + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); + struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); + void **cb; - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) - return; + return NULL; - hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM); - if (!hdr) { + cb = (void **)msg->cb; + + cb[0] = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM); + if (!cb[0]) { nlmsg_free(msg); - return; + return NULL; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; - pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM); - if (!pinfoattr) + if (mac && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac)) goto nla_put_failure; - if (nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT, - rssi_event)) + cb[1] = nla_nest_start(msg, NL80211_ATTR_CQM); + if (!cb[1]) goto nla_put_failure; - nla_nest_end(msg, pinfoattr); + cb[2] = rdev; - genlmsg_end(msg, hdr); + return msg; + nla_put_failure: + nlmsg_free(msg); + return NULL; +} + +static void cfg80211_send_cqm(struct sk_buff *msg, gfp_t gfp) +{ + void **cb = (void **)msg->cb; + struct cfg80211_registered_device *rdev = cb[2]; + + nla_nest_end(msg, cb[1]); + genlmsg_end(msg, cb[0]); + + memset(msg->cb, 0, sizeof(msg->cb)); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); +} + +void cfg80211_cqm_rssi_notify(struct net_device *dev, + enum nl80211_cqm_rssi_threshold_event rssi_event, + gfp_t gfp) +{ + struct sk_buff *msg; + + trace_cfg80211_cqm_rssi_notify(dev, rssi_event); + + if (WARN_ON(rssi_event != NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW && + rssi_event != NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH)) + return; + + msg = cfg80211_prepare_cqm(dev, NULL, gfp); + if (!msg) + return; + + if (nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT, + rssi_event)) + goto nla_put_failure; + + cfg80211_send_cqm(msg, gfp); + return; nla_put_failure: - 
genlmsg_cancel(msg, hdr); nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_cqm_rssi_notify); +void cfg80211_cqm_txe_notify(struct net_device *dev, + const u8 *peer, u32 num_packets, + u32 rate, u32 intvl, gfp_t gfp) +{ + struct sk_buff *msg; + + msg = cfg80211_prepare_cqm(dev, peer, gfp); + if (!msg) + return; + + if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_PKTS, num_packets)) + goto nla_put_failure; + + if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_RATE, rate)) + goto nla_put_failure; + + if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_INTVL, intvl)) + goto nla_put_failure; + + cfg80211_send_cqm(msg, gfp); + return; + + nla_put_failure: + nlmsg_free(msg); +} +EXPORT_SYMBOL(cfg80211_cqm_txe_notify); + +void cfg80211_cqm_pktloss_notify(struct net_device *dev, + const u8 *peer, u32 num_packets, gfp_t gfp) +{ + struct sk_buff *msg; + + trace_cfg80211_cqm_pktloss_notify(dev, peer, num_packets); + + msg = cfg80211_prepare_cqm(dev, peer, gfp); + if (!msg) + return; + + if (nla_put_u32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets)) + goto nla_put_failure; + + cfg80211_send_cqm(msg, gfp); + return; + + nla_put_failure: + nlmsg_free(msg); +} +EXPORT_SYMBOL(cfg80211_cqm_pktloss_notify); + +void cfg80211_cqm_beacon_loss_notify(struct net_device *dev, gfp_t gfp) +{ + struct sk_buff *msg; + + msg = cfg80211_prepare_cqm(dev, NULL, gfp); + if (!msg) + return; + + if (nla_put_flag(msg, NL80211_ATTR_CQM_BEACON_LOSS_EVENT)) + goto nla_put_failure; + + cfg80211_send_cqm(msg, gfp); + return; + + nla_put_failure: + nlmsg_free(msg); +} +EXPORT_SYMBOL(cfg80211_cqm_beacon_loss_notify); + static void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *bssid, const u8 *replay_ctr, gfp_t gfp) @@ -11491,7 +12033,9 @@ EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify); static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, struct net_device *netdev, struct cfg80211_chan_def *chandef, - gfp_t gfp) + gfp_t gfp, + enum nl80211_commands notif, + u8 count) { struct sk_buff *msg; void *hdr; @@ -11500,7 +12044,7 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, if (!msg) return; - hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CH_SWITCH_NOTIFY); + hdr = nl80211hdr_put(msg, 0, 0, 0, notif); if (!hdr) { nlmsg_free(msg); return; @@ -11512,6 +12056,10 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, if (nl80211_send_chandef(msg, chandef)) goto nla_put_failure; + if ((notif == NL80211_CMD_CH_SWITCH_STARTED_NOTIFY) && + (nla_put_u32(msg, NL80211_ATTR_CH_SWITCH_COUNT, count))) + goto nla_put_failure; + genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, @@ -11534,70 +12082,27 @@ void cfg80211_ch_switch_notify(struct net_device *dev, trace_cfg80211_ch_switch_notify(dev, chandef); - if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && - wdev->iftype != NL80211_IFTYPE_P2P_GO && - wdev->iftype != NL80211_IFTYPE_ADHOC && - wdev->iftype != NL80211_IFTYPE_MESH_POINT)) - return; - wdev->chandef = *chandef; wdev->preset_chandef = *chandef; - nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL); + nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL, + NL80211_CMD_CH_SWITCH_NOTIFY, 0); } EXPORT_SYMBOL(cfg80211_ch_switch_notify); -void cfg80211_cqm_txe_notify(struct net_device *dev, - const u8 *peer, u32 num_packets, - u32 rate, u32 intvl, gfp_t gfp) +void cfg80211_ch_switch_started_notify(struct net_device *dev, + struct cfg80211_chan_def *chandef, + u8 count) { struct 
wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); - struct sk_buff *msg; - struct nlattr *pinfoattr; - void *hdr; - - msg = nlmsg_new(NLMSG_GOODSIZE, gfp); - if (!msg) - return; - hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM); - if (!hdr) { - nlmsg_free(msg); - return; - } + trace_cfg80211_ch_switch_started_notify(dev, chandef); - if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || - nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || - nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer)) - goto nla_put_failure; - - pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM); - if (!pinfoattr) - goto nla_put_failure; - - if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_PKTS, num_packets)) - goto nla_put_failure; - - if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_RATE, rate)) - goto nla_put_failure; - - if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_INTVL, intvl)) - goto nla_put_failure; - - nla_nest_end(msg, pinfoattr); - - genlmsg_end(msg, hdr); - - genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, - NL80211_MCGRP_MLME, gfp); - return; - - nla_put_failure: - genlmsg_cancel(msg, hdr); - nlmsg_free(msg); + nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL, + NL80211_CMD_CH_SWITCH_STARTED_NOTIFY, count); } -EXPORT_SYMBOL(cfg80211_cqm_txe_notify); +EXPORT_SYMBOL(cfg80211_ch_switch_started_notify); void nl80211_radar_notify(struct cfg80211_registered_device *rdev, @@ -11647,54 +12152,6 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev, nlmsg_free(msg); } -void cfg80211_cqm_pktloss_notify(struct net_device *dev, - const u8 *peer, u32 num_packets, gfp_t gfp) -{ - struct wireless_dev *wdev = dev->ieee80211_ptr; - struct wiphy *wiphy = wdev->wiphy; - struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); - struct sk_buff *msg; - struct nlattr *pinfoattr; - void *hdr; - - trace_cfg80211_cqm_pktloss_notify(dev, peer, num_packets); - - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); - if (!msg) - return; - - hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM); - if (!hdr) { - nlmsg_free(msg); - return; - } - - if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || - nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || - nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer)) - goto nla_put_failure; - - pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM); - if (!pinfoattr) - goto nla_put_failure; - - if (nla_put_u32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets)) - goto nla_put_failure; - - nla_nest_end(msg, pinfoattr); - - genlmsg_end(msg, hdr); - - genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, - NL80211_MCGRP_MLME, gfp); - return; - - nla_put_failure: - genlmsg_cancel(msg, hdr); - nlmsg_free(msg); -} -EXPORT_SYMBOL(cfg80211_cqm_pktloss_notify); - void cfg80211_probe_status(struct net_device *dev, const u8 *addr, u64 cookie, bool acked, gfp_t gfp) { @@ -11782,6 +12239,67 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy, EXPORT_SYMBOL(cfg80211_report_obss_beacon); #ifdef CONFIG_PM +static int cfg80211_net_detect_results(struct sk_buff *msg, + struct cfg80211_wowlan_wakeup *wakeup) +{ + struct cfg80211_wowlan_nd_info *nd = wakeup->net_detect; + struct nlattr *nl_results, *nl_match, *nl_freqs; + int i, j; + + nl_results = nla_nest_start( + msg, NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS); + if (!nl_results) + return -EMSGSIZE; + + for (i = 0; i < nd->n_matches; i++) { + struct cfg80211_wowlan_nd_match *match = nd->matches[i]; + + nl_match = 
nla_nest_start(msg, i); + if (!nl_match) + break; + + /* The SSID attribute is optional in nl80211, but for + * simplicity reasons it's always present in the + * cfg80211 structure. If a driver can't pass the + * SSID, that needs to be changed. A zero length SSID + * is still a valid SSID (wildcard), so it cannot be + * used for this purpose. + */ + if (nla_put(msg, NL80211_ATTR_SSID, match->ssid.ssid_len, + match->ssid.ssid)) { + nla_nest_cancel(msg, nl_match); + goto out; + } + + if (match->n_channels) { + nl_freqs = nla_nest_start( + msg, NL80211_ATTR_SCAN_FREQUENCIES); + if (!nl_freqs) { + nla_nest_cancel(msg, nl_match); + goto out; + } + + for (j = 0; j < match->n_channels; j++) { + if (nla_put_u32(msg, + NL80211_ATTR_WIPHY_FREQ, + match->channels[j])) { + nla_nest_cancel(msg, nl_freqs); + nla_nest_cancel(msg, nl_match); + goto out; + } + } + + nla_nest_end(msg, nl_freqs); + } + + nla_nest_end(msg, nl_match); + } + +out: + nla_nest_end(msg, nl_results); + return 0; +} + void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev, struct cfg80211_wowlan_wakeup *wakeup, gfp_t gfp) @@ -11876,6 +12394,10 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev, goto free_msg; } + if (wakeup->net_detect && + cfg80211_net_detect_results(msg, wakeup)) + goto free_msg; + nla_nest_end(msg, reasons); } diff --git a/net/wireless/ocb.c b/net/wireless/ocb.c new file mode 100644 index 0000000000000000000000000000000000000000..c00d4a792319f4a25c9a07cd431d8f7b943ec5e2 --- /dev/null +++ b/net/wireless/ocb.c @@ -0,0 +1,88 @@ +/* + * OCB mode implementation + * + * Copyright: (c) 2014 Czech Technical University in Prague + * (c) 2014 Volkswagen Group Research + * Author: Rostislav Lisovy + * Funded by: Volkswagen Group Research + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
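+ *
+ * OCB ("outside the context of a BSS", adopted from IEEE 802.11p) lets
+ * stations exchange frames on a fixed channel without association or a
+ * BSSID, as used for vehicular (ITS) networking.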
+ */ + +#include +#include +#include "nl80211.h" +#include "core.h" +#include "rdev-ops.h" + +int __cfg80211_join_ocb(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct ocb_setup *setup) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + int err; + + ASSERT_WDEV_LOCK(wdev); + + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_OCB) + return -EOPNOTSUPP; + + if (WARN_ON(!setup->chandef.chan)) + return -EINVAL; + + err = rdev_join_ocb(rdev, dev, setup); + if (!err) + wdev->chandef = setup->chandef; + + return err; +} + +int cfg80211_join_ocb(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct ocb_setup *setup) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + int err; + + wdev_lock(wdev); + err = __cfg80211_join_ocb(rdev, dev, setup); + wdev_unlock(wdev); + + return err; +} + +int __cfg80211_leave_ocb(struct cfg80211_registered_device *rdev, + struct net_device *dev) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + int err; + + ASSERT_WDEV_LOCK(wdev); + + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_OCB) + return -EOPNOTSUPP; + + if (!rdev->ops->leave_ocb) + return -EOPNOTSUPP; + + err = rdev_leave_ocb(rdev, dev); + if (!err) + memset(&wdev->chandef, 0, sizeof(wdev->chandef)); + + return err; +} + +int cfg80211_leave_ocb(struct cfg80211_registered_device *rdev, + struct net_device *dev) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + int err; + + wdev_lock(wdev); + err = __cfg80211_leave_ocb(rdev, dev); + wdev_unlock(wdev); + + return err; +} diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index f6d457d6a558a63cea2787b921af8c2ec66e9843..35cfb7134bdbaa2559cd222a8410b4f66f7ea41d 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -178,11 +178,12 @@ static inline int rdev_add_station(struct cfg80211_registered_device *rdev, } static inline int rdev_del_station(struct cfg80211_registered_device *rdev, - struct net_device *dev, u8 *mac) + struct net_device *dev, + struct station_del_parameters *params) { int ret; - trace_rdev_del_station(&rdev->wiphy, dev, mac); - ret = rdev->ops->del_station(&rdev->wiphy, dev, mac); + trace_rdev_del_station(&rdev->wiphy, dev, params); + ret = rdev->ops->del_station(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } @@ -263,6 +264,18 @@ static inline int rdev_get_mpath(struct cfg80211_registered_device *rdev, } +static inline int rdev_get_mpp(struct cfg80211_registered_device *rdev, + struct net_device *dev, u8 *dst, u8 *mpp, + struct mpath_info *pinfo) +{ + int ret; + + trace_rdev_get_mpp(&rdev->wiphy, dev, dst, mpp); + ret = rdev->ops->get_mpp(&rdev->wiphy, dev, dst, mpp, pinfo); + trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo); + return ret; +} + static inline int rdev_dump_mpath(struct cfg80211_registered_device *rdev, struct net_device *dev, int idx, u8 *dst, u8 *next_hop, struct mpath_info *pinfo) @@ -271,7 +284,20 @@ static inline int rdev_dump_mpath(struct cfg80211_registered_device *rdev, int ret; trace_rdev_dump_mpath(&rdev->wiphy, dev, idx, dst, next_hop); ret = rdev->ops->dump_mpath(&rdev->wiphy, dev, idx, dst, next_hop, - pinfo); + pinfo); + trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo); + return ret; +} + +static inline int rdev_dump_mpp(struct cfg80211_registered_device *rdev, + struct net_device *dev, int idx, u8 *dst, + u8 *mpp, struct mpath_info *pinfo) + +{ + int ret; + + trace_rdev_dump_mpp(&rdev->wiphy, dev, idx, dst, mpp); + ret = rdev->ops->dump_mpp(&rdev->wiphy, dev, idx, dst, 
mpp, pinfo); trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo); return ret; } @@ -322,6 +348,27 @@ static inline int rdev_leave_mesh(struct cfg80211_registered_device *rdev, return ret; } +static inline int rdev_join_ocb(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct ocb_setup *setup) +{ + int ret; + trace_rdev_join_ocb(&rdev->wiphy, dev, setup); + ret = rdev->ops->join_ocb(&rdev->wiphy, dev, setup); + trace_rdev_return_int(&rdev->wiphy, ret); + return ret; +} + +static inline int rdev_leave_ocb(struct cfg80211_registered_device *rdev, + struct net_device *dev) +{ + int ret; + trace_rdev_leave_ocb(&rdev->wiphy, dev); + ret = rdev->ops->leave_ocb(&rdev->wiphy, dev); + trace_rdev_return_int(&rdev->wiphy, ret); + return ret; +} + static inline int rdev_change_bss(struct cfg80211_registered_device *rdev, struct net_device *dev, struct bss_parameters *params) @@ -946,4 +993,28 @@ rdev_del_tx_ts(struct cfg80211_registered_device *rdev, return ret; } +static inline int +rdev_tdls_channel_switch(struct cfg80211_registered_device *rdev, + struct net_device *dev, const u8 *addr, + u8 oper_class, struct cfg80211_chan_def *chandef) +{ + int ret; + + trace_rdev_tdls_channel_switch(&rdev->wiphy, dev, addr, oper_class, + chandef); + ret = rdev->ops->tdls_channel_switch(&rdev->wiphy, dev, addr, + oper_class, chandef); + trace_rdev_return_int(&rdev->wiphy, ret); + return ret; +} + +static inline void +rdev_tdls_cancel_channel_switch(struct cfg80211_registered_device *rdev, + struct net_device *dev, const u8 *addr) +{ + trace_rdev_tdls_cancel_channel_switch(&rdev->wiphy, dev, addr); + rdev->ops->tdls_cancel_channel_switch(&rdev->wiphy, dev, addr); + trace_rdev_return_void(&rdev->wiphy); +} + #endif /* __CFG80211_RDEV_OPS */ diff --git a/net/wireless/reg.c b/net/wireless/reg.c index b725a31a475178ec99de0e9aabc4d89e2ebb7d40..47be6163381caadf41afab40122d74f4a19448e6 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -56,6 +56,7 @@ #include #include "core.h" #include "reg.h" +#include "rdev-ops.h" #include "regdb.h" #include "nl80211.h" @@ -66,6 +67,12 @@ #define REG_DBG_PRINT(args...) #endif +/* + * Grace period we give before making sure all current interfaces reside on + * channels allowed by the current regulatory domain. 
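+ * Interfaces still on a now-disallowed channel once this period expires
+ * are forced off it via cfg80211_leave() (see reg_leave_invalid_chans()).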
+ */ +#define REG_ENFORCE_GRACE_MS 60000 + /** * enum reg_request_treatment - regulatory request treatment * @@ -210,6 +217,9 @@ struct reg_beacon { struct ieee80211_channel chan; }; +static void reg_check_chans_work(struct work_struct *work); +static DECLARE_DELAYED_WORK(reg_check_chans, reg_check_chans_work); + static void reg_todo(struct work_struct *work); static DECLARE_WORK(reg_work, reg_todo); @@ -573,8 +583,9 @@ static const struct ieee80211_regdomain *reg_get_regdomain(struct wiphy *wiphy) return get_cfg80211_regdom(); } -unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd, - const struct ieee80211_reg_rule *rule) +static unsigned int +reg_get_max_bandwidth_from_range(const struct ieee80211_regdomain *rd, + const struct ieee80211_reg_rule *rule) { const struct ieee80211_freq_range *freq_range = &rule->freq_range; const struct ieee80211_freq_range *freq_range_tmp; @@ -622,6 +633,27 @@ unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd, return end_freq - start_freq; } +unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd, + const struct ieee80211_reg_rule *rule) +{ + unsigned int bw = reg_get_max_bandwidth_from_range(rd, rule); + + if (rule->flags & NL80211_RRF_NO_160MHZ) + bw = min_t(unsigned int, bw, MHZ_TO_KHZ(80)); + if (rule->flags & NL80211_RRF_NO_80MHZ) + bw = min_t(unsigned int, bw, MHZ_TO_KHZ(40)); + + /* + * HT40+/HT40- limits are handled per-channel. Only limit BW if both + * are not allowed. + */ + if (rule->flags & NL80211_RRF_NO_HT40MINUS && + rule->flags & NL80211_RRF_NO_HT40PLUS) + bw = min_t(unsigned int, bw, MHZ_TO_KHZ(20)); + + return bw; +} + /* Sanity check on a regulatory rule */ static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule) { @@ -946,6 +978,16 @@ static u32 map_regdom_flags(u32 rd_flags) channel_flags |= IEEE80211_CHAN_NO_OFDM; if (rd_flags & NL80211_RRF_NO_OUTDOOR) channel_flags |= IEEE80211_CHAN_INDOOR_ONLY; + if (rd_flags & NL80211_RRF_GO_CONCURRENT) + channel_flags |= IEEE80211_CHAN_GO_CONCURRENT; + if (rd_flags & NL80211_RRF_NO_HT40MINUS) + channel_flags |= IEEE80211_CHAN_NO_HT40MINUS; + if (rd_flags & NL80211_RRF_NO_HT40PLUS) + channel_flags |= IEEE80211_CHAN_NO_HT40PLUS; + if (rd_flags & NL80211_RRF_NO_80MHZ) + channel_flags |= IEEE80211_CHAN_NO_80MHZ; + if (rd_flags & NL80211_RRF_NO_160MHZ) + channel_flags |= IEEE80211_CHAN_NO_160MHZ; return channel_flags; } @@ -1486,6 +1528,96 @@ static void reg_call_notifier(struct wiphy *wiphy, wiphy->reg_notifier(wiphy, request); } +static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev) +{ + struct ieee80211_channel *ch; + struct cfg80211_chan_def chandef; + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); + bool ret = true; + + wdev_lock(wdev); + + if (!wdev->netdev || !netif_running(wdev->netdev)) + goto out; + + switch (wdev->iftype) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: + if (!wdev->beacon_interval) + goto out; + + ret = cfg80211_reg_can_beacon(wiphy, + &wdev->chandef, wdev->iftype); + break; + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_ADHOC: + if (!wdev->current_bss || + !wdev->current_bss->pub.channel) + goto out; + + ch = wdev->current_bss->pub.channel; + if (rdev->ops->get_channel && + !rdev_get_channel(rdev, wdev, &chandef)) + ret = cfg80211_chandef_usable(wiphy, &chandef, + IEEE80211_CHAN_DISABLED); + else + ret = !(ch->flags & IEEE80211_CHAN_DISABLED); + break; + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_AP_VLAN: 
+ case NL80211_IFTYPE_P2P_DEVICE: + /* no enforcement required */ + break; + default: + /* others not implemented for now */ + WARN_ON(1); + break; + } + +out: + wdev_unlock(wdev); + return ret; +} + +static void reg_leave_invalid_chans(struct wiphy *wiphy) +{ + struct wireless_dev *wdev; + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); + + ASSERT_RTNL(); + + list_for_each_entry(wdev, &rdev->wdev_list, list) + if (!reg_wdev_chan_valid(wiphy, wdev)) + cfg80211_leave(rdev, wdev); +} + +static void reg_check_chans_work(struct work_struct *work) +{ + struct cfg80211_registered_device *rdev; + + REG_DBG_PRINT("Verifying active interfaces after reg change\n"); + rtnl_lock(); + + list_for_each_entry(rdev, &cfg80211_rdev_list, list) + if (!(rdev->wiphy.regulatory_flags & + REGULATORY_IGNORE_STALE_KICKOFF)) + reg_leave_invalid_chans(&rdev->wiphy); + + rtnl_unlock(); +} + +static void reg_check_channels(void) +{ + /* + * Give usermode a chance to do something nicer (move to another + * channel, orderly disconnection), before forcing a disconnection. + */ + mod_delayed_work(system_power_efficient_wq, + ®_check_chans, + msecs_to_jiffies(REG_ENFORCE_GRACE_MS)); +} + static void wiphy_update_regulatory(struct wiphy *wiphy, enum nl80211_reg_initiator initiator) { @@ -1525,6 +1657,8 @@ static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator) wiphy = &rdev->wiphy; wiphy_update_regulatory(wiphy, initiator); } + + reg_check_channels(); } static void handle_channel_custom(struct wiphy *wiphy, @@ -1565,10 +1699,23 @@ static void handle_channel_custom(struct wiphy *wiphy, if (max_bandwidth_khz < MHZ_TO_KHZ(160)) bw_flags |= IEEE80211_CHAN_NO_160MHZ; + chan->dfs_state_entered = jiffies; + chan->dfs_state = NL80211_DFS_USABLE; + + chan->beacon_found = false; chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags; chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain); chan->max_reg_power = chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); + + if (chan->flags & IEEE80211_CHAN_RADAR) { + if (reg_rule->dfs_cac_ms) + chan->dfs_cac_ms = reg_rule->dfs_cac_ms; + else + chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS; + } + + chan->max_power = chan->max_reg_power; } static void handle_band_custom(struct wiphy *wiphy, @@ -1931,8 +2078,10 @@ static void reg_process_hint(struct regulatory_request *reg_request) /* This is required so that the orig_* parameters are saved */ if (treatment == REG_REQ_ALREADY_SET && wiphy && - wiphy->regulatory_flags & REGULATORY_STRICT_REG) + wiphy->regulatory_flags & REGULATORY_STRICT_REG) { wiphy_update_regulatory(wiphy, reg_request->initiator); + reg_check_channels(); + } return; @@ -2813,6 +2962,7 @@ void regulatory_exit(void) cancel_work_sync(®_work); cancel_delayed_work_sync(®_timeout); + cancel_delayed_work_sync(®_check_chans); /* Lock to suppress warnings */ rtnl_lock(); diff --git a/net/wireless/sme.c b/net/wireless/sme.c index dc1668ff543b90927d9cb4e08b38897ac8e97f6c..0ab3711c79a01ae98cdaaafa64bf12ac94e80271 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -80,9 +80,18 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev) if (!request) return -ENOMEM; - if (wdev->conn->params.channel) + if (wdev->conn->params.channel) { + enum ieee80211_band band = wdev->conn->params.channel->band; + struct ieee80211_supported_band *sband = + wdev->wiphy->bands[band]; + + if (!sband) { + kfree(request); + return -EINVAL; + } request->channels[0] = wdev->conn->params.channel; - else { + request->rates[band] = (1 << 
sband->n_bitrates) - 1; + } else { int i = 0, j; enum ieee80211_band band; struct ieee80211_supported_band *bands; diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 625a6e6d1168376e7d5b3f55d09729ebc786f1f6..ad38910f7036d528fc04a95981fabea5d0f6ba35 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -600,6 +600,11 @@ DEFINE_EVENT(wiphy_netdev_evt, rdev_leave_ibss, TP_ARGS(wiphy, netdev) ); +DEFINE_EVENT(wiphy_netdev_evt, rdev_leave_ocb, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), + TP_ARGS(wiphy, netdev) +); + DEFINE_EVENT(wiphy_netdev_evt, rdev_flush_pmksa, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), TP_ARGS(wiphy, netdev) @@ -680,9 +685,34 @@ DECLARE_EVENT_CLASS(wiphy_netdev_mac_evt, WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac)) ); -DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_del_station, - TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac), - TP_ARGS(wiphy, netdev, mac) +DECLARE_EVENT_CLASS(station_del, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + struct station_del_parameters *params), + TP_ARGS(wiphy, netdev, params), + TP_STRUCT__entry( + WIPHY_ENTRY + NETDEV_ENTRY + MAC_ENTRY(sta_mac) + __field(u8, subtype) + __field(u16, reason_code) + ), + TP_fast_assign( + WIPHY_ASSIGN; + NETDEV_ASSIGN; + MAC_ASSIGN(sta_mac, params->mac); + __entry->subtype = params->subtype; + __entry->reason_code = params->reason_code; + ), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: " MAC_PR_FMT + ", subtype: %u, reason_code: %u", + WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac), + __entry->subtype, __entry->reason_code) +); + +DEFINE_EVENT(station_del, rdev_del_station, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + struct station_del_parameters *params), + TP_ARGS(wiphy, netdev, params) ); DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_get_station, @@ -801,6 +831,51 @@ TRACE_EVENT(rdev_dump_mpath, MAC_PR_ARG(next_hop)) ); +TRACE_EVENT(rdev_get_mpp, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + u8 *dst, u8 *mpp), + TP_ARGS(wiphy, netdev, dst, mpp), + TP_STRUCT__entry( + WIPHY_ENTRY + NETDEV_ENTRY + MAC_ENTRY(dst) + MAC_ENTRY(mpp) + ), + TP_fast_assign( + WIPHY_ASSIGN; + NETDEV_ASSIGN; + MAC_ASSIGN(dst, dst); + MAC_ASSIGN(mpp, mpp); + ), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", destination: " MAC_PR_FMT + ", mpp: " MAC_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, + MAC_PR_ARG(dst), MAC_PR_ARG(mpp)) +); + +TRACE_EVENT(rdev_dump_mpp, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int idx, + u8 *dst, u8 *mpp), + TP_ARGS(wiphy, netdev, idx, dst, mpp), + TP_STRUCT__entry( + WIPHY_ENTRY + NETDEV_ENTRY + MAC_ENTRY(dst) + MAC_ENTRY(mpp) + __field(int, idx) + ), + TP_fast_assign( + WIPHY_ASSIGN; + NETDEV_ASSIGN; + MAC_ASSIGN(dst, dst); + MAC_ASSIGN(mpp, mpp); + __entry->idx = idx; + ), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", index: %d, destination: " + MAC_PR_FMT ", mpp: " MAC_PR_FMT, + WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->idx, MAC_PR_ARG(dst), + MAC_PR_ARG(mpp)) +); + TRACE_EVENT(rdev_return_int_mpath_info, TP_PROTO(struct wiphy *wiphy, int ret, struct mpath_info *pinfo), TP_ARGS(wiphy, ret, pinfo), @@ -1246,6 +1321,22 @@ TRACE_EVENT(rdev_join_ibss, WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->ssid) ); +TRACE_EVENT(rdev_join_ocb, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + const struct ocb_setup *setup), + TP_ARGS(wiphy, netdev, setup), + TP_STRUCT__entry( + WIPHY_ENTRY + NETDEV_ENTRY + ), + TP_fast_assign( + WIPHY_ASSIGN; + NETDEV_ASSIGN; + 
), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, + WIPHY_PR_ARG, NETDEV_PR_ARG) +); + TRACE_EVENT(rdev_set_wiphy_params, TP_PROTO(struct wiphy *wiphy, u32 changed), TP_ARGS(wiphy, changed), @@ -1941,6 +2032,48 @@ TRACE_EVENT(rdev_del_tx_ts, WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->tsid) ); +TRACE_EVENT(rdev_tdls_channel_switch, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + const u8 *addr, u8 oper_class, + struct cfg80211_chan_def *chandef), + TP_ARGS(wiphy, netdev, addr, oper_class, chandef), + TP_STRUCT__entry( + WIPHY_ENTRY + NETDEV_ENTRY + MAC_ENTRY(addr) + __field(u8, oper_class) + CHAN_DEF_ENTRY + ), + TP_fast_assign( + WIPHY_ASSIGN; + NETDEV_ASSIGN; + MAC_ASSIGN(addr, addr); + CHAN_DEF_ASSIGN(chandef); + ), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT + " oper class %d, " CHAN_DEF_PR_FMT, + WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(addr), + __entry->oper_class, CHAN_DEF_PR_ARG) +); + +TRACE_EVENT(rdev_tdls_cancel_channel_switch, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + const u8 *addr), + TP_ARGS(wiphy, netdev, addr), + TP_STRUCT__entry( + WIPHY_ENTRY + NETDEV_ENTRY + MAC_ENTRY(addr) + ), + TP_fast_assign( + WIPHY_ASSIGN; + NETDEV_ASSIGN; + MAC_ASSIGN(addr, addr); + ), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT, + WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(addr)) +); + /************************************************************* * cfg80211 exported functions traces * *************************************************************/ @@ -2264,6 +2397,22 @@ TRACE_EVENT(cfg80211_ch_switch_notify, NETDEV_PR_ARG, CHAN_DEF_PR_ARG) ); +TRACE_EVENT(cfg80211_ch_switch_started_notify, + TP_PROTO(struct net_device *netdev, + struct cfg80211_chan_def *chandef), + TP_ARGS(netdev, chandef), + TP_STRUCT__entry( + NETDEV_ENTRY + CHAN_DEF_ENTRY + ), + TP_fast_assign( + NETDEV_ASSIGN; + CHAN_DEF_ASSIGN(chandef); + ), + TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT, + NETDEV_PR_ARG, CHAN_DEF_PR_ARG) +); + TRACE_EVENT(cfg80211_radar_event, TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef), TP_ARGS(wiphy, chandef), diff --git a/net/wireless/util.c b/net/wireless/util.c index 5e233a577d0fcd15e39b5eebc5678598c37637df..d0ac795445b7e40dc92ebb1e0bd9095fc4bab048 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -442,7 +442,8 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, break; case cpu_to_le16(0): if (iftype != NL80211_IFTYPE_ADHOC && - iftype != NL80211_IFTYPE_STATION) + iftype != NL80211_IFTYPE_STATION && + iftype != NL80211_IFTYPE_OCB) return -1; break; } @@ -519,6 +520,7 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr, memcpy(hdr.addr3, skb->data, ETH_ALEN); hdrlen = 24; break; + case NL80211_IFTYPE_OCB: case NL80211_IFTYPE_ADHOC: /* DA SA BSSID */ memcpy(hdr.addr1, skb->data, ETH_ALEN); @@ -937,6 +939,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, if (dev->ieee80211_ptr->use_4addr) break; /* fall through */ + case NL80211_IFTYPE_OCB: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_ADHOC: dev->priv_flags |= IFF_DONT_BRIDGE; diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 5ad4418ef093f3625ac5efb205dafc2590c4e0e6..d9149b68b9bc5c1d100d654d256048b30e268cdb 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -1170,7 +1170,7 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock, skb_reset_transport_header(skb); skb_put(skb, len); - rc = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); + rc = 
memcpy_from_msg(skb_transport_header(skb), msg, len); if (rc) goto out_kfree_skb; @@ -1335,7 +1335,7 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, /* Currently, each datagram always contains a complete record */ msg->msg_flags |= MSG_EOR; - rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + rc = skb_copy_datagram_msg(skb, 0, msg, copied); if (rc) goto out_free_dgram; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 88bf289abdc925a05730d6695944bdef1ec67eaf..cee479bc655c4f317edb4e90cbcb06ec3b424b81 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -55,6 +55,7 @@ static int stale_bundle(struct dst_entry *dst); static int xfrm_bundle_ok(struct xfrm_dst *xdst); static void xfrm_policy_queue_process(unsigned long arg); +static void __xfrm_policy_link(struct xfrm_policy *pol, int dir); static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, int dir); @@ -561,7 +562,7 @@ static void xfrm_hash_resize(struct work_struct *work) mutex_lock(&hash_resize_mutex); total = 0; - for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { + for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { if (xfrm_bydst_should_resize(net, dir, &total)) xfrm_bydst_resize(net, dir); } @@ -601,7 +602,7 @@ static void xfrm_hash_rebuild(struct work_struct *work) write_lock_bh(&net->xfrm.xfrm_policy_lock); /* reset the bydst and inexact table in all directions */ - for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { + for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]); hmask = net->xfrm.policy_bydst[dir].hmask; odst = net->xfrm.policy_bydst[dir].table; @@ -779,8 +780,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) hlist_add_behind(&policy->bydst, newpos); else hlist_add_head(&policy->bydst, chain); - xfrm_pol_hold(policy); - net->xfrm.policy_count[dir]++; + __xfrm_policy_link(policy, dir); atomic_inc(&net->xfrm.flow_cache_genid); /* After previous checking, family can either be AF_INET or AF_INET6 */ @@ -799,7 +799,6 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) policy->curlft.use_time = 0; if (!mod_timer(&policy->timer, jiffies + HZ)) xfrm_pol_hold(policy); - list_add(&policy->walk.all, &net->xfrm.policy_all); write_unlock_bh(&net->xfrm.xfrm_policy_lock); if (delpol) @@ -1247,17 +1246,10 @@ out: static void __xfrm_policy_link(struct xfrm_policy *pol, int dir) { struct net *net = xp_net(pol); - struct hlist_head *chain = policy_hash_bysel(net, &pol->selector, - pol->family, dir); list_add(&pol->walk.all, &net->xfrm.policy_all); - hlist_add_head(&pol->bydst, chain); - hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index)); net->xfrm.policy_count[dir]++; xfrm_pol_hold(pol); - - if (xfrm_bydst_should_resize(net, dir, NULL)) - schedule_work(&net->xfrm.policy_hash_work); } static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, @@ -1265,17 +1257,31 @@ static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, { struct net *net = xp_net(pol); - if (hlist_unhashed(&pol->bydst)) + if (list_empty(&pol->walk.all)) return NULL; - hlist_del_init(&pol->bydst); - hlist_del(&pol->byidx); - list_del(&pol->walk.all); + /* Socket policies are not hashed. 
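+	 * They are linked only into net->xfrm.policy_all, so the
+	 * bydst/byidx removal is skipped for them.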
*/ + if (!hlist_unhashed(&pol->bydst)) { + hlist_del(&pol->bydst); + hlist_del(&pol->byidx); + } + + list_del_init(&pol->walk.all); net->xfrm.policy_count[dir]--; return pol; } +static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir) +{ + __xfrm_policy_link(pol, XFRM_POLICY_MAX + dir); +} + +static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir) +{ + __xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir); +} + int xfrm_policy_delete(struct xfrm_policy *pol, int dir) { struct net *net = xp_net(pol); @@ -1307,7 +1313,7 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) if (pol) { pol->curlft.add_time = get_seconds(); pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0); - __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir); + xfrm_sk_policy_link(pol, dir); } if (old_pol) { if (pol) @@ -1316,7 +1322,7 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) /* Unlinking succeeds always. This is the only function * allowed to delete or replace socket policy. */ - __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir); + xfrm_sk_policy_unlink(old_pol, dir); } write_unlock_bh(&net->xfrm.xfrm_policy_lock); @@ -1349,7 +1355,7 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir) memcpy(newp->xfrm_vec, old->xfrm_vec, newp->xfrm_nr*sizeof(struct xfrm_tmpl)); write_lock_bh(&net->xfrm.xfrm_policy_lock); - __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir); + xfrm_sk_policy_link(newp, dir); write_unlock_bh(&net->xfrm.xfrm_policy_lock); xfrm_pol_put(newp); } @@ -1878,7 +1884,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, static void xfrm_policy_queue_process(unsigned long arg) { - int err = 0; struct sk_buff *skb; struct sock *sk; struct dst_entry *dst; @@ -1941,7 +1946,7 @@ static void xfrm_policy_queue_process(unsigned long arg) skb_dst_drop(skb); skb_dst_set(skb, dst); - err = dst_output(skb); + dst_output(skb); } out: @@ -2966,10 +2971,11 @@ static int __net_init xfrm_policy_init(struct net *net) goto out_byidx; net->xfrm.policy_idx_hmask = hmask; - for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { + for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { struct xfrm_policy_hash *htab; net->xfrm.policy_count[dir] = 0; + net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0; INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]); htab = &net->xfrm.policy_bydst[dir]; @@ -3021,7 +3027,7 @@ static void xfrm_policy_fini(struct net *net) WARN_ON(!list_empty(&net->xfrm.policy_all)); - for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { + for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { struct xfrm_policy_hash *htab; WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir])); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index e812e988c111b7d83b4e52321f394aabc9a69036..8128594ab3797e4814e77ef80246456641458027 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -824,13 +824,15 @@ static int copy_to_user_state_extra(struct xfrm_state *x, ret = xfrm_mark_put(skb, &x->mark); if (ret) goto out; - if (x->replay_esn) { + if (x->replay_esn) ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL, xfrm_replay_state_esn_len(x->replay_esn), x->replay_esn); - if (ret) - goto out; - } + else + ret = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), + &x->replay); + if (ret) + goto out; if (x->security) ret = copy_sec_ctx(x->security, skb); out: @@ -2569,6 +2571,8 @@ static inline size_t xfrm_sa_len(struct xfrm_state *x) l += nla_total_size(sizeof(x->tfcpad)); if (x->replay_esn) l += 
nla_total_size(xfrm_replay_state_esn_len(x->replay_esn)); + else + l += nla_total_size(sizeof(struct xfrm_replay_state)); if (x->security) l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) + x->security->ctx_len); diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 634391797856243d609bb81f973b6f89a3712944..b5b3600dcdf5d004e01c1a558011519adfc386ba 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -2,11 +2,32 @@ obj- := dummy.o # List of programs to build -hostprogs-y := test_verifier +hostprogs-y := test_verifier test_maps +hostprogs-y += sock_example +hostprogs-y += sockex1 +hostprogs-y += sockex2 test_verifier-objs := test_verifier.o libbpf.o +test_maps-objs := test_maps.o libbpf.o +sock_example-objs := sock_example.o libbpf.o +sockex1-objs := bpf_load.o libbpf.o sockex1_user.o +sockex2-objs := bpf_load.o libbpf.o sockex2_user.o # Tell kbuild to always build the programs always := $(hostprogs-y) +always += sockex1_kern.o +always += sockex2_kern.o HOSTCFLAGS += -I$(objtree)/usr/include + +HOSTCFLAGS_bpf_load.o += -I$(objtree)/usr/include -Wno-unused-variable +HOSTLOADLIBES_sockex1 += -lelf +HOSTLOADLIBES_sockex2 += -lelf + +# point this to your LLVM backend with bpf support +LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc + +%.o: %.c + clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \ + -D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \ + -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@ diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..ca0333146006af20352ec704d5049085db975206 --- /dev/null +++ b/samples/bpf/bpf_helpers.h @@ -0,0 +1,40 @@ +#ifndef __BPF_HELPERS_H +#define __BPF_HELPERS_H + +/* helper macro to place programs, maps, license in + * different sections in elf_bpf file. 
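(For example, SEC("maps") tags struct bpf_map_def objects and SEC("license") tags the license string.)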
Section names + * are interpreted by elf_bpf loader + */ +#define SEC(NAME) __attribute__((section(NAME), used)) + +/* helper functions called from eBPF programs written in C */ +static void *(*bpf_map_lookup_elem)(void *map, void *key) = + (void *) BPF_FUNC_map_lookup_elem; +static int (*bpf_map_update_elem)(void *map, void *key, void *value, + unsigned long long flags) = + (void *) BPF_FUNC_map_update_elem; +static int (*bpf_map_delete_elem)(void *map, void *key) = + (void *) BPF_FUNC_map_delete_elem; + +/* llvm builtin functions that eBPF C program may use to + * emit BPF_LD_ABS and BPF_LD_IND instructions + */ +struct sk_buff; +unsigned long long load_byte(void *skb, + unsigned long long off) asm("llvm.bpf.load.byte"); +unsigned long long load_half(void *skb, + unsigned long long off) asm("llvm.bpf.load.half"); +unsigned long long load_word(void *skb, + unsigned long long off) asm("llvm.bpf.load.word"); + +/* a helper structure used by eBPF C program + * to describe map attributes to elf_bpf loader + */ +struct bpf_map_def { + unsigned int type; + unsigned int key_size; + unsigned int value_size; + unsigned int max_entries; +}; + +#endif diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c new file mode 100644 index 0000000000000000000000000000000000000000..1831d236382b5e06636245dbc87ed819f227e910 --- /dev/null +++ b/samples/bpf/bpf_load.c @@ -0,0 +1,203 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "libbpf.h" +#include "bpf_helpers.h" +#include "bpf_load.h" + +static char license[128]; +static bool processed_sec[128]; +int map_fd[MAX_MAPS]; +int prog_fd[MAX_PROGS]; +int prog_cnt; + +static int load_and_attach(const char *event, struct bpf_insn *prog, int size) +{ + int fd; + bool is_socket = strncmp(event, "socket", 6) == 0; + + if (!is_socket) + /* tracing events tbd */ + return -1; + + fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, + prog, size, license); + + if (fd < 0) { + printf("bpf_prog_load() err=%d\n%s", errno, bpf_log_buf); + return -1; + } + + prog_fd[prog_cnt++] = fd; + + return 0; +} + +static int load_maps(struct bpf_map_def *maps, int len) +{ + int i; + + for (i = 0; i < len / sizeof(struct bpf_map_def); i++) { + + map_fd[i] = bpf_create_map(maps[i].type, + maps[i].key_size, + maps[i].value_size, + maps[i].max_entries); + if (map_fd[i] < 0) + return 1; + } + return 0; +} + +static int get_sec(Elf *elf, int i, GElf_Ehdr *ehdr, char **shname, + GElf_Shdr *shdr, Elf_Data **data) +{ + Elf_Scn *scn; + + scn = elf_getscn(elf, i); + if (!scn) + return 1; + + if (gelf_getshdr(scn, shdr) != shdr) + return 2; + + *shname = elf_strptr(elf, ehdr->e_shstrndx, shdr->sh_name); + if (!*shname || !shdr->sh_size) + return 3; + + *data = elf_getdata(scn, 0); + if (!*data || elf_getdata(scn, *data) != NULL) + return 4; + + return 0; +} + +static int parse_relo_and_apply(Elf_Data *data, Elf_Data *symbols, + GElf_Shdr *shdr, struct bpf_insn *insn) +{ + int i, nrels; + + nrels = shdr->sh_size / shdr->sh_entsize; + + for (i = 0; i < nrels; i++) { + GElf_Sym sym; + GElf_Rel rel; + unsigned int insn_idx; + + gelf_getrel(data, i, &rel); + + insn_idx = rel.r_offset / sizeof(struct bpf_insn); + + gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym); + + if (insn[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) { + printf("invalid relo for insn[%d].code 0x%x\n", + insn_idx, insn[insn_idx].code); + return 1; + } + insn[insn_idx].src_reg = BPF_PSEUDO_MAP_FD; + insn[insn_idx].imm = map_fd[sym.st_value / 
sizeof(struct bpf_map_def)]; + } + + return 0; +} + +int load_bpf_file(char *path) +{ + int fd, i; + Elf *elf; + GElf_Ehdr ehdr; + GElf_Shdr shdr, shdr_prog; + Elf_Data *data, *data_prog, *symbols = NULL; + char *shname, *shname_prog; + + if (elf_version(EV_CURRENT) == EV_NONE) + return 1; + + fd = open(path, O_RDONLY, 0); + if (fd < 0) + return 1; + + elf = elf_begin(fd, ELF_C_READ, NULL); + + if (!elf) + return 1; + + if (gelf_getehdr(elf, &ehdr) != &ehdr) + return 1; + + /* scan over all elf sections to get license and map info */ + for (i = 1; i < ehdr.e_shnum; i++) { + + if (get_sec(elf, i, &ehdr, &shname, &shdr, &data)) + continue; + + if (0) /* helpful for llvm debugging */ + printf("section %d:%s data %p size %zd link %d flags %d\n", + i, shname, data->d_buf, data->d_size, + shdr.sh_link, (int) shdr.sh_flags); + + if (strcmp(shname, "license") == 0) { + processed_sec[i] = true; + memcpy(license, data->d_buf, data->d_size); + } else if (strcmp(shname, "maps") == 0) { + processed_sec[i] = true; + if (load_maps(data->d_buf, data->d_size)) + return 1; + } else if (shdr.sh_type == SHT_SYMTAB) { + symbols = data; + } + } + + /* load programs that need map fixup (relocations) */ + for (i = 1; i < ehdr.e_shnum; i++) { + + if (get_sec(elf, i, &ehdr, &shname, &shdr, &data)) + continue; + if (shdr.sh_type == SHT_REL) { + struct bpf_insn *insns; + + if (get_sec(elf, shdr.sh_info, &ehdr, &shname_prog, + &shdr_prog, &data_prog)) + continue; + + insns = (struct bpf_insn *) data_prog->d_buf; + + processed_sec[shdr.sh_info] = true; + processed_sec[i] = true; + + if (parse_relo_and_apply(data, symbols, &shdr, insns)) + continue; + + if (memcmp(shname_prog, "events/", 7) == 0 || + memcmp(shname_prog, "socket", 6) == 0) + load_and_attach(shname_prog, insns, data_prog->d_size); + } + } + + /* load programs that don't use maps */ + for (i = 1; i < ehdr.e_shnum; i++) { + + if (processed_sec[i]) + continue; + + if (get_sec(elf, i, &ehdr, &shname, &shdr, &data)) + continue; + + if (memcmp(shname, "events/", 7) == 0 || + memcmp(shname, "socket", 6) == 0) + load_and_attach(shname, data->d_buf, data->d_size); + } + + close(fd); + return 0; +} diff --git a/samples/bpf/bpf_load.h b/samples/bpf/bpf_load.h new file mode 100644 index 0000000000000000000000000000000000000000..27789a34f5e68e93c60b8e2e3226313e265d7464 --- /dev/null +++ b/samples/bpf/bpf_load.h @@ -0,0 +1,24 @@ +#ifndef __BPF_LOAD_H +#define __BPF_LOAD_H + +#define MAX_MAPS 32 +#define MAX_PROGS 32 + +extern int map_fd[MAX_MAPS]; +extern int prog_fd[MAX_PROGS]; + +/* parses elf file compiled by llvm .c->.o + * . parses 'maps' section and creates maps via BPF syscall + * . parses 'license' section and passes it to syscall + * . parses elf relocations for BPF maps and adjusts BPF_LD_IMM64 insns by + * storing map_fd into insn->imm and marking such insns as BPF_PSEUDO_MAP_FD + * . 
loads eBPF programs via BPF syscall + * + * One ELF file can contain multiple BPF programs which will be loaded + * and their FDs stored in the prog_fd array + * + * returns zero on success + */ +int load_bpf_file(char *path); + +#endif diff --git a/samples/bpf/libbpf.c b/samples/bpf/libbpf.c index ff65044207384b908f511f48567e1fb57ec72982..46d50b7ddf796f8c35fbeef48fa6aa8b6871fedc 100644 --- a/samples/bpf/libbpf.c +++ b/samples/bpf/libbpf.c @@ -7,6 +7,10 @@ #include #include #include +#include +#include +#include +#include #include "libbpf.h" static __u64 ptr_to_u64(void *ptr) @@ -27,12 +31,13 @@ int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size, return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr)); } -int bpf_update_elem(int fd, void *key, void *value) +int bpf_update_elem(int fd, void *key, void *value, unsigned long long flags) { union bpf_attr attr = { .map_fd = fd, .key = ptr_to_u64(key), .value = ptr_to_u64(value), + .flags = flags, }; return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)); @@ -92,3 +97,27 @@ int bpf_prog_load(enum bpf_prog_type prog_type, return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr)); } + +int open_raw_sock(const char *name) +{ + struct sockaddr_ll sll; + int sock; + + sock = socket(PF_PACKET, SOCK_RAW | SOCK_NONBLOCK | SOCK_CLOEXEC, htons(ETH_P_ALL)); + if (sock < 0) { + printf("cannot create raw socket\n"); + return -1; + } + + memset(&sll, 0, sizeof(sll)); + sll.sll_family = AF_PACKET; + sll.sll_ifindex = if_nametoindex(name); + sll.sll_protocol = htons(ETH_P_ALL); + if (bind(sock, (struct sockaddr *)&sll, sizeof(sll)) < 0) { + printf("bind to %s: %s\n", name, strerror(errno)); + close(sock); + return -1; + } + + return sock; +} diff --git a/samples/bpf/libbpf.h b/samples/bpf/libbpf.h index 8a31babeca5dc1878f87d4e400aedc2ca2b9388e..58c5fe1bdba1f08c278a750e69a7a32f706f819c 100644 --- a/samples/bpf/libbpf.h +++ b/samples/bpf/libbpf.h @@ -6,7 +6,7 @@ struct bpf_insn; int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size, int max_entries); -int bpf_update_elem(int fd, void *key, void *value); +int bpf_update_elem(int fd, void *key, void *value, unsigned long long flags); int bpf_lookup_elem(int fd, void *key, void *value); int bpf_delete_elem(int fd, void *key); int bpf_get_next_key(int fd, void *key, void *next_key); @@ -15,7 +15,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns, int insn_len, const char *license); -#define LOG_BUF_SIZE 8192 +#define LOG_BUF_SIZE 65536 extern char bpf_log_buf[LOG_BUF_SIZE]; /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ @@ -99,6 +99,16 @@ extern char bpf_log_buf[LOG_BUF_SIZE]; BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD) +/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */ + +#define BPF_LD_ABS(SIZE, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = IMM }) + /* Memory load, dst_reg = *(uint *) (src_reg + off16) */ #define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \ @@ -169,4 +179,7 @@ extern char bpf_log_buf[LOG_BUF_SIZE]; .off = 0, \ .imm = 0 }) +/* create RAW socket and bind to interface 'name' */ +int open_raw_sock(const char *name); + #endif diff --git a/samples/bpf/sock_example.c b/samples/bpf/sock_example.c new file mode 100644 index 0000000000000000000000000000000000000000..c8ad0404416f5da95e318cf4580255abc5174ee0 --- /dev/null +++ b/samples/bpf/sock_example.c @@ -0,0 +1,101 @@ +/* eBPF example 
program: + * - creates arraymap in kernel with key 4 bytes and value 8 bytes + * + * - loads eBPF program: + * r0 = skb->data[ETH_HLEN + offsetof(struct iphdr, protocol)]; + * *(u32*)(fp - 4) = r0; + * // assuming packet is IPv4, lookup ip->proto in a map + * value = bpf_map_lookup_elem(map_fd, fp - 4); + * if (value) + * (*(u64*)value) += 1; + * + * - attaches this program to eth0 raw socket + * + * - every second user space reads map[tcp], map[udp], map[icmp] to see + * how many packets of given protocol were seen on eth0 + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "libbpf.h" + +static int test_sock(void) +{ + int sock = -1, map_fd, prog_fd, i, key; + long long value = 0, tcp_cnt, udp_cnt, icmp_cnt; + + map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value), + 256); + if (map_fd < 0) { + printf("failed to create map '%s'\n", strerror(errno)); + goto cleanup; + } + + struct bpf_insn prog[] = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_LD_ABS(BPF_B, ETH_HLEN + offsetof(struct iphdr, protocol) /* R0 = ip->proto */), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */ + BPF_LD_MAP_FD(BPF_REG_1, map_fd), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */ + BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */ + BPF_MOV64_IMM(BPF_REG_0, 0), /* r0 = 0 */ + BPF_EXIT_INSN(), + }; + + prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog, sizeof(prog), + "GPL"); + if (prog_fd < 0) { + printf("failed to load prog '%s'\n", strerror(errno)); + goto cleanup; + } + + sock = open_raw_sock("lo"); + + if (setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd, + sizeof(prog_fd)) < 0) { + printf("setsockopt %s\n", strerror(errno)); + goto cleanup; + } + + for (i = 0; i < 10; i++) { + key = IPPROTO_TCP; + assert(bpf_lookup_elem(map_fd, &key, &tcp_cnt) == 0); + + key = IPPROTO_UDP; + assert(bpf_lookup_elem(map_fd, &key, &udp_cnt) == 0); + + key = IPPROTO_ICMP; + assert(bpf_lookup_elem(map_fd, &key, &icmp_cnt) == 0); + + printf("TCP %lld UDP %lld ICMP %lld packets\n", + tcp_cnt, udp_cnt, icmp_cnt); + sleep(1); + } + +cleanup: + /* maps, programs, raw sockets will auto cleanup on process exit */ + return 0; +} + +int main(void) +{ + FILE *f; + + f = popen("ping -c5 localhost", "r"); + (void)f; + + return test_sock(); +} diff --git a/samples/bpf/sockex1_kern.c b/samples/bpf/sockex1_kern.c new file mode 100644 index 0000000000000000000000000000000000000000..0668926629154836b09dc81b9e4cf78cfb4eb343 --- /dev/null +++ b/samples/bpf/sockex1_kern.c @@ -0,0 +1,25 @@ +#include +#include +#include +#include "bpf_helpers.h" + +struct bpf_map_def SEC("maps") my_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(u32), + .value_size = sizeof(long), + .max_entries = 256, +}; + +SEC("socket1") +int bpf_prog1(struct sk_buff *skb) +{ + int index = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol)); + long *value; + + value = bpf_map_lookup_elem(&my_map, &index); + if (value) + __sync_fetch_and_add(value, 1); + + return 0; +} +char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/sockex1_user.c b/samples/bpf/sockex1_user.c new file mode 100644 index 0000000000000000000000000000000000000000..34a443ff383121719f6f919694d5023fa36e1742 
--- /dev/null +++ b/samples/bpf/sockex1_user.c @@ -0,0 +1,49 @@ +#include +#include +#include +#include "libbpf.h" +#include "bpf_load.h" +#include +#include + +int main(int ac, char **argv) +{ + char filename[256]; + FILE *f; + int i, sock; + + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + + if (load_bpf_file(filename)) { + printf("%s", bpf_log_buf); + return 1; + } + + sock = open_raw_sock("lo"); + + assert(setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, prog_fd, + sizeof(prog_fd[0])) == 0); + + f = popen("ping -c5 localhost", "r"); + (void) f; + + for (i = 0; i < 5; i++) { + long long tcp_cnt, udp_cnt, icmp_cnt; + int key; + + key = IPPROTO_TCP; + assert(bpf_lookup_elem(map_fd[0], &key, &tcp_cnt) == 0); + + key = IPPROTO_UDP; + assert(bpf_lookup_elem(map_fd[0], &key, &udp_cnt) == 0); + + key = IPPROTO_ICMP; + assert(bpf_lookup_elem(map_fd[0], &key, &icmp_cnt) == 0); + + printf("TCP %lld UDP %lld ICMP %lld packets\n", + tcp_cnt, udp_cnt, icmp_cnt); + sleep(1); + } + + return 0; +} diff --git a/samples/bpf/sockex2_kern.c b/samples/bpf/sockex2_kern.c new file mode 100644 index 0000000000000000000000000000000000000000..6f0135f0f2176c8a1825c884c4a28a13c7c1f123 --- /dev/null +++ b/samples/bpf/sockex2_kern.c @@ -0,0 +1,215 @@ +#include +#include "bpf_helpers.h" +#include +#include +#include +#include +#include +#include +#define IP_MF 0x2000 +#define IP_OFFSET 0x1FFF + +struct vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +struct flow_keys { + __be32 src; + __be32 dst; + union { + __be32 ports; + __be16 port16[2]; + }; + __u16 thoff; + __u8 ip_proto; +}; + +static inline int proto_ports_offset(__u64 proto) +{ + switch (proto) { + case IPPROTO_TCP: + case IPPROTO_UDP: + case IPPROTO_DCCP: + case IPPROTO_ESP: + case IPPROTO_SCTP: + case IPPROTO_UDPLITE: + return 0; + case IPPROTO_AH: + return 4; + default: + return 0; + } +} + +static inline int ip_is_fragment(struct sk_buff *ctx, __u64 nhoff) +{ + return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off)) + & (IP_MF | IP_OFFSET); +} + +static inline __u32 ipv6_addr_hash(struct sk_buff *ctx, __u64 off) +{ + __u64 w0 = load_word(ctx, off); + __u64 w1 = load_word(ctx, off + 4); + __u64 w2 = load_word(ctx, off + 8); + __u64 w3 = load_word(ctx, off + 12); + + return (__u32)(w0 ^ w1 ^ w2 ^ w3); +} + +static inline __u64 parse_ip(struct sk_buff *skb, __u64 nhoff, __u64 *ip_proto, + struct flow_keys *flow) +{ + __u64 verlen; + + if (unlikely(ip_is_fragment(skb, nhoff))) + *ip_proto = 0; + else + *ip_proto = load_byte(skb, nhoff + offsetof(struct iphdr, protocol)); + + if (*ip_proto != IPPROTO_GRE) { + flow->src = load_word(skb, nhoff + offsetof(struct iphdr, saddr)); + flow->dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr)); + } + + verlen = load_byte(skb, nhoff + 0/*offsetof(struct iphdr, ihl)*/); + if (likely(verlen == 0x45)) + nhoff += 20; + else + nhoff += (verlen & 0xF) << 2; + + return nhoff; +} + +static inline __u64 parse_ipv6(struct sk_buff *skb, __u64 nhoff, __u64 *ip_proto, + struct flow_keys *flow) +{ + *ip_proto = load_byte(skb, + nhoff + offsetof(struct ipv6hdr, nexthdr)); + flow->src = ipv6_addr_hash(skb, + nhoff + offsetof(struct ipv6hdr, saddr)); + flow->dst = ipv6_addr_hash(skb, + nhoff + offsetof(struct ipv6hdr, daddr)); + nhoff += sizeof(struct ipv6hdr); + + return nhoff; +} + +static inline bool flow_dissector(struct sk_buff *skb, struct flow_keys *flow) +{ + __u64 nhoff = ETH_HLEN; + __u64 ip_proto; + __u64 proto = load_half(skb, 12); + int poff; + + if (proto == ETH_P_8021AD) 
+	if (proto == ETH_P_8021AD) {
+		proto = load_half(skb, nhoff + offsetof(struct vlan_hdr,
+							h_vlan_encapsulated_proto));
+		nhoff += sizeof(struct vlan_hdr);
+	}
+
+	if (proto == ETH_P_8021Q) {
+		proto = load_half(skb, nhoff + offsetof(struct vlan_hdr,
+							h_vlan_encapsulated_proto));
+		nhoff += sizeof(struct vlan_hdr);
+	}
+
+	if (likely(proto == ETH_P_IP))
+		nhoff = parse_ip(skb, nhoff, &ip_proto, flow);
+	else if (proto == ETH_P_IPV6)
+		nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow);
+	else
+		return false;
+
+	switch (ip_proto) {
+	case IPPROTO_GRE: {
+		struct gre_hdr {
+			__be16 flags;
+			__be16 proto;
+		};
+
+		__u64 gre_flags = load_half(skb,
+					    nhoff + offsetof(struct gre_hdr, flags));
+		__u64 gre_proto = load_half(skb,
+					    nhoff + offsetof(struct gre_hdr, proto));
+
+		if (gre_flags & (GRE_VERSION|GRE_ROUTING))
+			break;
+
+		proto = gre_proto;
+		nhoff += 4;
+		if (gre_flags & GRE_CSUM)
+			nhoff += 4;
+		if (gre_flags & GRE_KEY)
+			nhoff += 4;
+		if (gre_flags & GRE_SEQ)
+			nhoff += 4;
+
+		if (proto == ETH_P_8021Q) {
+			proto = load_half(skb,
+					  nhoff + offsetof(struct vlan_hdr,
+							   h_vlan_encapsulated_proto));
+			nhoff += sizeof(struct vlan_hdr);
+		}
+
+		if (proto == ETH_P_IP)
+			nhoff = parse_ip(skb, nhoff, &ip_proto, flow);
+		else if (proto == ETH_P_IPV6)
+			nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow);
+		else
+			return false;
+		break;
+	}
+	case IPPROTO_IPIP:
+		nhoff = parse_ip(skb, nhoff, &ip_proto, flow);
+		break;
+	case IPPROTO_IPV6:
+		nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow);
+		break;
+	default:
+		break;
+	}
+
+	flow->ip_proto = ip_proto;
+	poff = proto_ports_offset(ip_proto);
+	if (poff >= 0) {
+		nhoff += poff;
+		flow->ports = load_word(skb, nhoff);
+	}
+
+	flow->thoff = (__u16) nhoff;
+
+	return true;
+}
+
+struct bpf_map_def SEC("maps") hash_map = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(__be32),
+	.value_size = sizeof(long),
+	.max_entries = 1024,
+};
+
+SEC("socket2")
+int bpf_prog2(struct sk_buff *skb)
+{
+	struct flow_keys flow;
+	long *value;
+	u32 key;
+
+	if (!flow_dissector(skb, &flow))
+		return 0;
+
+	key = flow.dst;
+	value = bpf_map_lookup_elem(&hash_map, &key);
+	if (value) {
+		__sync_fetch_and_add(value, 1);
+	} else {
+		long val = 1;
+
+		bpf_map_update_elem(&hash_map, &key, &val, BPF_ANY);
+	}
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/sockex2_user.c b/samples/bpf/sockex2_user.c
new file mode 100644
index 0000000000000000000000000000000000000000..d2d5f5a790d3c4952ff027b326add6a29e048157
--- /dev/null
+++ b/samples/bpf/sockex2_user.c
@@ -0,0 +1,44 @@
+#include <stdio.h>
+#include <assert.h>
+#include <linux/bpf.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+#include <unistd.h>
+#include <arpa/inet.h>
+
+int main(int ac, char **argv)
+{
+	char filename[256];
+	FILE *f;
+	int i, sock;
+
+	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+	if (load_bpf_file(filename)) {
+		printf("%s", bpf_log_buf);
+		return 1;
+	}
+
+	sock = open_raw_sock("lo");
+
+	assert(setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, prog_fd,
+			  sizeof(prog_fd[0])) == 0);
+
+	f = popen("ping -c5 localhost", "r");
+	(void) f;
+
+	for (i = 0; i < 5; i++) {
+		int key = 0, next_key;
+		long long value;
+
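+		/* bpf_get_next_key() lets user space walk the map: each
+		 * returned key is fed back in to fetch the one after it
+		 */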
+		while (bpf_get_next_key(map_fd[0], &key, &next_key) == 0) {
+			bpf_lookup_elem(map_fd[0], &next_key, &value);
+			printf("ip %s count %lld\n",
+			       inet_ntoa((struct in_addr){htonl(next_key)}),
+			       value);
+			key = next_key;
+		}
+		sleep(1);
+	}
+	return 0;
+}
diff --git a/samples/bpf/test_maps.c b/samples/bpf/test_maps.c
new file mode 100644
index 0000000000000000000000000000000000000000..e286b42307f30dfe840267be16c53b0580150905
--- /dev/null
+++ b/samples/bpf/test_maps.c
@@ -0,0 +1,291 @@
+/*
+ * Testsuite for eBPF maps
+ *
+ * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <linux/bpf.h>
+#include <errno.h>
+#include <string.h>
+#include <assert.h>
+#include <sys/wait.h>
+#include <stdlib.h>
+#include "libbpf.h"
+
+/* sanity tests for map API */
+static void test_hashmap_sanity(int i, void *data)
+{
+	long long key, next_key, value;
+	int map_fd;
+
+	map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 2);
+	if (map_fd < 0) {
+		printf("failed to create hashmap '%s'\n", strerror(errno));
+		exit(1);
+	}
+
+	key = 1;
+	value = 1234;
+	/* insert key=1 element */
+	assert(bpf_update_elem(map_fd, &key, &value, BPF_ANY) == 0);
+
+	value = 0;
+	/* BPF_NOEXIST means: add new element if it doesn't exist */
+	assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 &&
+	       /* key=1 already exists */
+	       errno == EEXIST);
+
+	assert(bpf_update_elem(map_fd, &key, &value, -1) == -1 && errno == EINVAL);
+
+	/* check that key=1 can be found */
+	assert(bpf_lookup_elem(map_fd, &key, &value) == 0 && value == 1234);
+
+	key = 2;
+	/* check that key=2 is not found */
+	assert(bpf_lookup_elem(map_fd, &key, &value) == -1 && errno == ENOENT);
+
+	/* BPF_EXIST means: update existing element */
+	assert(bpf_update_elem(map_fd, &key, &value, BPF_EXIST) == -1 &&
+	       /* key=2 is not there */
+	       errno == ENOENT);
+
+	/* insert key=2 element */
+	assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == 0);
+
+	/* key=1 and key=2 were inserted, check that key=0 cannot be inserted
+	 * due to max_entries limit
+	 */
+	key = 0;
+	assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 &&
+	       errno == E2BIG);
+
+	/* check that key = 0 doesn't exist */
+	assert(bpf_delete_elem(map_fd, &key) == -1 && errno == ENOENT);
+
+	/* iterate over two elements */
+	assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 &&
+	       next_key == 2);
+	assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 &&
+	       next_key == 1);
+	assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 &&
+	       errno == ENOENT);
+
+	/* delete both elements */
+	key = 1;
+	assert(bpf_delete_elem(map_fd, &key) == 0);
+	key = 2;
+	assert(bpf_delete_elem(map_fd, &key) == 0);
+	assert(bpf_delete_elem(map_fd, &key) == -1 && errno == ENOENT);
+
+	key = 0;
+	/* check that map is empty */
+	assert(bpf_get_next_key(map_fd, &key, &next_key) == -1 &&
+	       errno == ENOENT);
+	close(map_fd);
+}
+
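+/* array maps differ from hash maps: every slot exists up front and is
+ * zero-initialized, keys are array indices, and elements can never be
+ * deleted
+ */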
+static void test_arraymap_sanity(int i, void *data)
+{
+	int key, next_key, map_fd;
+	long long value;
+
+	map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value), 2);
+	if (map_fd < 0) {
+		printf("failed to create arraymap '%s'\n", strerror(errno));
+		exit(1);
+	}
+
+	key = 1;
+	value = 1234;
+	/* insert key=1 element */
+	assert(bpf_update_elem(map_fd, &key, &value, BPF_ANY) == 0);
+
+	value = 0;
+	assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 &&
+	       errno == EEXIST);
+
+	/* check that key=1 can be found */
+	assert(bpf_lookup_elem(map_fd, &key, &value) == 0 && value == 1234);
+
+	key = 0;
+	/* check that key=0 is also found and zero initialized */
+	assert(bpf_lookup_elem(map_fd, &key, &value) == 0 && value == 0);
+
+	/* key=0 and key=1 were inserted, check that key=2 cannot be inserted
+	 * due to max_entries limit
+	 */
+	key = 2;
+	assert(bpf_update_elem(map_fd, &key, &value, BPF_EXIST) == -1 &&
+	       errno == E2BIG);
+
+	/* check that key = 2 doesn't exist */
+	assert(bpf_lookup_elem(map_fd, &key, &value) == -1 && errno == ENOENT);
+
+	/* iterate over two elements */
+	assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 &&
+	       next_key == 0);
+	assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 &&
+	       next_key == 1);
+	assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 &&
+	       errno == ENOENT);
+
+	/* delete shouldn't succeed */
+	key = 1;
+	assert(bpf_delete_elem(map_fd, &key) == -1 && errno == EINVAL);
+
+	close(map_fd);
+}
+
+#define MAP_SIZE (32 * 1024)
+static void test_map_large(void)
+{
+	struct bigkey {
+		int a;
+		char b[116];
+		long long c;
+	} key;
+	int map_fd, i, value;
+
+	/* allocate 4Mbyte of memory */
+	map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
+				MAP_SIZE);
+	if (map_fd < 0) {
+		printf("failed to create large map '%s'\n", strerror(errno));
+		exit(1);
+	}
+
+	for (i = 0; i < MAP_SIZE; i++) {
+		key = (struct bigkey) {.c = i};
+		value = i;
+		assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == 0);
+	}
+	key.c = -1;
+	assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 &&
+	       errno == E2BIG);
+
+	/* iterate through all elements */
+	for (i = 0; i < MAP_SIZE; i++)
+		assert(bpf_get_next_key(map_fd, &key, &key) == 0);
+	assert(bpf_get_next_key(map_fd, &key, &key) == -1 && errno == ENOENT);
+
+	key.c = 0;
+	assert(bpf_lookup_elem(map_fd, &key, &value) == 0 && value == 0);
+	key.a = 1;
+	assert(bpf_lookup_elem(map_fd, &key, &value) == -1 && errno == ENOENT);
+
+	close(map_fd);
+}
+
+/* fork N children and wait for them to complete */
+static void run_parallel(int tasks, void (*fn)(int i, void *data), void *data)
+{
+	pid_t pid[tasks];
+	int i;
+
+	for (i = 0; i < tasks; i++) {
+		pid[i] = fork();
+		if (pid[i] == 0) {
+			fn(i, data);
+			exit(0);
+		} else if (pid[i] == -1) {
+			printf("couldn't spawn #%d process\n", i);
+			exit(1);
+		}
+	}
+	for (i = 0; i < tasks; i++) {
+		int status;
+
+		assert(waitpid(pid[i], &status, 0) == pid[i]);
+		assert(status == 0);
+	}
+}
+
+static void test_map_stress(void)
+{
+	run_parallel(100, test_hashmap_sanity, NULL);
+	run_parallel(100, test_arraymap_sanity, NULL);
+}
+
+#define TASKS 1024
+#define DO_UPDATE 1
+#define DO_DELETE 0
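+/* each child updates every TASKS-th key, so together the children touch
+ * all MAP_SIZE keys exactly once
+ */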
+static void do_work(int fn, void *data)
+{
+	int map_fd = ((int *)data)[0];
+	int do_update = ((int *)data)[1];
+	int i;
+	int key, value;
+
+	for (i = fn; i < MAP_SIZE; i += TASKS) {
+		key = value = i;
+		if (do_update)
+			assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == 0);
+		else
+			assert(bpf_delete_elem(map_fd, &key) == 0);
+	}
+}
+
+static void test_map_parallel(void)
+{
+	int i, map_fd, key = 0, value = 0;
+	int data[2];
+
+	map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
+				MAP_SIZE);
+	if (map_fd < 0) {
+		printf("failed to create map for parallel test '%s'\n",
+		       strerror(errno));
+		exit(1);
+	}
+
+	data[0] = map_fd;
+	data[1] = DO_UPDATE;
+	/* use the same map_fd in children to add elements to this map
+	 * child_0 adds key=0, key=1024, key=2048, ...
+	 * child_1 adds key=1, key=1025, key=2049, ...
+	 * child_1023 adds key=1023, ...
+	 */
+	run_parallel(TASKS, do_work, data);
+
+	/* check that key=0 is already there */
+	assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 &&
+	       errno == EEXIST);
+
+	/* check that all elements were inserted */
+	key = -1;
+	for (i = 0; i < MAP_SIZE; i++)
+		assert(bpf_get_next_key(map_fd, &key, &key) == 0);
+	assert(bpf_get_next_key(map_fd, &key, &key) == -1 && errno == ENOENT);
+
+	/* another check for all elements */
+	for (i = 0; i < MAP_SIZE; i++) {
+		key = MAP_SIZE - i - 1;
+		assert(bpf_lookup_elem(map_fd, &key, &value) == 0 &&
+		       value == key);
+	}
+
+	/* now let's delete all elements in parallel */
+	data[1] = DO_DELETE;
+	run_parallel(TASKS, do_work, data);
+
+	/* nothing should be left */
+	key = -1;
+	assert(bpf_get_next_key(map_fd, &key, &key) == -1 && errno == ENOENT);
+}
+
+int main(void)
+{
+	test_hashmap_sanity(0, NULL);
+	test_arraymap_sanity(0, NULL);
+	test_map_large();
+	test_map_parallel();
+	test_map_stress();
+	printf("test_maps: OK\n");
+	return 0;
+}
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
index eb4bec0ad8afd2da3c8a52c61f1937425b082665..b96175e903639a2fe2c9b1d4777df0aaa63d6e6e 100644
--- a/samples/bpf/test_verifier.c
+++ b/samples/bpf/test_verifier.c
@@ -261,7 +261,7 @@ static struct bpf_test tests[] = {
 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 			BPF_LD_MAP_FD(BPF_REG_1, 0),
-			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 			BPF_EXIT_INSN(),
 		},
 		.fixup = {2},
@@ -417,7 +417,7 @@ static struct bpf_test tests[] = {
 			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 			BPF_LD_MAP_FD(BPF_REG_1, 0),
-			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "fd 0 is not pointing to valid bpf_map",
@@ -430,7 +430,7 @@ static struct bpf_test tests[] = {
 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 			BPF_LD_MAP_FD(BPF_REG_1, 0),
-			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
 			BPF_EXIT_INSN(),
 		},
@@ -445,7 +445,7 @@ static struct bpf_test tests[] = {
 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 			BPF_LD_MAP_FD(BPF_REG_1, 0),
-			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
 			BPF_EXIT_INSN(),
@@ -461,7 +461,7 @@ static struct bpf_test tests[] = {
 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 			BPF_LD_MAP_FD(BPF_REG_1, 0),
-			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
 			BPF_EXIT_INSN(),
@@ -548,7 +548,7 @@ static struct bpf_test tests[] = {
 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
 			BPF_LD_MAP_FD(BPF_REG_1, 0),
-			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
 			BPF_EXIT_INSN(),
 		},
 		.fixup = {24},
@@ -602,6 +602,45 @@ static struct bpf_test tests[] = {
 		},
 		.result = ACCEPT,
 	},
+	{
+		"jump test 5",
+		.insns = {
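+			/* five identical branch diamonds; each path stores a
+			 * copy of the frame pointer at fp-8
+			 */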
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
 };
 
 static int probe_filter_length(struct bpf_insn *fp)
@@ -620,7 +659,7 @@ static int create_map(void)
 	long long key, value = 0;
 	int map_fd;
 
-	map_fd = bpf_create_map(BPF_MAP_TYPE_UNSPEC, sizeof(key), sizeof(value), 1024);
+	map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 1024);
 	if (map_fd < 0) {
 		printf("failed to create map '%s'\n", strerror(errno));
 	}
@@ -630,7 +669,7 @@ static int test(void)
 {
-	int prog_fd, i;
+	int prog_fd, i, pass_cnt = 0, err_cnt = 0;
 
 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
 		struct bpf_insn *prog = tests[i].insns;
@@ -657,21 +696,25 @@ static int test(void)
 			printf("FAIL\nfailed to load prog '%s'\n",
 			       strerror(errno));
 			printf("%s", bpf_log_buf);
+			err_cnt++;
 			goto fail;
 		}
 	} else {
 		if (prog_fd >= 0) {
 			printf("FAIL\nunexpected success to load\n");
 			printf("%s", bpf_log_buf);
+			err_cnt++;
 			goto fail;
 		}
 		if (strstr(bpf_log_buf, tests[i].errstr) == 0) {
 			printf("FAIL\nunexpected error message: %s",
 			       bpf_log_buf);
+			err_cnt++;
 			goto fail;
 		}
 	}
 
+	pass_cnt++;
 	printf("OK\n");
 fail:
 	if (map_fd >= 0)
@@ -679,6 +722,7 @@ fail:
 		close(prog_fd);
 	}
 
+	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
 	return 0;
 }
diff --git a/tools/perf/util/include/asm/hash.h b/tools/perf/util/include/asm/hash.h
deleted file mode 100644
index d82b170bb216e85f783608b36c7215beff48f8c2..0000000000000000000000000000000000000000
--- a/tools/perf/util/include/asm/hash.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_GENERIC_HASH_H
-#define __ASM_GENERIC_HASH_H
-
-/* Stub */
-
-#endif /* __ASM_GENERIC_HASH_H */